Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-25 05:14:40 +01:00)

Merge branch 'develop' into feature/remove-substance-painter-addon

This commit is contained in commit f8ea5af82c
217 changed files with 96 additions and 30259 deletions
@@ -87,7 +89,9 @@ class IntegrateHeroVersion(
     ]
     # QUESTION/TODO this process should happen on server if crashed due to
     # permissions error on files (files were used or user didn't have perms)
-    # *but all other plugins must be sucessfully completed
+    # *but all other plugins must be successfully completed
+
+    use_hardlinks = False

     def process(self, instance):
         if not self.is_active(instance.data):
@@ -617,24 +619,32 @@ class IntegrateHeroVersion(
             self.log.debug("Folder already exists: \"{}\"".format(dirname))

-        # First try hardlink and copy if paths are cross drive
-        try:
-            create_hard_link(src_path, dst_path)
-            # Return when successful
-            return
-
-        except OSError as exc:
-            # re-raise exception if different than
-            # EXDEV - cross drive path
-            # EINVAL - wrong format, must be NTFS
-            self.log.debug("Hardlink failed with errno:'{}'".format(exc.errno))
-            if exc.errno not in [errno.EXDEV, errno.EINVAL]:
-                raise
+        if self.use_hardlinks:
+            # First try hardlink and copy if paths are cross drive
+            self.log.debug("Hardlinking file \"{}\" to \"{}\"".format(
+                src_path, dst_path
+            ))
+            try:
+                create_hard_link(src_path, dst_path)
+                # Return when successful
+                return
+
+            except OSError as exc:
+                # re-raise exception if different than
+                # EXDEV - cross drive path
+                # EINVAL - wrong format, must be NTFS
+                self.log.debug(
+                    "Hardlink failed with errno:'{}'".format(exc.errno))
+                if exc.errno not in [errno.EXDEV, errno.EINVAL]:
+                    raise
+
+            self.log.debug(
+                "Hardlinking failed, falling back to regular copy...")
+
+        self.log.debug("Copying file \"{}\" to \"{}\"".format(
+            src_path, dst_path
+        ))

         shutil.copy(src_path, dst_path)

     def version_from_representations(self, project_name, repres):
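For reference, the hardlink-then-copy fallback added above is a general pattern. A minimal standalone sketch of the same idea, using only the standard library (os.link stands in for ayon-core's create_hard_link; the paths are placeholders):

import errno
import os
import shutil


def link_or_copy(src_path, dst_path):
    # Try a hardlink first; EXDEV (cross-device) and EINVAL
    # (filesystem without hardlink support) fall back to a copy,
    # anything else is re-raised.
    try:
        os.link(src_path, dst_path)
        return
    except OSError as exc:
        if exc.errno not in (errno.EXDEV, errno.EINVAL):
            raise
    shutil.copy(src_path, dst_path)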
@@ -743,6 +743,14 @@ class IntegrateHeroVersionModel(BaseSettingsModel):
     optional: bool = SettingsField(False, title="Optional")
     active: bool = SettingsField(True, title="Active")
     families: list[str] = SettingsField(default_factory=list, title="Families")
+    use_hardlinks: bool = SettingsField(
+        False, title="Use Hardlinks",
+        description="When enabled first try to make a hardlink of the version "
+                    "instead of a copy. This helps reduce disk usage, but may "
+                    "create issues.\nFor example there are known issues on "
+                    "Windows being unable to delete any of the hardlinks if "
+                    "any of the links is in use creating issues with updating "
+                    "hero versions.")


 class CleanUpModel(BaseSettingsModel):
@@ -1136,7 +1144,8 @@ DEFAULT_PUBLISH_VALUES = {
             "layout",
             "mayaScene",
             "simpleUnrealTexture"
-        ]
+        ],
+        "use_hardlinks": False
     },
     "CleanUp": {
         "paterns": [],
@@ -1,13 +0,0 @@
from .version import __version__
from .addon import (
    CELACTION_ROOT_DIR,
    CelactionAddon,
)


__all__ = (
    "__version__",

    "CELACTION_ROOT_DIR",
    "CelactionAddon",
)
@@ -1,31 +0,0 @@
import os
from ayon_core.addon import AYONAddon, IHostAddon

from .version import __version__

CELACTION_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))


class CelactionAddon(AYONAddon, IHostAddon):
    name = "celaction"
    version = __version__
    host_name = "celaction"

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(CELACTION_ROOT_DIR, "hooks")
        ]

    def add_implementation_envs(self, env, _app):
        # Set default values if are not already set via settings
        defaults = {
            "LOGLEVEL": "DEBUG"
        }
        for key, value in defaults.items():
            if not env.get(key):
                env[key] = value

    def get_workfile_extensions(self):
        return [".scn"]
@@ -1,152 +0,0 @@
import os
import shutil
import winreg
import subprocess
from ayon_core.lib import get_ayon_launcher_args
from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_celaction import CELACTION_ROOT_DIR


class CelactionPrelaunchHook(PreLaunchHook):
    """Bootstrap celaction with AYON"""
    app_groups = {"celaction"}
    platforms = {"windows"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        folder_attributes = self.data["folder_entity"]["attrib"]
        width = folder_attributes["resolutionWidth"]
        height = folder_attributes["resolutionHeight"]

        # Add workfile path to launch arguments
        workfile_path = self.workfile_path()
        if workfile_path:
            self.launch_context.launch_args.append(workfile_path)

        # setting output parameters
        path_user_settings = "\\".join([
            "Software", "CelAction", "CelAction2D", "User Settings"
        ])
        winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_user_settings)
        hKey = winreg.OpenKey(
            winreg.HKEY_CURRENT_USER, path_user_settings, 0,
            winreg.KEY_ALL_ACCESS
        )

        path_to_cli = os.path.join(
            CELACTION_ROOT_DIR, "scripts", "publish_cli.py"
        )
        subprocess_args = get_ayon_launcher_args("run", path_to_cli)
        executable = subprocess_args.pop(0)
        workfile_settings = self.get_workfile_settings()

        winreg.SetValueEx(
            hKey,
            "SubmitAppTitle",
            0,
            winreg.REG_SZ,
            executable
        )

        # add required arguments for workfile path
        parameters = subprocess_args + [
            "--currentFile", "*SCENE*"
        ]

        # Add custom parameters from workfile settings
        if "render_chunk" in workfile_settings["submission_overrides"]:
            parameters += [
                "--chunk", "*CHUNK*"
            ]
        if "resolution" in workfile_settings["submission_overrides"]:
            parameters += [
                "--resolutionWidth", "*X*",
                "--resolutionHeight", "*Y*"
            ]
        if "frame_range" in workfile_settings["submission_overrides"]:
            parameters += [
                "--frameStart", "*START*",
                "--frameEnd", "*END*"
            ]

        winreg.SetValueEx(
            hKey, "SubmitParametersTitle", 0, winreg.REG_SZ,
            subprocess.list2cmdline(parameters)
        )

        self.log.debug(f"__ parameters: \"{parameters}\"")

        # setting resolution parameters
        path_submit = "\\".join([
            path_user_settings, "Dialogs", "SubmitOutput"
        ])
        winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_submit)
        hKey = winreg.OpenKey(
            winreg.HKEY_CURRENT_USER, path_submit, 0,
            winreg.KEY_ALL_ACCESS
        )
        winreg.SetValueEx(hKey, "SaveScene", 0, winreg.REG_DWORD, 1)
        winreg.SetValueEx(hKey, "CustomX", 0, winreg.REG_DWORD, width)
        winreg.SetValueEx(hKey, "CustomY", 0, winreg.REG_DWORD, height)

        # making sure message dialogs don't appear when overwriting
        path_overwrite_scene = "\\".join([
            path_user_settings, "Messages", "OverwriteScene"
        ])
        winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_overwrite_scene)
        hKey = winreg.OpenKey(
            winreg.HKEY_CURRENT_USER, path_overwrite_scene, 0,
            winreg.KEY_ALL_ACCESS
        )
        winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 6)
        winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)

        # set scene as not saved
        path_scene_saved = "\\".join([
            path_user_settings, "Messages", "SceneSaved"
        ])
        winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_scene_saved)
        hKey = winreg.OpenKey(
            winreg.HKEY_CURRENT_USER, path_scene_saved, 0,
            winreg.KEY_ALL_ACCESS
        )
        winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 1)
        winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)

    def workfile_path(self):
        workfile_path = self.data["last_workfile_path"]

        # copy workfile from template if doesn't exist any on path
        if not os.path.exists(workfile_path):
            # TODO add ability to set different template workfile path via
            # settings
            template_path = os.path.join(
                CELACTION_ROOT_DIR,
                "resources",
                "celaction_template_scene.scn"
            )

            if not os.path.exists(template_path):
                self.log.warning(
                    "Couldn't find workfile template file in {}".format(
                        template_path
                    )
                )
                return

            self.log.info(
                f"Creating workfile from template: \"{template_path}\""
            )

            # Copy template workfile to new destination
            shutil.copy2(
                os.path.normpath(template_path),
                os.path.normpath(workfile_path)
            )

        self.log.info(f"Workfile to open: \"{workfile_path}\"")

        return workfile_path

    def get_workfile_settings(self):
        return self.data["project_settings"]["celaction"]["workfile"]
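The hook above only ever writes registry values. When debugging the CelAction submit setup it can help to read them back; a quick hedged sketch (same key path as the hook uses, value names taken from the code above):

import winreg

path = "Software\\CelAction\\CelAction2D\\User Settings"
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, path) as key:
    # QueryValueEx returns a (value, type) tuple
    app_title, _ = winreg.QueryValueEx(key, "SubmitAppTitle")
    parameters, _ = winreg.QueryValueEx(key, "SubmitParametersTitle")
    print(app_title, parameters)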
@@ -1,60 +0,0 @@
import pyblish.api
import sys
from pprint import pformat


class CollectCelactionCliKwargs(pyblish.api.ContextPlugin):
    """ Collects all keyword arguments passed from the terminal """

    label = "Collect Celaction Cli Kwargs"
    order = pyblish.api.CollectorOrder - 0.1

    def process(self, context):
        args = list(sys.argv[1:])
        self.log.info(str(args))
        missing_kwargs = []
        passing_kwargs = {}
        for key in (
            "chunk",
            "frameStart",
            "frameEnd",
            "resolutionWidth",
            "resolutionHeight",
            "currentFile",
        ):
            arg_key = f"--{key}"
            if arg_key not in args:
                missing_kwargs.append(key)
                continue
            arg_idx = args.index(arg_key)
            args.pop(arg_idx)
            if key != "currentFile":
                value = args.pop(arg_idx)
            else:
                path_parts = []
                while arg_idx < len(args):
                    path_parts.append(args.pop(arg_idx))
                value = " ".join(path_parts).strip('"')

            passing_kwargs[key] = value

        if missing_kwargs:
            self.log.debug("Missing arguments {}".format(
                ", ".join(
                    [f'"{key}"' for key in missing_kwargs]
                )
            ))

        self.log.info("Storing kwargs ...")
        self.log.debug("_ passing_kwargs: {}".format(pformat(passing_kwargs)))

        # set kwargs to context data
        context.set_data("passingKwargs", passing_kwargs)

        # get kwargs onto context data as keys with values
        for k, v in passing_kwargs.items():
            self.log.info(f"Setting `{k}` to instance.data with value: `{v}`")
            if k in ["frameStart", "frameEnd"]:
                context.data[k] = passing_kwargs[k] = int(v)
            else:
                context.data[k] = v
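The currentFile branch exists because a scene path containing spaces arrives as several argv tokens; everything after --currentFile is consumed and rejoined. A small illustration with a hypothetical argv tail (this is safe because currentFile is the last key processed in the loop above):

args = ["--frameStart", "1", "--frameEnd", "10",
        "--currentFile", "C:\\projects\\My", "Scene.scn"]

arg_idx = args.index("--currentFile")
args.pop(arg_idx)
path_parts = []
while arg_idx < len(args):
    path_parts.append(args.pop(arg_idx))
print(" ".join(path_parts))  # C:\projects\My Scene.scn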
@@ -1,96 +0,0 @@
import os
import pyblish.api


class CollectCelactionInstances(pyblish.api.ContextPlugin):
    """ Adds the celaction render instances """

    label = "Collect Celaction Instances"
    order = pyblish.api.CollectorOrder + 0.1

    def process(self, context):
        task = context.data["task"]
        current_file = context.data["currentFile"]
        staging_dir = os.path.dirname(current_file)
        scene_file = os.path.basename(current_file)
        version = context.data["version"]

        folder_entity = context.data["folderEntity"]

        folder_attributes = folder_entity["attrib"]

        shared_instance_data = {
            "folderPath": folder_entity["path"],
            "frameStart": folder_attributes["frameStart"],
            "frameEnd": folder_attributes["frameEnd"],
            "handleStart": folder_attributes["handleStart"],
            "handleEnd": folder_attributes["handleEnd"],
            "fps": folder_attributes["fps"],
            "resolutionWidth": folder_attributes["resolutionWidth"],
            "resolutionHeight": folder_attributes["resolutionHeight"],
            "pixelAspect": 1,
            "step": 1,
            "version": version
        }

        celaction_kwargs = context.data.get(
            "passingKwargs", {})

        if celaction_kwargs:
            shared_instance_data.update(celaction_kwargs)

        # workfile instance
        product_type = "workfile"
        product_name = product_type + task.capitalize()
        # Create instance
        instance = context.create_instance(product_name)

        # creating instance data
        instance.data.update({
            "label": scene_file,
            "productName": product_name,
            "productType": product_type,
            "family": product_type,
            "families": [product_type],
            "representations": []
        })

        # adding basic script data
        instance.data.update(shared_instance_data)

        # creating representation
        representation = {
            'name': 'scn',
            'ext': 'scn',
            'files': scene_file,
            "stagingDir": staging_dir,
        }

        instance.data["representations"].append(representation)

        self.log.info('Publishing Celaction workfile')

        # render instance
        product_name = f"render{task}Main"
        product_type = "render.farm"
        instance = context.create_instance(name=product_name)
        # getting instance state
        instance.data["publish"] = True

        # add folderEntity data into instance
        instance.data.update({
            "label": "{} - farm".format(product_name),
            "productType": product_type,
            "family": product_type,
            "families": [product_type],
            "productName": product_name
        })

        # adding basic script data
        instance.data.update(shared_instance_data)

        self.log.info('Publishing Celaction render instance')
        self.log.debug(f"Instance data: `{instance.data}`")

        for i in context:
            self.log.debug(f"{i.data['families']}")
@@ -1,65 +0,0 @@
import os
import copy
import pyblish.api


class CollectRenderPath(pyblish.api.InstancePlugin):
    """Generate file and directory path where rendered images will be"""

    label = "Collect Render Path"
    order = pyblish.api.CollectorOrder + 0.495
    families = ["render.farm"]

    settings_category = "celaction"

    # Presets
    output_extension = "png"
    anatomy_template_key_render_files = None
    anatomy_template_key_metadata = None

    def process(self, instance):
        anatomy = instance.context.data["anatomy"]
        anatomy_data = copy.deepcopy(instance.data["anatomyData"])
        padding = anatomy.templates_obj.frame_padding
        product_type = "render"
        anatomy_data.update({
            "frame": f"%0{padding}d",
            "family": product_type,
            "representation": self.output_extension,
            "ext": self.output_extension
        })
        anatomy_data["product"]["type"] = product_type

        # get anatomy rendering keys
        r_anatomy_key = self.anatomy_template_key_render_files
        m_anatomy_key = self.anatomy_template_key_metadata

        # get folder and path for rendering images from celaction
        r_template_item = anatomy.get_template_item("publish", r_anatomy_key)
        render_dir = r_template_item["directory"].format_strict(anatomy_data)
        render_path = r_template_item["path"].format_strict(anatomy_data)
        self.log.debug("__ render_path: `{}`".format(render_path))

        # create dir if it doesn't exist
        try:
            if not os.path.isdir(render_dir):
                os.makedirs(render_dir, exist_ok=True)
        except OSError:
            # directory is not available
            self.log.warning("Path is unreachable: `{}`".format(render_dir))

        # add rendering path to instance data
        instance.data["path"] = render_path

        # get anatomy for published renders folder path
        m_template_item = anatomy.get_template_item(
            "publish", m_anatomy_key, default=None
        )
        if m_template_item is not None:
            metadata_path = m_template_item["directory"].format_strict(
                anatomy_data
            )
            instance.data["publishRenderMetadataFolder"] = metadata_path
            self.log.info("Metadata render path: `{}`".format(metadata_path))

        self.log.info(f"Render output path set to: `{render_path}`")
@@ -1,22 +0,0 @@
import shutil

import pyblish.api

from ayon_core.lib import version_up


class VersionUpScene(pyblish.api.ContextPlugin):
    order = pyblish.api.IntegratorOrder + 0.5
    label = 'Version Up Scene'
    families = ['workfile']
    optional = True
    active = True

    def process(self, context):
        current_file = context.data.get('currentFile')
        v_up = version_up(current_file)
        self.log.debug('Current file is: {}'.format(current_file))
        self.log.debug('Version up: {}'.format(v_up))

        shutil.copy2(current_file, v_up)
        self.log.info('Scene saved into new version: {}'.format(v_up))

Binary file not shown.
@@ -1,36 +0,0 @@
import os
import sys

import pyblish.api
import pyblish.util

from ayon_celaction import CELACTION_ROOT_DIR
from ayon_core.lib import Logger
from ayon_core.tools.utils import host_tools
from ayon_core.pipeline import install_ayon_plugins


log = Logger.get_logger("celaction")

PUBLISH_HOST = "celaction"
PLUGINS_DIR = os.path.join(CELACTION_ROOT_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")


def main():
    # Registers global pyblish plugins
    install_ayon_plugins()

    if os.path.exists(PUBLISH_PATH):
        log.info(f"Registering path: {PUBLISH_PATH}")
        pyblish.api.register_plugin_path(PUBLISH_PATH)

    pyblish.api.register_host(PUBLISH_HOST)
    pyblish.api.register_target("local")

    return host_tools.show_publish()


if __name__ == "__main__":
    result = main()
    sys.exit(not bool(result))
@@ -1,3 +0,0 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'celaction' version."""
__version__ = "0.2.0"
@@ -1,12 +0,0 @@
name = "celaction"
title = "CelAction"
version = "0.2.0"

client_dir = "ayon_celaction"

ayon_required_addons = {
    "core": ">0.3.2",
}
ayon_compatible_addons = {
    "applications": ">=0.2.0",
}
@@ -1,13 +0,0 @@
from typing import Type

from ayon_server.addons import BaseServerAddon

from .settings import CelActionSettings, DEFAULT_VALUES


class CelActionAddon(BaseServerAddon):
    settings_model: Type[CelActionSettings] = CelActionSettings

    async def get_default_settings(self):
        settings_model_cls = self.get_settings_model()
        return settings_model_cls(**DEFAULT_VALUES)
@@ -1,63 +0,0 @@
from pydantic import validator
from ayon_server.settings import BaseSettingsModel, SettingsField
from ayon_server.settings.validators import ensure_unique_names


class ImageIOConfigModel(BaseSettingsModel):
    """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config
    path in the Core addon profiles here
    (ayon+settings://core/imageio/ocio_config_profiles).
    """

    override_global_config: bool = SettingsField(
        False,
        title="Override global OCIO config",
        description=(
            "DEPRECATED functionality. Please set the OCIO config path in the "
            "Core addon profiles here (ayon+settings://core/imageio/"
            "ocio_config_profiles)."
        ),
    )
    filepath: list[str] = SettingsField(
        default_factory=list,
        title="Config path",
        description=(
            "DEPRECATED functionality. Please set the OCIO config path in the "
            "Core addon profiles here (ayon+settings://core/imageio/"
            "ocio_config_profiles)."
        ),
    )


class ImageIOFileRuleModel(BaseSettingsModel):
    name: str = SettingsField("", title="Rule name")
    pattern: str = SettingsField("", title="Regex pattern")
    colorspace: str = SettingsField("", title="Colorspace name")
    ext: str = SettingsField("", title="File extension")


class ImageIOFileRulesModel(BaseSettingsModel):
    activate_host_rules: bool = SettingsField(False)
    rules: list[ImageIOFileRuleModel] = SettingsField(
        default_factory=list,
        title="Rules"
    )

    @validator("rules")
    def validate_unique_outputs(cls, value):
        ensure_unique_names(value)
        return value


class CelActionImageIOModel(BaseSettingsModel):
    activate_host_color_management: bool = SettingsField(
        True, title="Enable Color Management"
    )
    ocio_config: ImageIOConfigModel = SettingsField(
        default_factory=ImageIOConfigModel,
        title="OCIO config"
    )
    file_rules: ImageIOFileRulesModel = SettingsField(
        default_factory=ImageIOFileRulesModel,
        title="File Rules"
    )
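The validator above delegates to ayon_server's ensure_unique_names helper. A rough standalone equivalent using a plain pydantic v1-style validator (model names here are illustrative, not the addon's):

from pydantic import BaseModel, validator


class Rule(BaseModel):
    name: str


class Rules(BaseModel):
    rules: list[Rule] = []

    @validator("rules")
    def validate_unique_names(cls, value):
        # reject two rules sharing the same name
        names = [rule.name for rule in value]
        if len(names) != len(set(names)):
            raise ValueError("Rule names must be unique")
        return value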
@@ -1,91 +0,0 @@
from ayon_server.settings import BaseSettingsModel, SettingsField
from .imageio import CelActionImageIOModel


class CollectRenderPathModel(BaseSettingsModel):
    output_extension: str = SettingsField(
        "",
        title="Output render file extension"
    )
    anatomy_template_key_render_files: str = SettingsField(
        "",
        title="Anatomy template key: render files"
    )
    anatomy_template_key_metadata: str = SettingsField(
        "",
        title="Anatomy template key: metadata job file"
    )


def _workfile_submit_overrides():
    return [
        {
            "value": "render_chunk",
            "label": "Pass chunk size"
        },
        {
            "value": "frame_range",
            "label": "Pass frame range"
        },
        {
            "value": "resolution",
            "label": "Pass resolution"
        }
    ]


class WorkfileModel(BaseSettingsModel):
    submission_overrides: list[str] = SettingsField(
        default_factory=list,
        title="Submission workfile overrides",
        enum_resolver=_workfile_submit_overrides
    )


class PublishPluginsModel(BaseSettingsModel):
    CollectRenderPath: CollectRenderPathModel = SettingsField(
        default_factory=CollectRenderPathModel,
        title="Collect Render Path"
    )


class CelActionSettings(BaseSettingsModel):
    imageio: CelActionImageIOModel = SettingsField(
        default_factory=CelActionImageIOModel,
        title="Color Management (ImageIO)"
    )
    workfile: WorkfileModel = SettingsField(
        title="Workfile"
    )
    publish: PublishPluginsModel = SettingsField(
        default_factory=PublishPluginsModel,
        title="Publish plugins",
    )


DEFAULT_VALUES = {
    "imageio": {
        "ocio_config": {
            "enabled": False,
            "filepath": []
        },
        "file_rules": {
            "enabled": False,
            "rules": []
        }
    },
    "workfile": {
        "submission_overrides": [
            "render_chunk",
            "frame_range",
            "resolution"
        ]
    },
    "publish": {
        "CollectRenderPath": {
            "output_extension": "png",
            "anatomy_template_key_render_files": "render",
            "anatomy_template_key_metadata": "render"
        }
    }
}
@@ -1,5 +0,0 @@
from .addon import ClockifyAddon

__all__ = (
    "ClockifyAddon",
)
@@ -1,290 +0,0 @@
import os
import threading
import time

from ayon_core.addon import AYONAddon, ITrayAddon, IPluginPaths

from .version import __version__
from .constants import CLOCKIFY_FTRACK_USER_PATH, CLOCKIFY_FTRACK_SERVER_PATH


class ClockifyAddon(AYONAddon, ITrayAddon, IPluginPaths):
    name = "clockify"
    version = __version__

    def initialize(self, studio_settings):
        enabled = self.name in studio_settings
        workspace_name = None
        if enabled:
            clockify_settings = studio_settings[self.name]
            workspace_name = clockify_settings["workspace_name"]

        if enabled and not workspace_name:
            self.log.warning("Clockify Workspace is not set in settings.")
            enabled = False
        self.enabled = enabled
        self.workspace_name = workspace_name

        self.timer_manager = None
        self.MessageWidgetClass = None
        self.message_widget = None
        self._clockify_api = None

        # TimersManager attributes
        # - set `timers_manager_connector` only in `tray_init`
        self.timers_manager_connector = None
        self._timer_manager_addon = None

    @property
    def clockify_api(self):
        if self._clockify_api is None:
            from .clockify_api import ClockifyAPI

            self._clockify_api = ClockifyAPI(master_parent=self)
        return self._clockify_api

    def get_global_environments(self):
        return {"CLOCKIFY_WORKSPACE": self.workspace_name}

    def tray_init(self):
        from .widgets import ClockifySettings, MessageWidget

        self.MessageWidgetClass = MessageWidget

        self.message_widget = None
        self.widget_settings = ClockifySettings(self.clockify_api)
        self.widget_settings_required = None

        self.thread_timer_check = None
        # Bools
        self.bool_thread_check_running = False
        self.bool_api_key_set = False
        self.bool_workspace_set = False
        self.bool_timer_run = False
        self.bool_api_key_set = self.clockify_api.set_api()

        # Define itself as TimersManager connector
        self.timers_manager_connector = self

    def tray_start(self):
        if self.bool_api_key_set is False:
            self.show_settings()
            return

        self.bool_workspace_set = self.clockify_api.workspace_id is not None
        if self.bool_workspace_set is False:
            return

        self.start_timer_check()
        self.set_menu_visibility()

    def tray_exit(self, *_a, **_kw):
        return

    def get_plugin_paths(self):
        """Implementation of IPluginPaths to get plugin paths."""
        actions_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "launcher_actions"
        )
        return {"actions": [actions_path]}

    def get_ftrack_event_handler_paths(self):
        """Function for ftrack addon to add ftrack event handler paths."""
        return {
            "user": [CLOCKIFY_FTRACK_USER_PATH],
            "server": [CLOCKIFY_FTRACK_SERVER_PATH],
        }

    def clockify_timer_stopped(self):
        self.bool_timer_run = False
        self.timer_stopped()

    def start_timer_check(self):
        self.bool_thread_check_running = True
        if self.thread_timer_check is None:
            self.thread_timer_check = threading.Thread(
                target=self.check_running
            )
            self.thread_timer_check.daemon = True
            self.thread_timer_check.start()

    def stop_timer_check(self):
        self.bool_thread_check_running = True
        if self.thread_timer_check is not None:
            self.thread_timer_check.join()
            self.thread_timer_check = None

    def check_running(self):
        while self.bool_thread_check_running is True:
            bool_timer_run = False
            if self.clockify_api.get_in_progress() is not None:
                bool_timer_run = True

            if self.bool_timer_run != bool_timer_run:
                if self.bool_timer_run is True:
                    self.clockify_timer_stopped()
                elif self.bool_timer_run is False:
                    current_timer = self.clockify_api.get_in_progress()
                    if current_timer is None:
                        continue
                    current_proj_id = current_timer.get("projectId")
                    if not current_proj_id:
                        continue

                    project = self.clockify_api.get_project_by_id(
                        current_proj_id
                    )
                    if project and project.get("code") == 501:
                        continue

                    project_name = project.get("name")

                    current_timer_hierarchy = current_timer.get("description")
                    if not current_timer_hierarchy:
                        continue
                    hierarchy_items = current_timer_hierarchy.split("/")
                    # Each pype timer must have at least 2 items!
                    if len(hierarchy_items) < 2:
                        continue

                    task_name = hierarchy_items[-1]
                    hierarchy = hierarchy_items[:-1]

                    data = {
                        "task_name": task_name,
                        "hierarchy": hierarchy,
                        "project_name": project_name,
                    }
                    self.timer_started(data)

            self.bool_timer_run = bool_timer_run
            self.set_menu_visibility()
            time.sleep(5)

    def signed_in(self):
        if not self.timer_manager:
            return

        if not self.timer_manager.last_task:
            return

        if self.timer_manager.is_running:
            self.start_timer_manager(self.timer_manager.last_task)

    def on_message_widget_close(self):
        self.message_widget = None

    # Definition of Tray menu
    def tray_menu(self, parent_menu):
        # Menu for Tray App
        from qtpy import QtWidgets

        menu = QtWidgets.QMenu("Clockify", parent_menu)
        menu.setProperty("submenu", "on")

        # Actions
        action_show_settings = QtWidgets.QAction("Settings", menu)
        action_stop_timer = QtWidgets.QAction("Stop timer", menu)

        menu.addAction(action_show_settings)
        menu.addAction(action_stop_timer)

        action_show_settings.triggered.connect(self.show_settings)
        action_stop_timer.triggered.connect(self.stop_timer)

        self.action_stop_timer = action_stop_timer

        self.set_menu_visibility()

        parent_menu.addMenu(menu)

    def show_settings(self):
        self.widget_settings.input_api_key.setText(
            self.clockify_api.get_api_key()
        )
        self.widget_settings.show()

    def set_menu_visibility(self):
        self.action_stop_timer.setVisible(self.bool_timer_run)

    # --- TimersManager connection methods ---
    def register_timers_manager(self, timer_manager_addon):
        """Store TimersManager for future use."""
        self._timer_manager_addon = timer_manager_addon

    def timer_started(self, data):
        """Tell TimersManager that timer started."""
        if self._timer_manager_addon is not None:
            self._timer_manager_addon.timer_started(self.id, data)

    def timer_stopped(self):
        """Tell TimersManager that timer stopped."""
        if self._timer_manager_addon is not None:
            self._timer_manager_addon.timer_stopped(self.id)

    def stop_timer(self):
        """Called from TimersManager to stop timer."""
        self.clockify_api.finish_time_entry()

    def _verify_project_exists(self, project_name):
        project_id = self.clockify_api.get_project_id(project_name)
        if not project_id:
            self.log.warning(
                'Project "{}" was not found in Clockify. '
                "Timer won't start.".format(project_name)
            )

            if not self.MessageWidgetClass:
                return

            msg = (
                'Project <b>"{}"</b> is not'
                ' in Clockify Workspace <b>"{}"</b>.'
                "<br><br>Please inform your Project Manager."
            ).format(project_name, str(self.clockify_api.workspace_name))

            self.message_widget = self.MessageWidgetClass(
                msg, "Clockify - Info Message"
            )
            self.message_widget.closed.connect(self.on_message_widget_close)
            self.message_widget.show()
            return False
        return project_id

    def start_timer(self, input_data):
        """Called from TimersManager to start timer."""
        # If api key is not entered then skip
        if not self.clockify_api.get_api_key():
            return

        project_name = input_data.get("project_name")
        folder_path = input_data.get("folder_path")
        task_name = input_data.get("task_name")
        task_type = input_data.get("task_type")
        if not all((project_name, folder_path, task_name, task_type)):
            return

        # Concatenate hierarchy and task to get description
        description = "/".join([folder_path.lstrip("/"), task_name])

        # Check project existence
        project_id = self._verify_project_exists(project_name)
        if not project_id:
            return

        # Setup timer tags
        if not task_type:
            self.log.info("No tag information found for the timer")

        tag_ids = []
        task_tag_id = self.clockify_api.get_tag_id(task_type)
        if task_tag_id is not None:
            tag_ids.append(task_tag_id)

        # Start timer
        self.clockify_api.start_time_entry(
            description,
            project_id,
            tag_ids=tag_ids,
            workspace_id=self.clockify_api.workspace_id,
            user_id=self.clockify_api.user_id,
        )
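check_running above is a flag-guarded daemon-thread polling loop with a 5-second tick. Stripped to its skeleton (assuming a poll() callable; note that a stop method must clear the flag before join(), otherwise the loop never exits and the join blocks forever — the deleted stop_timer_check sets the flag to True, which looks like exactly that trap):

import threading
import time


class Poller:
    def __init__(self, poll, interval=5):
        self._poll = poll          # callable invoked every tick
        self._interval = interval
        self._running = False
        self._thread = None

    def start(self):
        self._running = True
        if self._thread is None:
            self._thread = threading.Thread(target=self._loop)
            self._thread.daemon = True
            self._thread.start()

    def stop(self):
        self._running = False      # let _loop exit before joining
        if self._thread is not None:
            self._thread.join()
            self._thread = None

    def _loop(self):
        while self._running:
            self._poll()
            time.sleep(self._interval)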
@@ -1,447 +0,0 @@
import os
import json
import datetime

import requests

from ayon_core.lib.local_settings import AYONSecureRegistry
from ayon_core.lib import Logger

from .constants import (
    CLOCKIFY_ENDPOINT,
    ADMIN_PERMISSION_NAMES,
)


class ClockifyAPI:
    log = Logger.get_logger(__name__)

    def __init__(self, api_key=None, master_parent=None):
        self.workspace_name = None
        self.master_parent = master_parent
        self.api_key = api_key
        self._workspace_id = None
        self._user_id = None
        self._secure_registry = None

    @property
    def secure_registry(self):
        if self._secure_registry is None:
            self._secure_registry = AYONSecureRegistry("clockify")
        return self._secure_registry

    @property
    def headers(self):
        return {"x-api-key": self.api_key}

    @property
    def workspace_id(self):
        return self._workspace_id

    @property
    def user_id(self):
        return self._user_id

    def verify_api(self):
        for key, value in self.headers.items():
            if value is None or value.strip() == "":
                return False
        return True

    def set_api(self, api_key=None):
        if api_key is None:
            api_key = self.get_api_key()

        if api_key is not None and self.validate_api_key(api_key) is True:
            self.api_key = api_key
            self.set_workspace()
            self.set_user_id()
            if self.master_parent:
                self.master_parent.signed_in()
            return True
        return False

    def validate_api_key(self, api_key):
        test_headers = {"x-api-key": api_key}
        action_url = "user"
        response = requests.get(
            CLOCKIFY_ENDPOINT + action_url, headers=test_headers
        )
        if response.status_code != 200:
            return False
        return True

    def validate_workspace_permissions(self, workspace_id=None, user_id=None):
        if user_id is None:
            self.log.info("No user_id found during validation")
            return False
        if workspace_id is None:
            workspace_id = self.workspace_id
        action_url = f"workspaces/{workspace_id}/users?includeRoles=1"
        response = requests.get(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers
        )
        data = response.json()
        for user in data:
            if user.get("id") == user_id:
                roles_data = user.get("roles")
                for entities in roles_data:
                    if entities.get("role") in ADMIN_PERMISSION_NAMES:
                        return True
        return False

    def get_user_id(self):
        action_url = "user"
        response = requests.get(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers
        )
        result = response.json()
        user_id = result.get("id", None)

        return user_id

    def set_workspace(self, name=None):
        if name is None:
            name = os.environ.get("CLOCKIFY_WORKSPACE", None)
        self.workspace_name = name
        if self.workspace_name is None:
            return
        try:
            result = self.validate_workspace()
        except Exception:
            result = False
        if result is not False:
            self._workspace_id = result
            if self.master_parent is not None:
                self.master_parent.start_timer_check()
            return True
        return False

    def validate_workspace(self, name=None):
        if name is None:
            name = self.workspace_name
        all_workspaces = self.get_workspaces()
        if name in all_workspaces:
            return all_workspaces[name]
        return False

    def set_user_id(self):
        try:
            user_id = self.get_user_id()
        except Exception:
            user_id = None
        if user_id is not None:
            self._user_id = user_id

    def get_api_key(self):
        return self.secure_registry.get_item("api_key", None)

    def save_api_key(self, api_key):
        self.secure_registry.set_item("api_key", api_key)

    def get_workspaces(self):
        action_url = "workspaces/"
        response = requests.get(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers
        )
        return {
            workspace["name"]: workspace["id"] for workspace in response.json()
        }

    def get_projects(self, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        action_url = f"workspaces/{workspace_id}/projects"
        response = requests.get(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers
        )
        if response.status_code != 403:
            result = response.json()
            return {project["name"]: project["id"] for project in result}

    def get_project_by_id(self, project_id, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        action_url = "workspaces/{}/projects/{}".format(
            workspace_id, project_id
        )
        response = requests.get(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers
        )

        return response.json()

    def get_tags(self, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        action_url = "workspaces/{}/tags".format(workspace_id)
        response = requests.get(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers
        )

        return {tag["name"]: tag["id"] for tag in response.json()}

    def get_tasks(self, project_id, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        action_url = "workspaces/{}/projects/{}/tasks".format(
            workspace_id, project_id
        )
        response = requests.get(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers
        )

        return {task["name"]: task["id"] for task in response.json()}

    def get_workspace_id(self, workspace_name):
        all_workspaces = self.get_workspaces()
        if workspace_name not in all_workspaces:
            return None
        return all_workspaces[workspace_name]

    def get_project_id(self, project_name, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        all_projects = self.get_projects(workspace_id)
        if project_name not in all_projects:
            return None
        return all_projects[project_name]

    def get_tag_id(self, tag_name, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        all_tasks = self.get_tags(workspace_id)
        if tag_name not in all_tasks:
            return None
        return all_tasks[tag_name]

    def get_task_id(self, task_name, project_id, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        all_tasks = self.get_tasks(project_id, workspace_id)
        if task_name not in all_tasks:
            return None
        return all_tasks[task_name]

    def get_current_time(self):
        return str(datetime.datetime.utcnow().isoformat()) + "Z"

    def start_time_entry(
        self,
        description,
        project_id,
        task_id=None,
        tag_ids=None,
        workspace_id=None,
        user_id=None,
        billable=True,
    ):
        # Workspace
        if workspace_id is None:
            workspace_id = self.workspace_id
        # User ID
        if user_id is None:
            user_id = self._user_id

        # get running timer to check if we need to start it
        current_timer = self.get_in_progress()

        # Check if another timer is currently running with the same values.
        # Do not restart the timer if it is already running for current task.
        if current_timer:
            current_timer_hierarchy = current_timer.get("description")
            current_project_id = current_timer.get("projectId")
            current_task_id = current_timer.get("taskId")
            if (
                description == current_timer_hierarchy
                and project_id == current_project_id
                and task_id == current_task_id
            ):
                self.log.info(
                    "Timer for the current project is already running"
                )
                self.bool_timer_run = True
                return self.bool_timer_run
            self.finish_time_entry()

        # Convert billable to strings
        if billable:
            billable = "true"
        else:
            billable = "false"
        # Rest API Action
        action_url = "workspaces/{}/user/{}/time-entries".format(
            workspace_id, user_id
        )
        start = self.get_current_time()
        body = {
            "start": start,
            "billable": billable,
            "description": description,
            "projectId": project_id,
            "taskId": task_id,
            "tagIds": tag_ids,
        }
        response = requests.post(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
        )
        if response.status_code < 300:
            return True
        return False

    def _get_current_timer_values(self, response):
        if response is None:
            return
        try:
            output = response.json()
        except json.decoder.JSONDecodeError:
            return None
        if output and isinstance(output, list):
            return output[0]
        return None

    def get_in_progress(self, user_id=None, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        if user_id is None:
            user_id = self.user_id

        action_url = (
            f"workspaces/{workspace_id}/user/"
            f"{user_id}/time-entries?in-progress=1"
        )
        response = requests.get(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers
        )
        return self._get_current_timer_values(response)

    def finish_time_entry(self, workspace_id=None, user_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        if user_id is None:
            user_id = self.user_id
        current_timer = self.get_in_progress()
        if not current_timer:
            return
        action_url = "workspaces/{}/user/{}/time-entries".format(
            workspace_id, user_id
        )
        body = {"end": self.get_current_time()}
        response = requests.patch(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
        )
        return response.json()

    def get_time_entries(self, workspace_id=None, user_id=None, quantity=10):
        if workspace_id is None:
            workspace_id = self.workspace_id
        if user_id is None:
            user_id = self.user_id
        action_url = "workspaces/{}/user/{}/time-entries".format(
            workspace_id, user_id
        )
        response = requests.get(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers
        )
        return response.json()[:quantity]

    def remove_time_entry(self, tid, workspace_id=None, user_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        action_url = "workspaces/{}/user/{}/time-entries/{}".format(
            workspace_id, user_id, tid
        )
        response = requests.delete(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers
        )
        return response.json()

    def add_project(self, name, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        action_url = "workspaces/{}/projects".format(workspace_id)
        body = {
            "name": name,
            "clientId": "",
            "isPublic": "false",
            "estimate": {"estimate": 0, "type": "AUTO"},
            "color": "#f44336",
            "billable": "true",
        }
        response = requests.post(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
        )
        return response.json()

    def add_workspace(self, name):
        action_url = "workspaces/"
        body = {"name": name}
        response = requests.post(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
        )
        return response.json()

    def add_task(self, name, project_id, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        action_url = "workspaces/{}/projects/{}/tasks".format(
            workspace_id, project_id
        )
        body = {"name": name, "projectId": project_id}
        response = requests.post(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
        )
        return response.json()

    def add_tag(self, name, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        action_url = "workspaces/{}/tags".format(workspace_id)
        body = {"name": name}
        response = requests.post(
            CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
        )
        return response.json()

    def delete_project(self, project_id, workspace_id=None):
        if workspace_id is None:
            workspace_id = self.workspace_id
        action_url = "/workspaces/{}/projects/{}".format(
            workspace_id, project_id
        )
        response = requests.delete(
            CLOCKIFY_ENDPOINT + action_url,
            headers=self.headers,
        )
        return response.json()

    def convert_input(
        self, entity_id, entity_name, mode="Workspace", project_id=None
    ):
        if entity_id is None:
            error = False
            error_msg = 'Missing information "{}"'
            if mode.lower() == "workspace":
                if entity_id is None and entity_name is None:
                    if self.workspace_id is not None:
                        entity_id = self.workspace_id
                    else:
                        error = True
                else:
                    entity_id = self.get_workspace_id(entity_name)
            else:
                if entity_id is None and entity_name is None:
                    error = True
                elif mode.lower() == "project":
                    entity_id = self.get_project_id(entity_name)
                elif mode.lower() == "task":
                    entity_id = self.get_task_id(
                        task_name=entity_name, project_id=project_id
                    )
                else:
                    raise TypeError("Unknown type")
            # Raise error
            if error:
                raise ValueError(error_msg.format(mode))

        return entity_id
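Taken together, a typical call sequence against this wrapper could look like the following (workspace, project name, and description are placeholders; assumes a valid key is already stored in the secure registry):

api = ClockifyAPI()
if api.set_api():  # loads the stored key, resolves workspace and user
    project_id = api.get_project_id("MyProject")
    if project_id:
        api.start_time_entry(
            "assets/hero/modeling",  # description: hierarchy + task
            project_id,
        )
    print(api.get_in_progress())  # currently running entry, if any
    api.finish_time_entry()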
@@ -1,12 +0,0 @@
import os


CLOCKIFY_FTRACK_SERVER_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "ftrack", "server"
)
CLOCKIFY_FTRACK_USER_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "ftrack", "user"
)

ADMIN_PERMISSION_NAMES = ["WORKSPACE_OWN", "WORKSPACE_ADMIN"]
CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/v1/"
@@ -1,146 +0,0 @@
import os
import json

from ayon_clockify.clockify_api import ClockifyAPI

from ayon_ftrack.lib import ServerAction


class SyncClockifyServer(ServerAction):
    '''Synchronise project names and task types.'''

    identifier = "clockify.sync.server"
    label = "Sync To Clockify (server)"
    description = "Synchronise data to Clockify workspace"

    role_list = ["Administrator", "project Manager"]

    def __init__(self, *args, **kwargs):
        super(SyncClockifyServer, self).__init__(*args, **kwargs)

        workspace_name = os.environ.get("CLOCKIFY_WORKSPACE")
        api_key = os.environ.get("CLOCKIFY_API_KEY")
        self.clockify_api = ClockifyAPI(api_key)
        self.clockify_api.set_workspace(workspace_name)
        if api_key is None:
            modified_key = "None"
        else:
            str_len = int(len(api_key) / 2)
            start_replace = int(len(api_key) / 4)
            modified_key = ""
            for idx in range(len(api_key)):
                if idx >= start_replace and idx < start_replace + str_len:
                    replacement = "X"
                else:
                    replacement = api_key[idx]
                modified_key += replacement

        self.log.info(
            "Clockify info. Workspace: \"{}\" API key: \"{}\"".format(
                str(workspace_name), str(modified_key)
            )
        )

    def discover(self, session, entities, event):
        if (
            len(entities) != 1
            or entities[0].entity_type.lower() != "project"
        ):
            return False
        return True

    def launch(self, session, entities, event):
        self.clockify_api.set_api()
        if self.clockify_api.workspace_id is None:
            return {
                "success": False,
                "message": "Clockify Workspace or API key are not set!"
            }

        if not self.clockify_api.validate_workspace_permissions(
            self.clockify_api.workspace_id, self.clockify_api.user_id
        ):
            return {
                "success": False,
                "message": "Missing permissions for this action!"
            }

        # JOB SETTINGS
        user_id = event["source"]["user"]["id"]
        user = session.query("User where id is " + user_id).one()

        job = session.create("Job", {
            "user": user,
            "status": "running",
            "data": json.dumps({"description": "Sync Ftrack to Clockify"})
        })
        session.commit()

        project_entity = entities[0]
        if project_entity.entity_type.lower() != "project":
            project_entity = self.get_project_from_entity(project_entity)

        project_name = project_entity["full_name"]
        self.log.info(
            "Synchronization of project \"{}\" to clockify begins.".format(
                project_name
            )
        )
        task_types = (
            project_entity["project_schema"]["_task_type_schema"]["types"]
        )
        task_type_names = [
            task_type["name"] for task_type in task_types
        ]
        try:
            clockify_projects = self.clockify_api.get_projects()
            if project_name not in clockify_projects:
                response = self.clockify_api.add_project(project_name)
                if "id" not in response:
                    self.log.warning(
                        "Project \"{}\" can't be created. Response: {}".format(
                            project_name, response
                        )
                    )
                    return {
                        "success": False,
                        "message": (
                            "Can't create clockify project \"{}\"."
                            " Unexpected error."
                        ).format(project_name)
                    }

            clockify_workspace_tags = self.clockify_api.get_tags()
            for task_type_name in task_type_names:
                if task_type_name in clockify_workspace_tags:
                    self.log.debug(
                        "Task \"{}\" already exist".format(task_type_name)
                    )
                    continue

                response = self.clockify_api.add_tag(task_type_name)
                if "id" not in response:
                    self.log.warning(
                        "Task \"{}\" can't be created. Response: {}".format(
                            task_type_name, response
                        )
                    )

            job["status"] = "done"

        except Exception:
            self.log.warning(
                "Synchronization to clockify failed.",
                exc_info=True
            )

        finally:
            if job["status"] != "done":
                job["status"] = "failed"
            session.commit()

        return True


def register(session, **kw):
    SyncClockifyServer(session).register()
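The masking loop in __init__ above hides the middle half of the key, starting a quarter of the way in, so the log never contains a usable credential. A worked example with a hypothetical 16-character key:

api_key = "abcd1234efgh5678"  # hypothetical
str_len = int(len(api_key) / 2)        # 8 characters to mask
start_replace = int(len(api_key) / 4)  # masking starts at index 4
masked = "".join(
    "X" if start_replace <= idx < start_replace + str_len else ch
    for idx, ch in enumerate(api_key)
)
print(masked)  # abcdXXXXXXXX5678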
@@ -1,123 +0,0 @@
import json
from ayon_clockify.clockify_api import ClockifyAPI
from ayon_ftrack.lib import BaseAction, statics_icon


class SyncClockifyLocal(BaseAction):
    """Synchronise project names and task types."""

    identifier = "clockify.sync.local"
    label = "Sync To Clockify"
    description = "Synchronise data to Clockify workspace"
    role_list = ["Administrator", "project Manager"]
    icon = statics_icon("app_icons", "clockify-white.png")

    def __init__(self, *args, **kwargs):
        super(SyncClockifyLocal, self).__init__(*args, **kwargs)

        self.clockify_api = ClockifyAPI()

    def discover(self, session, entities, event):
        if (
            len(entities) == 1
            and entities[0].entity_type.lower() == "project"
        ):
            return True
        return False

    def launch(self, session, entities, event):
        self.clockify_api.set_api()
        if self.clockify_api.workspace_id is None:
            return {
                "success": False,
                "message": "Clockify Workspace or API key are not set!"
            }

        if (
            self.clockify_api.validate_workspace_permissions(
                self.clockify_api.workspace_id, self.clockify_api.user_id)
            is False
        ):
            return {
                "success": False,
                "message": "Missing permissions for this action!"
            }

        # JOB SETTINGS
        userId = event['source']['user']['id']
        user = session.query('User where id is ' + userId).one()

        job = session.create('Job', {
            'user': user,
            'status': 'running',
            'data': json.dumps({
                'description': 'Sync ftrack to Clockify'
            })
        })
        session.commit()

        project_entity = entities[0]
        if project_entity.entity_type.lower() != "project":
            project_entity = self.get_project_from_entity(project_entity)

        project_name = project_entity["full_name"]
        self.log.info(
            "Synchronization of project \"{}\" to clockify begins.".format(
                project_name
            )
        )
        task_types = (
            project_entity["project_schema"]["_task_type_schema"]["types"]
        )
        task_type_names = [
            task_type["name"] for task_type in task_types
        ]
        try:
            clockify_projects = self.clockify_api.get_projects()
            if project_name not in clockify_projects:
                response = self.clockify_api.add_project(project_name)
                if "id" not in response:
                    self.log.warning(
                        "Project \"{}\" can't be created. Response: {}".format(
                            project_name, response
                        )
                    )
                    return {
                        "success": False,
                        "message": (
                            "Can't create clockify project \"{}\"."
                            " Unexpected error."
                        ).format(project_name)
                    }

            clockify_workspace_tags = self.clockify_api.get_tags()
            for task_type_name in task_type_names:
                if task_type_name in clockify_workspace_tags:
                    self.log.debug(
                        "Task \"{}\" already exist".format(task_type_name)
                    )
                    continue

                response = self.clockify_api.add_tag(task_type_name)
                if "id" not in response:
                    self.log.warning(
                        "Task \"{}\" can't be created. Response: {}".format(
                            task_type_name, response
                        )
                    )

            job["status"] = "done"

        except Exception:
            pass

        finally:
            if job["status"] != "done":
                job["status"] = "failed"
            session.commit()

        return True


def register(session, **kw):
    SyncClockifyLocal(session).register()
@@ -1,49 +0,0 @@
import ayon_api

from ayon_clockify.clockify_api import ClockifyAPI

from ayon_core.pipeline import LauncherAction


class ClockifyStart(LauncherAction):
    name = "clockify_start_timer"
    label = "Clockify - Start Timer"
    icon = "app_icons/clockify.png"
    order = 500
    clockify_api = ClockifyAPI()

    def is_compatible(self, selection):
        """Return whether the action is compatible with the session"""
        return selection.is_task_selected

    def process(self, selection, **kwargs):
        self.clockify_api.set_api()
        user_id = self.clockify_api.user_id
        workspace_id = self.clockify_api.workspace_id
        project_name = selection.project_name
        folder_path = selection.folder_path
        task_name = selection.task_name
        description = "/".join([folder_path.lstrip("/"), task_name])

        # fetch folder entity
        folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
        task_entity = ayon_api.get_task_by_name(
            project_name, folder_entity["id"], task_name
        )

        # get task type to fill the timer tag
        task_type = task_entity["taskType"]

        project_id = self.clockify_api.get_project_id(
            project_name, workspace_id
        )
        tag_ids = []
        tag_name = task_type
        tag_ids.append(self.clockify_api.get_tag_id(tag_name, workspace_id))
        self.clockify_api.start_time_entry(
            description,
            project_id,
            tag_ids=tag_ids,
            workspace_id=workspace_id,
            user_id=user_id,
        )
@ -1,81 +0,0 @@
import ayon_api

from ayon_clockify.clockify_api import ClockifyAPI
from ayon_core.pipeline import LauncherAction


class ClockifyPermissionsCheckFailed(Exception):
    """Timer start failed due to user permissions check.

    Message should be self explanatory as traceback won't be shown.
    """
    pass


class ClockifySync(LauncherAction):
    name = "sync_to_clockify"
    label = "Sync to Clockify"
    icon = "app_icons/clockify-white.png"
    order = 500
    clockify_api = ClockifyAPI()

    def is_compatible(self, selection):
        """Check if there are projects to sync."""
        if selection.is_project_selected:
            return True

        try:
            next(ayon_api.get_projects())
            return True
        except StopIteration:
            return False

    def process(self, selection, **kwargs):
        self.clockify_api.set_api()
        workspace_id = self.clockify_api.workspace_id
        user_id = self.clockify_api.user_id
        if not self.clockify_api.validate_workspace_permissions(
            workspace_id, user_id
        ):
            raise ClockifyPermissionsCheckFailed(
                "Current Clockify user is missing permissions for this action!"
            )

        if selection.is_project_selected:
            projects_to_sync = [selection.project_entity]
        else:
            projects_to_sync = ayon_api.get_projects()

        projects_info = {
            project["name"]: {
                task_type["name"]
                for task_type in project["taskTypes"]
            }
            for project in projects_to_sync
        }

        clockify_projects = self.clockify_api.get_projects(workspace_id)
        for project_name, task_types in projects_info.items():
            if project_name in clockify_projects:
                continue

            response = self.clockify_api.add_project(
                project_name, workspace_id
            )
            if "id" not in response:
                self.log.error(
                    "Project {} can't be created".format(project_name)
                )
                continue

            clockify_workspace_tags = self.clockify_api.get_tags(workspace_id)
            for task_type in task_types:
                if task_type not in clockify_workspace_tags:
                    response = self.clockify_api.add_tag(
                        task_type, workspace_id
                    )
                    if "id" not in response:
                        self.log.error(
                            "Task {} can't be created".format(task_type)
                        )
                        continue
@ -1,3 +0,0 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'clockify' version."""
__version__ = "0.2.1"
@ -1,207 +0,0 @@
from qtpy import QtCore, QtGui, QtWidgets

from ayon_core import resources, style


class MessageWidget(QtWidgets.QWidget):

    SIZE_W = 300
    SIZE_H = 130

    closed = QtCore.Signal()

    def __init__(self, messages, title):
        super(MessageWidget, self).__init__()

        # Icon
        icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
        self.setWindowIcon(icon)

        self.setWindowFlags(
            QtCore.Qt.WindowCloseButtonHint |
            QtCore.Qt.WindowMinimizeButtonHint
        )

        # Size setting
        self.resize(self.SIZE_W, self.SIZE_H)
        self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H))
        self.setMaximumSize(QtCore.QSize(self.SIZE_W + 100, self.SIZE_H + 100))

        # Style
        self.setStyleSheet(style.load_stylesheet())

        self.setLayout(self._ui_layout(messages))
        self.setWindowTitle(title)

    def _ui_layout(self, messages):
        if not messages:
            messages = ["*Missing messages (This is a bug)*"]

        elif not isinstance(messages, (tuple, list)):
            messages = [messages]

        main_layout = QtWidgets.QVBoxLayout(self)

        labels = []
        for message in messages:
            label = QtWidgets.QLabel(message)
            label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
            label.setTextFormat(QtCore.Qt.RichText)
            label.setWordWrap(True)

            labels.append(label)
            main_layout.addWidget(label)

        btn_close = QtWidgets.QPushButton("Close")
        btn_close.setToolTip('Close this window')
        btn_close.clicked.connect(self.on_close_clicked)

        btn_group = QtWidgets.QHBoxLayout()
        btn_group.addStretch(1)
        btn_group.addWidget(btn_close)

        main_layout.addLayout(btn_group)

        self.labels = labels
        self.btn_group = btn_group
        self.btn_close = btn_close
        self.main_layout = main_layout

        return main_layout

    def on_close_clicked(self):
        self.close()

    def close(self, *args, **kwargs):
        self.closed.emit()
        super(MessageWidget, self).close(*args, **kwargs)


class ClockifySettings(QtWidgets.QWidget):
    SIZE_W = 500
    SIZE_H = 130

    loginSignal = QtCore.Signal(object, object, object)

    def __init__(self, clockify_api, optional=True):
        super(ClockifySettings, self).__init__()

        self.clockify_api = clockify_api
        self.optional = optional
        self.validated = False

        # Icon
        icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
        self.setWindowIcon(icon)

        self.setWindowTitle("Clockify settings")
        self.setWindowFlags(
            QtCore.Qt.WindowCloseButtonHint |
            QtCore.Qt.WindowMinimizeButtonHint
        )

        # Size setting
        self.resize(self.SIZE_W, self.SIZE_H)
        self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H))
        self.setMaximumSize(QtCore.QSize(self.SIZE_W + 100, self.SIZE_H + 100))
        self.setStyleSheet(style.load_stylesheet())

        self._ui_init()

    def _ui_init(self):
        label_api_key = QtWidgets.QLabel("Clockify API key:")

        input_api_key = QtWidgets.QLineEdit()
        input_api_key.setFrame(True)
        input_api_key.setPlaceholderText("e.g. XX1XxXX2x3x4xXxx")

        error_label = QtWidgets.QLabel("")
        error_label.setTextFormat(QtCore.Qt.RichText)
        error_label.setWordWrap(True)
        error_label.hide()

        form_layout = QtWidgets.QFormLayout()
        form_layout.setContentsMargins(10, 15, 10, 5)
        form_layout.addRow(label_api_key, input_api_key)
        form_layout.addRow(error_label)

        btn_ok = QtWidgets.QPushButton("Ok")
        btn_ok.setToolTip(
            'Sets the Clockify API key so timers can be started/stopped')

        btn_cancel = QtWidgets.QPushButton("Cancel")
        cancel_tooltip = 'Application won\'t start'
        if self.optional:
            cancel_tooltip = 'Close this window'
        btn_cancel.setToolTip(cancel_tooltip)

        btn_group = QtWidgets.QHBoxLayout()
        btn_group.addStretch(1)
        btn_group.addWidget(btn_ok)
        btn_group.addWidget(btn_cancel)

        main_layout = QtWidgets.QVBoxLayout(self)
        main_layout.addLayout(form_layout)
        main_layout.addLayout(btn_group)

        btn_ok.clicked.connect(self.click_ok)
        btn_cancel.clicked.connect(self._close_widget)

        self.label_api_key = label_api_key
        self.input_api_key = input_api_key
        self.error_label = error_label

        self.btn_ok = btn_ok
        self.btn_cancel = btn_cancel

    def setError(self, msg):
        self.error_label.setText(msg)
        self.error_label.show()

    def invalid_input(self, entity):
        entity.setStyleSheet("border: 1px solid red;")

    def click_ok(self):
        api_key = self.input_api_key.text().strip()
        if self.optional is True and api_key == '':
            self.clockify_api.save_api_key(None)
            self.clockify_api.set_api(api_key)
            self.validated = False
            self._close_widget()
            return

        validation = self.clockify_api.validate_api_key(api_key)

        if validation:
            self.clockify_api.save_api_key(api_key)
            self.clockify_api.set_api(api_key)
            self.validated = True
            self._close_widget()
        else:
            self.invalid_input(self.input_api_key)
            self.validated = False
            self.setError(
                "Entered API key is invalid"
            )

    def showEvent(self, event):
        super(ClockifySettings, self).showEvent(event)

        # Make buttons the same width
        max_width = max(
            self.btn_ok.sizeHint().width(),
            self.btn_cancel.sizeHint().width()
        )
        self.btn_ok.setMinimumWidth(max_width)
        self.btn_cancel.setMinimumWidth(max_width)

    def closeEvent(self, event):
        if self.optional is True:
            event.ignore()
            self._close_widget()
        else:
            self.validated = False

    def _close_widget(self):
        if self.optional is True:
            self.hide()
        else:
            self.close()
@ -1,9 +0,0 @@
name = "clockify"
title = "Clockify"
version = "0.2.1"
client_dir = "ayon_clockify"

ayon_required_addons = {
    "core": ">0.3.2",
}
ayon_compatible_addons = {}
@ -1,9 +0,0 @@
from typing import Type

from ayon_server.addons import BaseServerAddon

from .settings import ClockifySettings


class ClockifyAddon(BaseServerAddon):
    settings_model: Type[ClockifySettings] = ClockifySettings
@ -1,9 +0,0 @@
from ayon_server.settings import BaseSettingsModel, SettingsField


class ClockifySettings(BaseSettingsModel):
    workspace_name: str = SettingsField(
        "",
        title="Workspace name",
        scope=["studio"]
    )
@ -1,13 +0,0 @@
from .version import __version__
from .addon import (
    FLAME_ADDON_ROOT,
    FlameAddon,
)


__all__ = (
    "__version__",

    "FLAME_ADDON_ROOT",
    "FlameAddon",
)
@ -1,35 +0,0 @@
import os

from ayon_core.addon import AYONAddon, IHostAddon

from .version import __version__

FLAME_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))


class FlameAddon(AYONAddon, IHostAddon):
    name = "flame"
    version = __version__
    host_name = "flame"

    def add_implementation_envs(self, env, _app):
        # Add requirements to DL_PYTHON_HOOK_PATH
        env["DL_PYTHON_HOOK_PATH"] = os.path.join(FLAME_ADDON_ROOT, "startup")
        env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)

        # Set default values if they are not already set via settings
        defaults = {
            "LOGLEVEL": "DEBUG"
        }
        for key, value in defaults.items():
            if not env.get(key):
                env[key] = value

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(FLAME_ADDON_ROOT, "hooks")
        ]

    def get_workfile_extensions(self):
        return [".otoc"]
@ -1,159 +0,0 @@
"""
AYON Autodesk Flame api
"""
from .constants import (
    COLOR_MAP,
    MARKER_NAME,
    MARKER_COLOR,
    MARKER_DURATION,
    MARKER_PUBLISH_DEFAULT
)
from .lib import (
    CTX,
    FlameAppFramework,
    get_current_project,
    get_current_sequence,
    create_segment_data_marker,
    get_segment_data_marker,
    set_segment_data_marker,
    set_publish_attribute,
    get_publish_attribute,
    get_sequence_segments,
    maintained_segment_selection,
    reset_segment_selection,
    get_segment_attributes,
    get_clips_in_reels,
    get_reformatted_filename,
    get_frame_from_filename,
    get_padding_from_filename,
    maintained_object_duplication,
    maintained_temp_file_path,
    get_clip_segment,
    get_batch_group_from_desktop,
    MediaInfoFile,
    TimeEffectMetadata
)
from .utils import (
    setup,
    get_flame_version,
    get_flame_install_root
)
from .pipeline import (
    install,
    uninstall,
    ls,
    containerise,
    update_container,
    remove_instance,
    list_instances,
    imprint,
    maintained_selection
)
from .menu import (
    FlameMenuProjectConnect,
    FlameMenuTimeline,
    FlameMenuUniversal
)
from .plugin import (
    Creator,
    PublishableClip,
    ClipLoader,
    OpenClipSolver
)
from .workio import (
    open_file,
    save_file,
    current_file,
    has_unsaved_changes,
    file_extensions,
    work_root
)
from .render_utils import (
    export_clip,
    get_preset_path_by_xml_name,
    modify_preset_file
)
from .batch_utils import (
    create_batch_group,
    create_batch_group_conent
)

__all__ = [
    # constants
    "COLOR_MAP",
    "MARKER_NAME",
    "MARKER_COLOR",
    "MARKER_DURATION",
    "MARKER_PUBLISH_DEFAULT",

    # lib
    "CTX",
    "FlameAppFramework",
    "get_current_project",
    "get_current_sequence",
    "create_segment_data_marker",
    "get_segment_data_marker",
    "set_segment_data_marker",
    "set_publish_attribute",
    "get_publish_attribute",
    "get_sequence_segments",
    "maintained_segment_selection",
    "reset_segment_selection",
    "get_segment_attributes",
    "get_clips_in_reels",
    "get_reformatted_filename",
    "get_frame_from_filename",
    "get_padding_from_filename",
    "maintained_object_duplication",
    "maintained_temp_file_path",
    "get_clip_segment",
    "get_batch_group_from_desktop",
    "MediaInfoFile",
    "TimeEffectMetadata",

    # pipeline
    "install",
    "uninstall",
    "ls",
    "containerise",
    "update_container",
    "remove_instance",
    "list_instances",
    "imprint",
    "maintained_selection",

    # utils
    "setup",
    "get_flame_version",
    "get_flame_install_root",

    # menu
    "FlameMenuProjectConnect",
    "FlameMenuTimeline",
    "FlameMenuUniversal",

    # plugin
    "Creator",
    "PublishableClip",
    "ClipLoader",
    "OpenClipSolver",

    # workio
    "open_file",
    "save_file",
    "current_file",
    "has_unsaved_changes",
    "file_extensions",
    "work_root",

    # render utils
    "export_clip",
    "get_preset_path_by_xml_name",
    "modify_preset_file",

    # batch utils
    "create_batch_group",
    "create_batch_group_conent"
]
@ -1,151 +0,0 @@
import flame


def create_batch_group(
    name,
    frame_start,
    frame_duration,
    update_batch_group=None,
    **kwargs
):
    """Create Batch Group in active project's Desktop

    Args:
        name (str): name of batch group to be created
        frame_start (int): start frame of batch
        frame_duration (int): duration of batch in frames
        update_batch_group (PyBatch)[optional]: batch group to update

    Return:
        PyBatch: active flame batch group
    """
    # make sure some batch obj is present
    batch_group = update_batch_group or flame.batch

    # NOTE: the kwarg key "shematic_reels" is spelled as used by callers
    schematic_reels = kwargs.get("shematic_reels") or ['LoadedReel1']
    shelf_reels = kwargs.get("shelf_reels") or ['ShelfReel1']

    handle_start = kwargs.get("handleStart") or 0
    handle_end = kwargs.get("handleEnd") or 0

    frame_start -= handle_start
    frame_duration += handle_start + handle_end

    if not update_batch_group:
        # Create batch group with name, start_frame value, duration value,
        # set of schematic reel names, set of shelf reel names
        batch_group = batch_group.create_batch_group(
            name,
            start_frame=frame_start,
            duration=frame_duration,
            reels=schematic_reels,
            shelf_reels=shelf_reels
        )
    else:
        batch_group.name = name
        batch_group.start_frame = frame_start
        batch_group.duration = frame_duration

        # add reels to batch group
        _add_reels_to_batch_group(
            batch_group, schematic_reels, shelf_reels)

        # TODO: also update write node if there is any
        # TODO: also update loaders to start from correct frameStart

    if kwargs.get("switch_batch_tab"):
        # use this command to switch to the batch tab
        batch_group.go_to()

    return batch_group
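

# A minimal usage sketch (not part of the original module). The kwarg keys
# below ("shematic_reels", "handleStart", "handleEnd", "switch_batch_tab")
# are the ones create_batch_group() actually reads; the values are
# illustrative assumptions only.
#
# batch_group = create_batch_group(
#     "sq020sh0280_compositing",
#     frame_start=1001,
#     frame_duration=120,
#     shematic_reels=["LoadedReel1"],
#     handleStart=10,
#     handleEnd=10,
#     switch_batch_tab=True,
# )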


def _add_reels_to_batch_group(batch_group, reels, shelf_reels):
    # update or create defined reels
    # helper variables
    reel_names = [
        r.name.get_value()
        for r in batch_group.reels
    ]
    shelf_reel_names = [
        r.name.get_value()
        for r in batch_group.shelf_reels
    ]
    # add schematic reels
    for _r in reels:
        if _r in reel_names:
            continue
        batch_group.create_reel(_r)

    # add shelf reels
    for _sr in shelf_reels:
        if _sr in shelf_reel_names:
            continue
        batch_group.create_shelf_reel(_sr)


def create_batch_group_conent(batch_nodes, batch_links, batch_group=None):
    """Create batch group content from node and link definitions.

    Args:
        batch_nodes (list of dict): each dict is a node definition
        batch_links (list of dict): each dict is a link definition
        batch_group (PyBatch, optional): batch group. Defaults to None.

    Return:
        dict: all batch nodes {name or id: PyNode}
    """
    # make sure some batch obj is present
    batch_group = batch_group or flame.batch
    all_batch_nodes = {
        b.name.get_value(): b
        for b in batch_group.nodes
    }
    for node in batch_nodes:
        # NOTE: node_props should ideally be an OrderedDict
        node_id, node_type, node_props = (
            node["id"], node["type"], node["properties"])

        # get node name for checking if it exists
        node_name = node_props.pop("name", None) or node_id

        if all_batch_nodes.get(node_name):
            # update existing batch node
            batch_node = all_batch_nodes[node_name]
        else:
            # create new batch node
            batch_node = batch_group.create_node(node_type)

            # set name
            batch_node.name.set_value(node_name)

        # set attributes found in node props
        for key, value in node_props.items():
            if not hasattr(batch_node, key):
                continue
            setattr(batch_node, key, value)

        # add created node for possible linking
        all_batch_nodes[node_id] = batch_node

    # link nodes to each other
    for link in batch_links:
        _from_n, _to_n = link["from_node"], link["to_node"]

        # check if all linking nodes are available
        if not all([
            all_batch_nodes.get(_from_n["id"]),
            all_batch_nodes.get(_to_n["id"])
        ]):
            continue

        # link nodes in defined link
        batch_group.connect_nodes(
            all_batch_nodes[_from_n["id"]], _from_n["connector"],
            all_batch_nodes[_to_n["id"]], _to_n["connector"]
        )

    # sort batch nodes
    batch_group.organize()

    return all_batch_nodes
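

# A hedged sketch of the input shapes create_batch_group_conent expects
# (not part of the original module). The dict keys ("id", "type",
# "properties", "from_node", "to_node", "connector") are the ones the
# function reads; the node type and connector names are assumptions.
#
# batch_nodes = [
#     {"id": "comp", "type": "Action", "properties": {"name": "comp"}},
#     {"id": "write01", "type": "Write File", "properties": {}},
# ]
# batch_links = [
#     {"from_node": {"id": "comp", "connector": "Result"},
#      "to_node": {"id": "write01", "connector": "Front"}},
# ]
# all_nodes = create_batch_group_conent(batch_nodes, batch_links)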
@ -1,24 +0,0 @@
"""
AYON Flame api constants
"""
# AYON marker workflow variables
MARKER_NAME = "OpenPypeData"
MARKER_DURATION = 0
MARKER_COLOR = "cyan"
MARKER_PUBLISH_DEFAULT = False

# AYON color definitions
COLOR_MAP = {
    "red": (1.0, 0.0, 0.0),
    "orange": (1.0, 0.5, 0.0),
    "yellow": (1.0, 1.0, 0.0),
    "pink": (1.0, 0.5, 1.0),
    "white": (1.0, 1.0, 1.0),
    "green": (0.0, 1.0, 0.0),
    "cyan": (0.0, 1.0, 1.0),
    "blue": (0.0, 0.0, 1.0),
    "purple": (0.5, 0.0, 0.5),
    "magenta": (0.5, 0.0, 1.0),
    "black": (0.0, 0.0, 0.0)
}
File diff suppressed because it is too large
@ -1,256 +0,0 @@
from copy import deepcopy
from pprint import pformat

from qtpy import QtWidgets

from ayon_core.pipeline import get_current_project_name
from ayon_core.tools.utils.host_tools import HostToolsHelper

menu_group_name = 'OpenPype'

default_flame_export_presets = {
    'Publish': {
        'PresetVisibility': 2,
        'PresetType': 0,
        'PresetFile': 'OpenEXR/OpenEXR (16-bit fp PIZ).xml'
    },
    'Preview': {
        'PresetVisibility': 3,
        'PresetType': 2,
        'PresetFile': 'Generate Preview.xml'
    },
    'Thumbnail': {
        'PresetVisibility': 3,
        'PresetType': 0,
        'PresetFile': 'Generate Thumbnail.xml'
    }
}


def callback_selection(selection, function):
    import ayon_flame.api as opfapi
    opfapi.CTX.selection = selection
    print("Hook Selection: \n\t{}".format(
        pformat({
            index: (type(item), item.name)
            for index, item in enumerate(opfapi.CTX.selection)})
    ))
    function()


class _FlameMenuApp(object):
    def __init__(self, framework):
        self.name = self.__class__.__name__
        self.framework = framework
        self.log = framework.log
        self.menu_group_name = menu_group_name
        self.dynamic_menu_data = {}

        # flame module is only available when a
        # flame project is loaded and initialized
        self.flame = None
        try:
            import flame
            self.flame = flame
        except ImportError:
            self.flame = None

        # assumes the import above succeeded (a flame project is loaded)
        self.flame_project_name = flame.project.current_project.name
        self.prefs = self.framework.prefs_dict(self.framework.prefs, self.name)
        self.prefs_user = self.framework.prefs_dict(
            self.framework.prefs_user, self.name)
        self.prefs_global = self.framework.prefs_dict(
            self.framework.prefs_global, self.name)

        self.mbox = QtWidgets.QMessageBox()
        project_name = get_current_project_name()
        self.menu = {
            "actions": [{
                'name': project_name or "project",
                'isEnabled': False
            }],
            "name": self.menu_group_name
        }
        self.tools_helper = HostToolsHelper()

    def __getattr__(self, name):
        def method(*args, **kwargs):
            print('calling %s' % name)
        return method

    def rescan(self, *args, **kwargs):
        if not self.flame:
            try:
                import flame
                self.flame = flame
            except ImportError:
                self.flame = None

        if self.flame:
            self.flame.execute_shortcut('Rescan Python Hooks')
            self.log.info('Rescan Python Hooks')


class FlameMenuProjectConnect(_FlameMenuApp):

    # flameMenuProjectconnect app takes care of the preferences dialog as well

    def __init__(self, framework):
        _FlameMenuApp.__init__(self, framework)

    def __getattr__(self, name):
        def method(*args, **kwargs):
            project = self.dynamic_menu_data.get(name)
            if project:
                self.link_project(project)
        return method

    def build_menu(self):
        if not self.flame:
            return []

        menu = deepcopy(self.menu)

        menu['actions'].append({
            "name": "Workfiles...",
            "execute": lambda x: self.tools_helper.show_workfiles()
        })
        menu['actions'].append({
            "name": "Load...",
            "execute": lambda x: self.tools_helper.show_loader()
        })
        menu['actions'].append({
            "name": "Manage...",
            "execute": lambda x: self.tools_helper.show_scene_inventory()
        })
        menu['actions'].append({
            "name": "Library...",
            "execute": lambda x: self.tools_helper.show_library_loader()
        })
        return menu

    def refresh(self, *args, **kwargs):
        self.rescan()

    def rescan(self, *args, **kwargs):
        if not self.flame:
            try:
                import flame
                self.flame = flame
            except ImportError:
                self.flame = None

        if self.flame:
            self.flame.execute_shortcut('Rescan Python Hooks')
            self.log.info('Rescan Python Hooks')


class FlameMenuTimeline(_FlameMenuApp):

    # flameMenuProjectconnect app takes care of the preferences dialog as well

    def __init__(self, framework):
        _FlameMenuApp.__init__(self, framework)

    def __getattr__(self, name):
        def method(*args, **kwargs):
            project = self.dynamic_menu_data.get(name)
            if project:
                self.link_project(project)
        return method

    def build_menu(self):
        if not self.flame:
            return []

        menu = deepcopy(self.menu)

        menu['actions'].append({
            "name": "Create...",
            "execute": lambda x: callback_selection(
                x, self.tools_helper.show_creator)
        })
        menu['actions'].append({
            "name": "Publish...",
            "execute": lambda x: callback_selection(
                x, self.tools_helper.show_publish)
        })
        menu['actions'].append({
            "name": "Load...",
            "execute": lambda x: self.tools_helper.show_loader()
        })
        menu['actions'].append({
            "name": "Manage...",
            "execute": lambda x: self.tools_helper.show_scene_inventory()
        })
        menu['actions'].append({
            "name": "Library...",
            "execute": lambda x: self.tools_helper.show_library_loader()
        })
        return menu

    def refresh(self, *args, **kwargs):
        self.rescan()

    def rescan(self, *args, **kwargs):
        if not self.flame:
            try:
                import flame
                self.flame = flame
            except ImportError:
                self.flame = None

        if self.flame:
            self.flame.execute_shortcut('Rescan Python Hooks')
            self.log.info('Rescan Python Hooks')


class FlameMenuUniversal(_FlameMenuApp):

    # flameMenuProjectconnect app takes care of the preferences dialog as well

    def __init__(self, framework):
        _FlameMenuApp.__init__(self, framework)

    def __getattr__(self, name):
        def method(*args, **kwargs):
            project = self.dynamic_menu_data.get(name)
            if project:
                self.link_project(project)
        return method

    def build_menu(self):
        if not self.flame:
            return []

        menu = deepcopy(self.menu)

        menu['actions'].append({
            "name": "Load...",
            "execute": lambda x: callback_selection(
                x, self.tools_helper.show_loader)
        })
        menu['actions'].append({
            "name": "Manage...",
            "execute": lambda x: self.tools_helper.show_scene_inventory()
        })
        menu['actions'].append({
            "name": "Library...",
            "execute": lambda x: self.tools_helper.show_library_loader()
        })
        return menu

    def refresh(self, *args, **kwargs):
        self.rescan()

    def rescan(self, *args, **kwargs):
        if not self.flame:
            try:
                import flame
                self.flame = flame
            except ImportError:
                self.flame = None

        if self.flame:
            self.flame.execute_shortcut('Rescan Python Hooks')
            self.log.info('Rescan Python Hooks')
@ -1,174 +0,0 @@
"""
Basic avalon integration
"""
import os
import contextlib
from pyblish import api as pyblish

from ayon_core.lib import Logger
from ayon_core.pipeline import (
    register_loader_plugin_path,
    register_creator_plugin_path,
    deregister_loader_plugin_path,
    deregister_creator_plugin_path,
    AVALON_CONTAINER_ID,
)
from ayon_flame import FLAME_ADDON_ROOT
from .lib import (
    set_segment_data_marker,
    set_publish_attribute,
    maintained_segment_selection,
    get_current_sequence,
    reset_segment_selection
)

PLUGINS_DIR = os.path.join(FLAME_ADDON_ROOT, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")

AVALON_CONTAINERS = "AVALON_CONTAINERS"

log = Logger.get_logger(__name__)


def install():
    pyblish.register_host("flame")
    pyblish.register_plugin_path(PUBLISH_PATH)
    register_loader_plugin_path(LOAD_PATH)
    register_creator_plugin_path(CREATE_PATH)
    log.info("AYON Flame plug-ins registered ...")

    # register callback for switching publishable
    pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)

    log.info("AYON Flame host installed ...")


def uninstall():
    pyblish.deregister_host("flame")

    log.info("Deregistering Flame plug-ins..")
    pyblish.deregister_plugin_path(PUBLISH_PATH)
    deregister_loader_plugin_path(LOAD_PATH)
    deregister_creator_plugin_path(CREATE_PATH)

    # deregister callback for switching publishable
    pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)

    log.info("AYON Flame host uninstalled ...")


def containerise(flame_clip_segment,
                 name,
                 namespace,
                 context,
                 loader=None,
                 data=None):

    data_imprint = {
        "schema": "openpype:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "name": str(name),
        "namespace": str(namespace),
        "loader": str(loader),
        "representation": context["representation"]["id"],
    }

    if data:
        for k, v in data.items():
            data_imprint[k] = v

    log.debug("_ data_imprint: {}".format(data_imprint))

    set_segment_data_marker(flame_clip_segment, data_imprint)

    return True


def ls():
    """List available containers."""
    return []


def parse_container(tl_segment, validate=True):
    """Return container data from timeline_item's openpype tag."""
    # TODO: parse_container
    pass


def update_container(tl_segment, data=None):
    """Update container data to input timeline_item's openpype tag."""
    # TODO: update_container
    pass


def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Toggle node passthrough states on instance toggles."""

    log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
        instance, old_value, new_value))

    # # Whether instances should be passthrough based on new value
    # timeline_item = instance.data["item"]
    # set_publish_attribute(timeline_item, new_value)


def remove_instance(instance):
    """Remove instance marker from track item."""
    # TODO: remove_instance
    pass


def list_instances():
    """List all created instances from current workfile."""
    # TODO: list_instances
    pass


def imprint(segment, data=None):
    """Add AYON data to a Flame timeline segment.

    Also includes the publish attribute in the tag.

    Arguments:
        segment (flame.PySegment): flame api object
        data (dict): Any data which needs to be imprinted

    Examples:
        data = {
            'asset': 'sq020sh0280',
            'productType': 'render',
            'productName': 'productMain'
        }
    """
    data = data or {}

    set_segment_data_marker(segment, data)

    # add publish attribute
    set_publish_attribute(segment, True)


@contextlib.contextmanager
def maintained_selection():
    import flame
    from .lib import CTX

    # check if segment is selected
    if isinstance(CTX.selection[0], flame.PySegment):
        sequence = get_current_sequence(CTX.selection)

        try:
            with maintained_segment_selection(sequence) as selected:
                yield
        finally:
            # reset all selected clips
            reset_segment_selection(sequence)
            # select only original selection of segments
            for segment in selected:
                segment.selected = True
    else:
        # nothing to maintain; a context manager must still yield once
        yield
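

# A minimal usage sketch (not part of the original module), assuming
# CTX.selection already holds the selected PySegment objects:
#
# with maintained_selection():
#     ...  # mutate the sequence; the original selection is restored after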
File diff suppressed because it is too large
@ -1,185 +0,0 @@
import os
from xml.etree import ElementTree as ET
from ayon_core.lib import Logger

log = Logger.get_logger(__name__)


def export_clip(export_path, clip, preset_path, **kwargs):
    """Flame export wrapper

    Args:
        export_path (str): exporting directory path
        clip (PyClip): flame api object
        preset_path (str): full path to the xml export preset

    Kwargs:
        thumb_frame_number (int)[optional]: source frame number
        in_mark (int)[optional]: cut in mark
        out_mark (int)[optional]: cut out mark

    Raises:
        KeyError: Missing input kwarg `thumb_frame_number`
            in case `thumbnail` in `export_preset`
        FileExistsError: Missing export preset in shared folder
    """
    import flame

    in_mark = out_mark = None

    # Set exporter
    exporter = flame.PyExporter()
    exporter.foreground = True
    exporter.export_between_marks = True

    if kwargs.get("thumb_frame_number"):
        thumb_frame_number = kwargs["thumb_frame_number"]
        # make sure it exists in kwargs
        if not thumb_frame_number:
            raise KeyError(
                "Missing key `thumb_frame_number` in input kwargs")

        in_mark = int(thumb_frame_number)
        out_mark = int(thumb_frame_number) + 1

    elif kwargs.get("in_mark") and kwargs.get("out_mark"):
        in_mark = int(kwargs["in_mark"])
        out_mark = int(kwargs["out_mark"])
    else:
        exporter.export_between_marks = False

    try:
        # set in and out marks if they are available
        if in_mark and out_mark:
            clip.in_mark = in_mark
            clip.out_mark = out_mark

        # export with exporter
        exporter.export(clip, preset_path, export_path)
    finally:
        print('Exported: {} at {}-{}'.format(
            clip.name.get_value(),
            clip.in_mark,
            clip.out_mark
        ))
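

# A minimal usage sketch (not part of the original module); the preset path
# and mark values below are illustrative assumptions only:
#
# export_clip(
#     "/tmp/export", clip,
#     "/opt/Autodesk/shared/export/presets/OpenEXR (16-bit fp PIZ).xml",
#     in_mark=1001, out_mark=1100,
# )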


def get_preset_path_by_xml_name(xml_preset_name):
    def _search_path(root):
        output = []
        for root, _dirs, files in os.walk(root):
            for f in files:
                if f != xml_preset_name:
                    continue
                file_path = os.path.join(root, f)
                output.append(file_path)
        return output

    def _validate_results(results):
        if results and len(results) == 1:
            return results.pop()
        elif results and len(results) > 1:
            print((
                "More matching presets for `{}`:\n"
                "{}").format(xml_preset_name, results))
            return results.pop()
        else:
            return None

    from .utils import (
        get_flame_install_root,
        get_flame_version
    )

    # get actual flame version and install path
    _version = get_flame_version()["full"]
    _install_root = get_flame_install_root()

    # search path templates
    shared_search_root = "{install_root}/shared/export/presets"
    install_search_root = (
        "{install_root}/presets/{version}/export/presets/flame")

    # fill templates
    shared_search_root = shared_search_root.format(
        install_root=_install_root
    )
    install_search_root = install_search_root.format(
        install_root=_install_root,
        version=_version
    )

    # get search results
    shared_results = _search_path(shared_search_root)
    installed_results = _search_path(install_search_root)

    # first try to return shared results
    shared_preset_path = _validate_results(shared_results)

    if shared_preset_path:
        return os.path.dirname(shared_preset_path)

    # then try installed results
    installed_preset_path = _validate_results(installed_results)

    if installed_preset_path:
        return os.path.dirname(installed_preset_path)

    # if nothing is found, return False
    return False


def modify_preset_file(xml_path, staging_dir, data):
    """Modify an xml preset with input data

    Args:
        xml_path (str): path to the input xml preset
        staging_dir (str): staging dir path
        data (dict): data where key is an xml tag path and value a string

    Returns:
        str: path to the modified preset copy in the staging dir
    """
    # create temp path
    dirname, basename = os.path.split(xml_path)
    temp_path = os.path.join(staging_dir, basename)

    # change xml following data keys
    with open(xml_path, "r") as datafile:
        _root = ET.parse(datafile)

    for key, value in data.items():
        try:
            if "/" in key:
                if not key.startswith("./"):
                    key = ".//" + key

                split_key_path = key.split("/")
                element_key = split_key_path[-1]
                parent_obj_path = "/".join(split_key_path[:-1])

                parent_obj = _root.find(parent_obj_path)
                element_obj = parent_obj.find(element_key)
                # NOTE: childless elements are falsy, so test for None
                if element_obj is None:
                    append_element(parent_obj, element_key, value)
            else:
                finds = _root.findall(".//{}".format(key))
                if not finds:
                    raise AttributeError
                for element in finds:
                    element.text = str(value)
        except AttributeError:
            log.warning(
                "Cannot create attribute: {}: {}. Skipping".format(
                    key, value
                ))
    _root.write(temp_path)

    return temp_path
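

# A hedged example of the `data` mapping (not part of the original module):
# keys are xml tag names or tag paths, values replace the tag text. The tag
# names below are illustrative assumptions only.
#
# new_preset = modify_preset_file(
#     "/path/to/preset.xml", "/tmp/staging", {
#         "startFrame": 1001,
#         "video/fileType": "OpenEXR",
#     }
# )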


def append_element(root_element_obj, key, value):
    new_element_obj = ET.Element(key)
    log.debug("__ new_element_obj: {}".format(new_element_obj))
    new_element_obj.text = str(value)
    root_element_obj.insert(0, new_element_obj)
@ -1,504 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-

from __future__ import absolute_import
import os
import sys
import subprocess
import json
import xml.dom.minidom as minidom
from copy import deepcopy
import datetime
from libwiretapPythonClientAPI import (  # noqa
    WireTapClientInit,
    WireTapClientUninit,
    WireTapNodeHandle,
    WireTapServerHandle,
    WireTapInt,
    WireTapStr
)


class WireTapCom(object):
    """
    Communicator class wrapper for talking to the WireTap db.

    This way we are able to set up a new project with settings and the
    correct colorspace policy. We are also able to create a new user
    or get an existing user with a similar name (users usually clone
    their profiles and add a date stamp suffix).
    """

    def __init__(self, host_name=None, volume_name=None, group_name=None):
        """Initialisation of the WireTap communication class

        Args:
            host_name (str, optional): Name of host server. Defaults to None.
            volume_name (str, optional): Name of volume. Defaults to None.
            group_name (str, optional): Name of user group. Defaults to None.
        """
        # set main attributes of server
        # if there are none set the default installation
        self.host_name = host_name or "localhost"
        self.volume_name = volume_name or "stonefs"
        self.group_name = group_name or "staff"

        # wiretap tools dir path
        self.wiretap_tools_dir = os.getenv("AYON_WIRETAP_TOOLS")

        # initialize WireTap client
        WireTapClientInit()

        # add the server to shared variable
        self._server = WireTapServerHandle("{}:IFFFS".format(self.host_name))
        print("WireTap connected at '{}'...".format(
            self.host_name))

    def close(self):
        self._server = None
        WireTapClientUninit()
        print("WireTap closed...")

    def get_launch_args(
            self, project_name, project_data, user_name, *args, **kwargs):
        """Form launch arguments for the AYON launcher.

        Args:
            project_name (str): name of project
            project_data (dict): Flame compatible project data
            user_name (str): name of user

        Returns:
            list: arguments
        """

        workspace_name = kwargs.get("workspace_name")
        color_policy = kwargs.get("color_policy")

        project_exists = self._project_prep(project_name)
        if not project_exists:
            self._set_project_settings(project_name, project_data)
            self._set_project_colorspace(project_name, color_policy)

        user_name = self._user_prep(user_name)

        if workspace_name is None:
            # default workspace
            print("Using a default workspace")
            return [
                "--start-project={}".format(project_name),
                "--start-user={}".format(user_name),
                "--create-workspace"
            ]

        else:
            print(
                "Using a custom workspace '{}'".format(workspace_name))

            self._workspace_prep(project_name, workspace_name)
            return [
                "--start-project={}".format(project_name),
                "--start-user={}".format(user_name),
                "--create-workspace",
                "--start-workspace={}".format(workspace_name)
            ]
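
    # A hedged example of the JSON exchange payload consumed by the
    # `__main__` block at the end of this script (not part of the original
    # module). The keys match the `in_data.pop(...)` calls; the values are
    # illustrative assumptions only.
    #
    # {
    #     "host_name": "localhost",
    #     "volume_name": "stonefs",
    #     "group_name": "staff",
    #     "project_name": "my_project",
    #     "project_data": {"FrameRate": "25 fps"},
    #     "user_name": "artist",
    #     "workspace_name": "ayon",
    #     "color_policy": "Legacy"
    # }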

    def _workspace_prep(self, project_name, workspace_name):
        """Prepare a workspace

        In case it does not exist it will be created.

        Args:
            project_name (str): project name
            workspace_name (str): workspace name

        Raises:
            AttributeError: unable to create workspace
        """
        workspace_exists = self._child_is_in_parent_path(
            "/projects/{}".format(project_name), workspace_name, "WORKSPACE"
        )
        if not workspace_exists:
            project = WireTapNodeHandle(
                self._server, "/projects/{}".format(project_name))

            workspace_node = WireTapNodeHandle()
            created_workspace = project.createNode(
                workspace_name, "WORKSPACE", workspace_node)

            if not created_workspace:
                raise AttributeError(
                    "Cannot create workspace `{}` in "
                    "project `{}`: `{}`".format(
                        workspace_name, project_name, project.lastError())
                )

        print(
            "Workspace `{}` is successfully created".format(workspace_name))

    def _project_prep(self, project_name):
        """Prepare a project

        In case it does not exist it will be created.

        Args:
            project_name (str): project name

        Raises:
            AttributeError: unable to create project
        """
        # test if project exists
        project_exists = self._child_is_in_parent_path(
            "/projects", project_name, "PROJECT")

        if not project_exists:
            volumes = self._get_all_volumes()

            if len(volumes) == 0:
                raise AttributeError(
                    "Not able to create new project. No Volumes existing"
                )

            # check if volume exists
            if self.volume_name not in volumes:
                raise AttributeError(
                    ("Volume '{}' does not exist in '{}'").format(
                        self.volume_name, volumes)
                )

            # form cmd arguments
            project_create_cmd = [
                os.path.join(
                    self.wiretap_tools_dir,
                    "wiretap_create_node"
                ),
                '-n',
                os.path.join("/volumes", self.volume_name),
                '-d',
                project_name,
                '-g',
            ]

            project_create_cmd.append(self.group_name)

            print(project_create_cmd)

            exit_code = subprocess.call(
                project_create_cmd,
                cwd=os.path.expanduser('~'),
                preexec_fn=_subprocess_preexec_fn
            )

            if exit_code != 0:
                raise RuntimeError("Cannot create project in flame db")

            print(
                "A new project '{}' is created.".format(project_name))
        return project_exists

    def _get_all_volumes(self):
        """Request all available volumes from WireTap

        Returns:
            list: all available volumes in server

        Raises:
            AttributeError: unable to get any volume children from server
        """
        root = WireTapNodeHandle(self._server, "/volumes")
        children_num = WireTapInt(0)

        get_children_num = root.getNumChildren(children_num)
        if not get_children_num:
            raise AttributeError(
                "Cannot get number of volumes: {}".format(root.lastError())
            )

        volumes = []

        # go through all children and get volume names
        child_obj = WireTapNodeHandle()
        for child_idx in range(children_num):

            # get a child
            if not root.getChild(child_idx, child_obj):
                raise AttributeError(
                    "Unable to get child: {}".format(root.lastError()))

            node_name = WireTapStr()
            get_children_name = child_obj.getDisplayName(node_name)

            if not get_children_name:
                raise AttributeError(
                    "Unable to get child name: {}".format(
                        child_obj.lastError())
                )

            volumes.append(node_name.c_str())

        return volumes

    def _user_prep(self, user_name):
        """Ensure the user exists in the users stack

        Args:
            user_name (str): name of a user

        Raises:
            AttributeError: unable to create user
        """

        # get all used usernames in db
        used_names = self._get_usernames()
        print(">> used_names: {}".format(used_names))

        # filter only those which share the input user name
        filtered_users = [user for user in used_names if user_name in user]

        if filtered_users:
            # TODO: need to find the lastly created one, following the
            # regex pattern for the date used in the name
            return filtered_users.pop()

        # create new user name with date in suffix
        now = datetime.datetime.now()  # current date and time
        date = now.strftime("%Y%m%d")
        new_user_name = "{}_{}".format(user_name, date)
        print(new_user_name)

        if not self._child_is_in_parent_path("/users", new_user_name, "USER"):
            # Create the new user
            users = WireTapNodeHandle(self._server, "/users")

            user_node = WireTapNodeHandle()
            created_user = users.createNode(new_user_name, "USER", user_node)
            if not created_user:
                raise AttributeError(
                    "User {} cannot be created: {}".format(
                        new_user_name, users.lastError())
                )

            print("User `{}` is created".format(new_user_name))
        return new_user_name

    def _get_usernames(self):
        """Request all available users from WireTap

        Returns:
            list: all available user names

        Raises:
            AttributeError: there are no users in server
        """
        root = WireTapNodeHandle(self._server, "/users")
        children_num = WireTapInt(0)

        get_children_num = root.getNumChildren(children_num)
        if not get_children_num:
            raise AttributeError(
                "Cannot get number of users: {}".format(root.lastError())
            )

        usernames = []

        # go through all children and get user names
        child_obj = WireTapNodeHandle()
        for child_idx in range(children_num):

            # get a child
            if not root.getChild(child_idx, child_obj):
                raise AttributeError(
                    "Unable to get child: {}".format(root.lastError()))

            node_name = WireTapStr()
            get_children_name = child_obj.getDisplayName(node_name)

            if not get_children_name:
                raise AttributeError(
                    "Unable to get child name: {}".format(
                        child_obj.lastError())
                )

            usernames.append(node_name.c_str())

        return usernames

    def _child_is_in_parent_path(self, parent_path, child_name, child_type):
        """Check if a given child is in the parent path.

        Args:
            parent_path (str): db path to parent
            child_name (str): name of child
            child_type (str): type of child

        Raises:
            AttributeError: Not able to get number of children
            AttributeError: Not able to get children from parent
            AttributeError: Not able to get child name
            AttributeError: Not able to get child type

        Returns:
            bool: True if child is in parent path
        """
        parent = WireTapNodeHandle(self._server, parent_path)

        # iterate number of children
        children_num = WireTapInt(0)
        requested = parent.getNumChildren(children_num)
        if not requested:
            raise AttributeError((
                "Error: Cannot request number of "
                "children from the node {}. Make sure your "
                "wiretap service is running: {}").format(
                    parent_path, parent.lastError())
            )

        # iterate children
        child_obj = WireTapNodeHandle()
        for child_idx in range(children_num):
            if not parent.getChild(child_idx, child_obj):
                raise AttributeError(
                    "Cannot get child: {}".format(
                        parent.lastError()))

            node_name = WireTapStr()
            node_type = WireTapStr()

            if not child_obj.getDisplayName(node_name):
                raise AttributeError(
                    "Unable to get child name: %s" % child_obj.lastError()
                )
            if not child_obj.getNodeTypeStr(node_type):
                raise AttributeError(
                    "Unable to obtain child type: %s" % child_obj.lastError()
                )

            if (node_name.c_str() == child_name) and (
                    node_type.c_str() == child_type):
                return True

        return False

    def _set_project_settings(self, project_name, project_data):
        """Set project attributes.

        Args:
            project_name (str): name of project
            project_data (dict): data with project attributes
                (flame compatible)

        Raises:
            AttributeError: Not able to set project attributes
        """
        # generate xml from the project_data dict
        _xml = "<Project>"
        for key, value in project_data.items():
            _xml += "<{}>{}</{}>".format(key, value, key)
        _xml += "</Project>"

        pretty_xml = minidom.parseString(_xml).toprettyxml()
        print("__ xml: {}".format(pretty_xml))

        # set project data to wiretap
        project_node = WireTapNodeHandle(
            self._server, "/projects/{}".format(project_name))

        if not project_node.setMetaData("XML", _xml):
            raise AttributeError(
                "Not able to set project attributes {}. Error: {}".format(
                    project_name, project_node.lastError())
            )

        print("Project settings successfully set.")

    def _set_project_colorspace(self, project_name, color_policy):
        """Set the project's colorspace policy.

        Args:
            project_name (str): name of project
            color_policy (str): name of policy

        Raises:
            RuntimeError: Not able to set colorspace policy
        """
        color_policy = color_policy or "Legacy"

        # check if the colour policy is in a custom dir
        if "/" in color_policy:
            # strip the prefix in case a full path was used,
            # so it can be re-applied consistently
            color_policy = color_policy.replace("/syncolor/policies/", "")
            # expecting input is `Shared/NameOfPolicy`
            color_policy = "/syncolor/policies/{}".format(
                color_policy)
        else:
            color_policy = "/syncolor/policies/Autodesk/{}".format(
                color_policy)

        # create arguments
        project_colorspace_cmd = [
            os.path.join(
                self.wiretap_tools_dir,
                "wiretap_duplicate_node"
            ),
            "-s",
            color_policy,
            "-n",
            "/projects/{}/syncolor".format(project_name)
        ]

        print(project_colorspace_cmd)

        exit_code = subprocess.call(
            project_colorspace_cmd,
            cwd=os.path.expanduser('~'),
            preexec_fn=_subprocess_preexec_fn
        )

        if exit_code != 0:
            raise RuntimeError("Cannot set colorspace {} on project {}".format(
                color_policy, project_name
            ))


def _subprocess_preexec_fn():
    """Helper function

    Sets the process group and clears the umask so created
    files get full permissions (0777).
    """
    os.setpgrp()
    os.umask(0o000)


if __name__ == "__main__":
    # get json exchange data
    json_path = sys.argv[-1]
    json_data = open(json_path).read()
    in_data = json.loads(json_data)
    out_data = deepcopy(in_data)

    # get main server attributes
    host_name = in_data.pop("host_name")
    volume_name = in_data.pop("volume_name")
    group_name = in_data.pop("group_name")

    # initialize class
    wiretap_handler = WireTapCom(host_name, volume_name, group_name)

    try:
        app_args = wiretap_handler.get_launch_args(
            project_name=in_data.pop("project_name"),
            project_data=in_data.pop("project_data"),
            user_name=in_data.pop("user_name"),
            **in_data
        )
    finally:
        wiretap_handler.close()

    # set returned args back to out data
    out_data.update({
        "app_args": app_args
    })

    # write it back to the exchange json file
    with open(json_path, "w") as file_stream:
        json.dump(out_data, file_stream, indent=4)
@ -1,143 +0,0 @@
|
|||
"""
|
||||
Flame utils for syncing scripts
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from ayon_core.lib import Logger
|
||||
from ayon_flame import FLAME_ADDON_ROOT
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
|
||||
def _sync_utility_scripts(env=None):
|
||||
""" Synchronizing basic utlility scripts for flame.
|
||||
|
||||
To be able to run start AYON within Flame we have to copy
|
||||
all utility_scripts and additional FLAME_SCRIPT_DIR into
|
||||
`/opt/Autodesk/shared/python`. This will be always synchronizing those
|
||||
folders.
|
||||
"""
|
||||
|
||||
env = env or os.environ
|
||||
|
||||
# initiate inputs
|
||||
scripts = {}
|
||||
fsd_env = env.get("FLAME_SCRIPT_DIRS", "")
|
||||
flame_shared_dir = "/opt/Autodesk/shared/python"
|
||||
|
||||
fsd_paths = [os.path.join(
|
||||
FLAME_ADDON_ROOT,
|
||||
"api",
|
||||
"utility_scripts"
|
||||
)]
|
||||
|
||||
# collect script dirs
|
||||
log.info("FLAME_SCRIPT_DIRS: `{fsd_env}`".format(**locals()))
|
||||
log.info("fsd_paths: `{fsd_paths}`".format(**locals()))
|
||||
|
||||
# add application environment setting for FLAME_SCRIPT_DIR
|
||||
# to script path search
|
||||
for _dirpath in fsd_env.split(os.pathsep):
|
||||
if not os.path.isdir(_dirpath):
|
||||
log.warning("Path is not a valid dir: `{_dirpath}`".format(
|
||||
**locals()))
|
||||
continue
|
||||
fsd_paths.append(_dirpath)
|
||||
|
||||
# collect scripts from dirs
|
||||
for path in fsd_paths:
|
||||
scripts.update({path: os.listdir(path)})
|
||||
|
||||
remove_black_list = []
|
||||
for _k, s_list in scripts.items():
|
||||
remove_black_list += s_list
|
||||
|
||||
log.info("remove_black_list: `{remove_black_list}`".format(**locals()))
|
||||
log.info("Additional Flame script paths: `{fsd_paths}`".format(**locals()))
|
||||
log.info("Flame Scripts: `{scripts}`".format(**locals()))
|
||||
|
||||
# make sure no script file is in folder
|
||||
if next(iter(os.listdir(flame_shared_dir)), None):
|
||||
for _itm in os.listdir(flame_shared_dir):
|
||||
skip = False
|
||||
|
||||
# skip all scripts and folders which are not maintained
|
||||
if _itm not in remove_black_list:
|
||||
skip = True
|
||||
|
||||
# do not skip if pyc in extension
|
||||
if not os.path.isdir(_itm) and "pyc" in os.path.splitext(_itm)[-1]:
|
||||
skip = False
|
||||
|
||||
# continue if skip in true
|
||||
if skip:
|
||||
continue
|
||||
|
||||
path = os.path.join(flame_shared_dir, _itm)
|
||||
log.info("Removing `{path}`...".format(**locals()))
|
||||
|
||||
try:
|
||||
if os.path.isdir(path):
|
||||
shutil.rmtree(path, onerror=None)
|
||||
else:
|
||||
os.remove(path)
|
||||
except PermissionError as msg:
|
||||
log.warning(
|
||||
"Not able to remove: `{}`, Problem with: `{}`".format(
|
||||
path,
|
||||
msg
|
||||
)
|
||||
)
|
||||
|
||||
# copy scripts into Resolve's utility scripts dir
|
||||
for dirpath, scriptlist in scripts.items():
|
||||
# directory and scripts list
|
||||
for _script in scriptlist:
|
||||
# script in script list
|
||||
src = os.path.join(dirpath, _script)
|
||||
dst = os.path.join(flame_shared_dir, _script)
|
||||
log.info("Copying `{src}` to `{dst}`...".format(**locals()))
|
||||
|
||||
try:
|
||||
if os.path.isdir(src):
|
||||
shutil.copytree(
|
||||
src, dst, symlinks=False,
|
||||
ignore=None, ignore_dangling_symlinks=False
|
||||
)
|
||||
else:
|
||||
shutil.copy2(src, dst)
|
||||
except (PermissionError, FileExistsError) as msg:
|
||||
log.warning(
|
||||
"Not able to copy to: `{}`, Problem with: `{}`".format(
|
||||
dst,
|
||||
msg
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def setup(env=None):
|
||||
""" Wrapper installer started from
|
||||
`flame/hooks/pre_flame_setup.py`
|
||||
"""
|
||||
env = env or os.environ
|
||||
|
||||
# synchronize resolve utility scripts
|
||||
_sync_utility_scripts(env)
|
||||
|
||||
log.info("Flame AYON wrapper has been installed")
|
||||
|
||||
|
||||
def get_flame_version():
|
||||
import flame
|
||||
|
||||
return {
|
||||
"full": flame.get_version(),
|
||||
"major": flame.get_version_major(),
|
||||
"minor": flame.get_version_minor(),
|
||||
"patch": flame.get_version_patch()
|
||||
}
|
||||
|
||||
|
||||
def get_flame_install_root():
|
||||
return "/opt/Autodesk"

@@ -1,37 +0,0 @@
"""Host API required by the Work Files tool"""

import os
from ayon_core.lib import Logger
# from .. import (
#     get_project_manager,
#     get_current_project
# )


log = Logger.get_logger(__name__)

exported_projet_ext = ".otoc"


def file_extensions():
    return [exported_projet_ext]


def has_unsaved_changes():
    pass


def save_file(filepath):
    pass


def open_file(filepath):
    pass


def current_file():
    pass


def work_root(session):
    return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/")
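
# A minimal usage sketch (not part of the original module): `work_root`
# only normalizes the session workdir to forward slashes, e.g.:
#
#     work_root({"AYON_WORKDIR": "C:\\projects\\sh010\\work"})
#     # -> "C:/projects/sh010/work"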

@@ -1,239 +0,0 @@
import os
import json
import tempfile
import contextlib
import socket
from pprint import pformat

from ayon_core.lib import (
    get_ayon_username,
    run_subprocess,
)
from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_flame import FLAME_ADDON_ROOT


class FlamePrelaunch(PreLaunchHook):
    """Flame prelaunch hook

    Will make sure flame_script_dirs are copied to the user's folder
    defined in the environment var FLAME_SCRIPT_DIR.
    """
    app_groups = {"flame"}
    permissions = 0o777

    wtc_script_path = os.path.join(
        FLAME_ADDON_ROOT, "api", "scripts", "wiretap_com.py"
    )
    launch_types = {LaunchTypes.local}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.signature = "( {} )".format(self.__class__.__name__)

    def execute(self):
        """Hook entry method."""
        _env = self.launch_context.env
        self.flame_python_exe = _env["AYON_FLAME_PYTHON_EXEC"]
        self.flame_pythonpath = _env["AYON_FLAME_PYTHONPATH"]

        project_entity = self.data["project_entity"]
        project_name = project_entity["name"]
        volume_name = _env.get("FLAME_WIRETAP_VOLUME")

        # get image io
        project_settings = self.data["project_settings"]

        imageio_flame = project_settings["flame"]["imageio"]

        # Check whether the 'enabled' key from host imageio settings exists
        # so we can tell if the host is using the new colormanagement
        # framework. If 'enabled' isn't found we want 'colormanaged' set to
        # True, because prior to the key existing we always did
        # colormanagement for Flame.
        colormanaged = imageio_flame.get("enabled")
        # if key was not found, set to True
        # ensuring backward compatibility
        if colormanaged is None:
            colormanaged = True

        # get user name and host name
        user_name = get_ayon_username()
        user_name = user_name.replace(".", "_")

        hostname = socket.gethostname()  # not returning wiretap host name

        self.log.debug("Collected user \"{}\"".format(user_name))
        self.log.info(pformat(project_entity))
        project_attribs = project_entity["attrib"]
        width = project_attribs["resolutionWidth"]
        height = project_attribs["resolutionHeight"]
        fps = float(project_attribs["fps"])

        project_data = {
            "Name": project_entity["name"],
            "Nickname": project_entity["code"],
            "Description": "Created by AYON",
            "SetupDir": project_entity["name"],
            "FrameWidth": int(width),
            "FrameHeight": int(height),
            "AspectRatio": float(
                (width / height) * project_attribs["pixelAspect"]
            ),
            "FrameRate": self._get_flame_fps(fps)
        }

        data_to_script = {
            # from settings
            "host_name": _env.get("FLAME_WIRETAP_HOSTNAME") or hostname,
            "volume_name": volume_name,
            "group_name": _env.get("FLAME_WIRETAP_GROUP"),

            # from project
            "project_name": project_name,
            "user_name": user_name,
            "project_data": project_data
        }

        # add color management data
        if colormanaged:
            project_data.update({
                "FrameDepth": str(imageio_flame["project"]["frameDepth"]),
                "FieldDominance": str(
                    imageio_flame["project"]["fieldDominance"])
            })
            data_to_script["color_policy"] = str(
                imageio_flame["project"]["colourPolicy"])

        self.log.info(pformat(dict(_env)))
        self.log.info(pformat(data_to_script))

        # add to python path from settings
        self._add_pythonpath()

        app_arguments = self._get_launch_arguments(data_to_script)

        # fix project data permission issue
        self._fix_permissions(project_name, volume_name)

        self.launch_context.launch_args.extend(app_arguments)

    def _fix_permissions(self, project_name, volume_name):
        """Workaround for project data permissions

        Reported issue: when a project is created locally on one machine,
        it is impossible to migrate it to another machine. Autodesk Flame
        creates some unmanageable files which need to be opened to 0o777.

        Args:
            project_name (str): project name
            volume_name (str): studio volume
        """
        dirs_to_modify = [
            "/usr/discreet/project/{}".format(project_name),
            "/opt/Autodesk/clip/{}/{}.prj".format(volume_name, project_name),
            "/usr/discreet/clip/{}/{}.prj".format(volume_name, project_name)
        ]

        for dirtm in dirs_to_modify:
            for root, dirs, files in os.walk(dirtm):
                try:
                    for name in set(dirs) | set(files):
                        path = os.path.join(root, name)
                        st = os.stat(path)
                        # compare only the permission bits
                        if st.st_mode & 0o777 != self.permissions:
                            os.chmod(path, self.permissions)

                except OSError as exc:
                    self.log.warning("Not able to open files: {}".format(exc))

    def _get_flame_fps(self, fps_num):
        fps_table = {
            float(23.976): "23.976 fps",
            int(25): "25 fps",
            int(24): "24 fps",
            float(29.97): "29.97 fps DF",
            int(30): "30 fps",
            int(50): "50 fps",
            float(59.94): "59.94 fps DF",
            int(60): "60 fps"
        }

        match_key = min(fps_table.keys(), key=lambda x: abs(x - fps_num))

        try:
            return fps_table[match_key]
        except KeyError as msg:
            raise KeyError((
                "Missing FPS key in conversion table. "
                "Following keys are available: {}".format(fps_table.keys())
            )) from msg
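
    # A minimal usage sketch (not part of the original hook): the lookup
    # above snaps any incoming float to the nearest key in the table:
    #
    #     self._get_flame_fps(23.98)  # -> "23.976 fps"
    #     self._get_flame_fps(29.97)  # -> "29.97 fps DF"
    #
    # Because `match_key` is always taken from `fps_table.keys()`, the
    # KeyError branch is effectively unreachable.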

    def _add_pythonpath(self):
        pythonpath = self.launch_context.env.get("PYTHONPATH")

        # split it explicitly by the path separator used in settings
        new_pythonpath = self.flame_pythonpath.split(os.pathsep)
        # PYTHONPATH may not be set at all
        if pythonpath:
            new_pythonpath += pythonpath.split(os.pathsep)

        self.launch_context.env["PYTHONPATH"] = os.pathsep.join(new_pythonpath)

    def _get_launch_arguments(self, script_data):
        # Dump data to string
        dumped_script_data = json.dumps(script_data)

        with make_temp_file(dumped_script_data) as tmp_json_path:
            # Prepare subprocess arguments
            args = [
                self.flame_python_exe.format(
                    **self.launch_context.env
                ),
                self.wtc_script_path,
                tmp_json_path
            ]
            self.log.info("Executing: {}".format(" ".join(args)))

            process_kwargs = {
                "logger": self.log,
                "env": self.launch_context.env
            }

            run_subprocess(args, **process_kwargs)

            # process returned json file to pass launch args
            return_json_data = open(tmp_json_path).read()
            returned_data = json.loads(return_json_data)
            app_args = returned_data.get("app_args")
            self.log.info("____ app_args: `{}`".format(app_args))

            if not app_args:
                raise RuntimeError("App arguments were not solved")

            return app_args


@contextlib.contextmanager
def make_temp_file(data):
    try:
        # Store dumped json to temporary file
        temporary_json_file = tempfile.NamedTemporaryFile(
            mode="w", suffix=".json", delete=False
        )
        temporary_json_file.write(data)
        temporary_json_file.close()
        temporary_json_filepath = temporary_json_file.name.replace(
            "\\", "/"
        )

        yield temporary_json_filepath

    except IOError as _error:
        raise IOError(
            "Not able to create temp json file: {}".format(
                _error
            )
        )

    finally:
        # Remove the temporary json
        os.remove(temporary_json_filepath)
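
# A minimal usage sketch (not part of the original module): the context
# manager above provides a scratch file for the JSON round-trip with
# `wiretap_com.py`:
#
#     with make_temp_file(json.dumps({"key": "value"})) as tmp_path:
#         ...  # subprocess reads and rewrites tmp_path
#     # the file is removed on exit, even if an exception was raised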

@@ -1,624 +0,0 @@
"""Compatibility with OpenTimelineIO 0.12.0 and newer
"""

import os
import re
import json
import logging
import opentimelineio as otio
from . import utils

import flame
from pprint import pformat

log = logging.getLogger(__name__)


TRACK_TYPES = {
    "video": otio.schema.TrackKind.Video,
    "audio": otio.schema.TrackKind.Audio
}
MARKERS_COLOR_MAP = {
    (1.0, 0.0, 0.0): otio.schema.MarkerColor.RED,
    (1.0, 0.5, 0.0): otio.schema.MarkerColor.ORANGE,
    (1.0, 1.0, 0.0): otio.schema.MarkerColor.YELLOW,
    (1.0, 0.5, 1.0): otio.schema.MarkerColor.PINK,
    (1.0, 1.0, 1.0): otio.schema.MarkerColor.WHITE,
    (0.0, 1.0, 0.0): otio.schema.MarkerColor.GREEN,
    (0.0, 1.0, 1.0): otio.schema.MarkerColor.CYAN,
    (0.0, 0.0, 1.0): otio.schema.MarkerColor.BLUE,
    (0.5, 0.0, 0.5): otio.schema.MarkerColor.PURPLE,
    (0.5, 0.0, 1.0): otio.schema.MarkerColor.MAGENTA,
    (0.0, 0.0, 0.0): otio.schema.MarkerColor.BLACK
}
MARKERS_INCLUDE = True


class CTX:
    _fps = None
    _tl_start_frame = None
    project = None
    clips = None

    @classmethod
    def set_fps(cls, new_fps):
        if not isinstance(new_fps, float):
            raise TypeError("Invalid fps type {}".format(type(new_fps)))
        if cls._fps != new_fps:
            cls._fps = new_fps

    @classmethod
    def get_fps(cls):
        return cls._fps

    @classmethod
    def set_tl_start_frame(cls, number):
        if not isinstance(number, int):
            raise TypeError("Invalid timeline start frame type {}".format(
                type(number)))
        if cls._tl_start_frame != number:
            cls._tl_start_frame = number

    @classmethod
    def get_tl_start_frame(cls):
        return cls._tl_start_frame


def flatten(_list):
    for item in _list:
        if isinstance(item, (list, tuple)):
            for sub_item in flatten(item):
                yield sub_item
        else:
            yield item


def get_current_flame_project():
    project = flame.project.current_project
    return project


def create_otio_rational_time(frame, fps):
    return otio.opentime.RationalTime(
        float(frame),
        float(fps)
    )


def create_otio_time_range(start_frame, frame_duration, fps):
    return otio.opentime.TimeRange(
        start_time=create_otio_rational_time(start_frame, fps),
        duration=create_otio_rational_time(frame_duration, fps)
    )


def _get_metadata(item):
    if hasattr(item, 'metadata'):
        return dict(item.metadata) if item.metadata else {}
    return {}


def create_time_effects(otio_clip, speed):
    otio_effect = None

    # retime on track item
    if speed != 1.:
        # make effect
        otio_effect = otio.schema.LinearTimeWarp()
        otio_effect.name = "Speed"
        otio_effect.time_scalar = speed
        otio_effect.metadata = {}

    # freeze frame effect
    if speed == 0.:
        otio_effect = otio.schema.FreezeFrame()
        otio_effect.name = "FreezeFrame"
        otio_effect.metadata = {}

    if otio_effect:
        # add otio effect to clip effects
        otio_clip.effects.append(otio_effect)


def _get_marker_color(flame_colour):
    # clamp colors to the closest half numbers
    _flame_colour = [
        (lambda x: round(x * 2) / 2)(c)
        for c in flame_colour]

    for color, otio_color_type in MARKERS_COLOR_MAP.items():
        if _flame_colour == list(color):
            return otio_color_type

    return otio.schema.MarkerColor.RED


def _get_flame_markers(item):
    output_markers = []

    time_in = item.record_in.relative_frame

    for marker in item.markers:
        log.debug(marker)
        start_frame = marker.location.get_value().relative_frame

        start_frame = (start_frame - time_in) + 1

        marker_data = {
            "name": marker.name.get_value(),
            "duration": marker.duration.get_value().relative_frame,
            "comment": marker.comment.get_value(),
            "start_frame": start_frame,
            "colour": marker.colour.get_value()
        }

        output_markers.append(marker_data)

    return output_markers


def create_otio_markers(otio_item, item):
    markers = _get_flame_markers(item)
    for marker in markers:
        frame_rate = CTX.get_fps()

        marked_range = otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(
                marker["start_frame"],
                frame_rate
            ),
            duration=otio.opentime.RationalTime(
                marker["duration"],
                frame_rate
            )
        )

        # test whether the comment contains a json string
        check_if_json = re.findall(
            re.compile(r"[{:}]"),
            marker["comment"]
        )

        # to identify this as json, at least 3 items should be present
        # in the list: ["{", ":", "}"]
        metadata = {}
        if len(check_if_json) >= 3:
            # this is a json string
            try:
                # capture exceptions which are related to strings only
                metadata.update(
                    json.loads(marker["comment"])
                )
            except ValueError as msg:
                log.error("Marker json conversion: {}".format(msg))
        else:
            metadata["comment"] = marker["comment"]

        otio_marker = otio.schema.Marker(
            name=marker["name"],
            color=_get_marker_color(
                marker["colour"]),
            marked_range=marked_range,
            metadata=metadata
        )

        otio_item.markers.append(otio_marker)
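
# A minimal sketch of the marker-comment heuristic above (not part of the
# original module); both inputs are hypothetical:
#
#     '{"shot": "sh010", "handles": 10}'  ->  metadata == {"shot": "sh010",
#                                                          "handles": 10}
#     'just a plain note'                 ->  metadata == {"comment":
#                                                          "just a plain note"}
#
# A malformed comment that still contains "{", ":" and "}" is attempted as
# json, rejected by `json.loads` and only logged as an error.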

def create_otio_reference(clip_data, fps=None):
    metadata = _get_metadata(clip_data)
    duration = int(clip_data["source_duration"])

    # get file info for path and start frame
    frame_start = 0
    fps = fps or CTX.get_fps()

    path = clip_data["fpath"]

    file_name = os.path.basename(path)
    file_head, extension = os.path.splitext(file_name)

    # get padding and other file infos
    log.debug("_ path: {}".format(path))

    otio_ex_ref_item = None

    is_sequence = frame_number = utils.get_frame_from_filename(file_name)
    if is_sequence:
        # strip the frame number to get the file name head
        file_head = file_name.split(frame_number)[0]
        frame_start = int(frame_number)
        padding = len(frame_number)

        metadata.update({
            "isSequence": True,
            "padding": padding
        })

        # if it is a file sequence try to create `ImageSequenceReference`
        # the OTIO might not be compatible, so return nothing and do it
        # the old way
        try:
            dirname = os.path.dirname(path)
            otio_ex_ref_item = otio.schema.ImageSequenceReference(
                target_url_base=dirname + os.sep,
                name_prefix=file_head,
                name_suffix=extension,
                start_frame=frame_start,
                frame_zero_padding=padding,
                rate=fps,
                available_range=create_otio_time_range(
                    frame_start,
                    duration,
                    fps
                )
            )
        except AttributeError:
            pass

    if not otio_ex_ref_item:
        dirname, file_name = os.path.split(path)
        file_name = utils.get_reformatted_filename(file_name, padded=False)
        reformated_path = os.path.join(dirname, file_name)
        # in case of old OTIO or a video file create `ExternalReference`
        otio_ex_ref_item = otio.schema.ExternalReference(
            target_url=reformated_path,
            available_range=create_otio_time_range(
                frame_start,
                duration,
                fps
            )
        )

    # add metadata to otio item
    add_otio_metadata(otio_ex_ref_item, clip_data, **metadata)

    return otio_ex_ref_item


def create_otio_clip(clip_data):
    from ayon_flame.api import MediaInfoFile, TimeEffectMetadata

    segment = clip_data["PySegment"]

    # calculate source in
    media_info = MediaInfoFile(clip_data["fpath"], logger=log)
    media_timecode_start = media_info.start_frame
    media_fps = media_info.fps

    # Timewarp metadata
    tw_data = TimeEffectMetadata(segment, logger=log).data
    log.debug("__ tw_data: {}".format(tw_data))

    # define first frame
    file_first_frame = utils.get_frame_from_filename(
        clip_data["fpath"])
    if file_first_frame:
        file_first_frame = int(file_first_frame)

    first_frame = media_timecode_start or file_first_frame or 0

    _clip_source_in = int(clip_data["source_in"])
    _clip_source_out = int(clip_data["source_out"])
    _clip_record_in = clip_data["record_in"]
    _clip_record_out = clip_data["record_out"]
    _clip_record_duration = int(clip_data["record_duration"])

    log.debug("_ file_first_frame: {}".format(file_first_frame))
    log.debug("_ first_frame: {}".format(first_frame))
    log.debug("_ _clip_source_in: {}".format(_clip_source_in))
    log.debug("_ _clip_source_out: {}".format(_clip_source_out))
    log.debug("_ _clip_record_in: {}".format(_clip_record_in))
    log.debug("_ _clip_record_out: {}".format(_clip_record_out))

    # first resolve reversed timing
    speed = 1
    if clip_data["source_in"] > clip_data["source_out"]:
        source_in = _clip_source_out - int(first_frame)
        source_out = _clip_source_in - int(first_frame)
        speed = -1
    else:
        source_in = _clip_source_in - int(first_frame)
        source_out = _clip_source_out - int(first_frame)

    log.debug("_ source_in: {}".format(source_in))
    log.debug("_ source_out: {}".format(source_out))

    if file_first_frame:
        log.debug("_ file_source_in: {}".format(
            file_first_frame + source_in))
        log.debug("_ file_source_out: {}".format(
            file_first_frame + source_out))

    source_duration = (source_out - source_in + 1)

    # secondly check for any change of speed
    if source_duration != _clip_record_duration:
        retime_speed = float(source_duration) / float(_clip_record_duration)
        log.debug("_ calculated speed: {}".format(retime_speed))
        speed *= retime_speed

    # get speed from metadata if available
    if tw_data.get("speed"):
        speed = tw_data["speed"]
        log.debug("_ metadata speed: {}".format(speed))

    log.debug("_ speed: {}".format(speed))
    log.debug("_ source_duration: {}".format(source_duration))
    log.debug("_ _clip_record_duration: {}".format(_clip_record_duration))

    # create media reference
    media_reference = create_otio_reference(
        clip_data, media_fps)

    # create source range
    source_range = create_otio_time_range(
        source_in,
        _clip_record_duration,
        CTX.get_fps()
    )

    otio_clip = otio.schema.Clip(
        name=clip_data["segment_name"],
        source_range=source_range,
        media_reference=media_reference
    )

    # Add markers
    if MARKERS_INCLUDE:
        create_otio_markers(otio_clip, segment)

    if speed != 1:
        create_time_effects(otio_clip, speed)

    return otio_clip


def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
    return otio.schema.Gap(
        source_range=create_otio_time_range(
            gap_start,
            (clip_start - tl_start_frame) - gap_start,
            fps
        )
    )


def _get_colourspace_policy():

    output = {}
    # get policies project path
    policy_dir = "/opt/Autodesk/project/{}/synColor/policy".format(
        CTX.project.name
    )
    log.debug(policy_dir)
    policy_fp = os.path.join(policy_dir, "policy.cfg")

    if not os.path.exists(policy_fp):
        return output

    with open(policy_fp) as file:
        dict_conf = dict(line.strip().split(' = ', 1) for line in file)
        output.update(
            {"openpype.flame.{}".format(k): v for k, v in dict_conf.items()}
        )
    return output
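
# A minimal sketch (not part of the original module): `policy.cfg` is read
# as flat `key = value` lines, so a hypothetical file
#
#     version = 2.0
#     viewingRules = on
#
# would become {"openpype.flame.version": "2.0",
# "openpype.flame.viewingRules": "on"} in the timeline metadata. A line
# without " = " would make the dict() construction fail with a ValueError.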

def _create_otio_timeline(sequence):

    metadata = _get_metadata(sequence)

    # find colour policy files and add them to metadata
    colorspace_policy = _get_colourspace_policy()
    metadata.update(colorspace_policy)

    metadata.update({
        "openpype.timeline.width": int(sequence.width),
        "openpype.timeline.height": int(sequence.height),
        "openpype.timeline.pixelAspect": 1
    })

    rt_start_time = create_otio_rational_time(
        CTX.get_tl_start_frame(), CTX.get_fps())

    return otio.schema.Timeline(
        name=str(sequence.name)[1:-1],
        global_start_time=rt_start_time,
        metadata=metadata
    )


def create_otio_track(track_type, track_name):
    return otio.schema.Track(
        name=track_name,
        kind=TRACK_TYPES[track_type]
    )


def add_otio_gap(clip_data, otio_track, prev_out):
    gap_length = clip_data["record_in"] - prev_out
    if prev_out != 0:
        gap_length -= 1

    gap = otio.opentime.TimeRange(
        duration=otio.opentime.RationalTime(
            gap_length,
            CTX.get_fps()
        )
    )
    otio_gap = otio.schema.Gap(source_range=gap)
    otio_track.append(otio_gap)


def add_otio_metadata(otio_item, item, **kwargs):
    metadata = _get_metadata(item)

    # add additional metadata from kwargs
    if kwargs:
        metadata.update(kwargs)

    # add metadata to otio item metadata
    for key, value in metadata.items():
        otio_item.metadata.update({key: value})


def _get_shot_tokens_values(clip, tokens):
    output = {}

    old_value = clip.shot_name.get_value()

    for token in tokens:
        clip.shot_name.set_value(token)
        _key = re.sub("[ <>]", "", token)

        try:
            output[_key] = int(clip.shot_name.get_value())
        except ValueError:
            output[_key] = clip.shot_name.get_value()

    clip.shot_name.set_value(old_value)

    return output


def _get_segment_attributes(segment):

    log.debug("Segment name|hidden: {}|{}".format(
        segment.name.get_value(), segment.hidden
    ))
    if (
        segment.name.get_value() == ""
        or segment.hidden.get_value()
    ):
        return None

    # Add timeline segment to tree
    clip_data = {
        "segment_name": segment.name.get_value(),
        "segment_comment": segment.comment.get_value(),
        "shot_name": segment.shot_name.get_value(),
        "tape_name": segment.tape_name,
        "source_name": segment.source_name,
        "fpath": segment.file_path,
        "PySegment": segment
    }

    # add all available shot tokens
    shot_tokens = _get_shot_tokens_values(
        segment,
        ["<colour space>", "<width>", "<height>", "<depth>"]
    )
    clip_data.update(shot_tokens)

    # populate shot source metadata
    segment_attrs = [
        "record_duration", "record_in", "record_out",
        "source_duration", "source_in", "source_out"
    ]
    segment_attrs_data = {}
    for attr in segment_attrs:
        if not hasattr(segment, attr):
            continue
        _value = getattr(segment, attr)
        segment_attrs_data[attr] = str(_value).replace("+", ":")

        if attr in ["record_in", "record_out"]:
            clip_data[attr] = _value.relative_frame
        else:
            clip_data[attr] = _value.frame

    clip_data["segment_timecodes"] = segment_attrs_data

    return clip_data


def create_otio_timeline(sequence):
    log.info(dir(sequence))
    log.info(sequence.attributes)

    CTX.project = get_current_flame_project()

    # get current timeline
    CTX.set_fps(
        float(str(sequence.frame_rate)[:-4]))

    tl_start_frame = utils.timecode_to_frames(
        str(sequence.start_time).replace("+", ":"),
        CTX.get_fps()
    )
    CTX.set_tl_start_frame(tl_start_frame)

    # convert timeline to otio
    otio_timeline = _create_otio_timeline(sequence)

    # create otio tracks and clips
    for ver in sequence.versions:
        for track in ver.tracks:
            # avoid all empty or hidden tracks
            if (
                len(track.segments) == 0
                or track.hidden.get_value()
            ):
                continue

            # convert track to otio
            otio_track = create_otio_track(
                "video", str(track.name)[1:-1])

            all_segments = []
            for segment in track.segments:
                clip_data = _get_segment_attributes(segment)
                if not clip_data:
                    continue
                all_segments.append(clip_data)

            segments_ordered = dict(enumerate(all_segments))
            log.debug("_ segments_ordered: {}".format(
                pformat(segments_ordered)
            ))
            if not segments_ordered:
                continue

            for itemindex, segment_data in segments_ordered.items():
                log.debug("_ itemindex: {}".format(itemindex))

                # Add Gap if needed
                prev_item = (
                    segment_data
                    if itemindex == 0
                    else segments_ordered[itemindex - 1]
                )
                log.debug("_ segment_data: {}".format(segment_data))

                # calculate the frame range difference between the clips
                clip_diff = segment_data["record_in"] - prev_item["record_out"]

                # add gap if the first track item does not start
                # at the first timeline frame
                if itemindex == 0 and segment_data["record_in"] > 0:
                    add_otio_gap(segment_data, otio_track, 0)

                # or add a gap if the following track items have
                # frame range differences between each other
                elif itemindex and clip_diff != 1:
                    add_otio_gap(
                        segment_data, otio_track, prev_item["record_out"])

                # create otio clip and add it to the track
                otio_clip = create_otio_clip(segment_data)
                otio_track.append(otio_clip)

                log.debug("_ otio_clip: {}".format(otio_clip))

            # add track to otio timeline
            otio_timeline.tracks.append(otio_track)

    return otio_timeline


def write_to_file(otio_timeline, path):
    otio.adapters.write_to_file(otio_timeline, path)
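
# A minimal usage sketch (not part of the original module), assuming
# `sequence` is a Flame PySequence from the current selection:
#
#     otio_timeline = create_otio_timeline(sequence)
#     write_to_file(otio_timeline, "/tmp/timeline_export.otio")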

@@ -1,91 +0,0 @@
import re
import opentimelineio as otio
import logging
log = logging.getLogger(__name__)

FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")


def timecode_to_frames(timecode, framerate):
    rt = otio.opentime.from_timecode(timecode, framerate)
    return int(otio.opentime.to_frames(rt))


def frames_to_timecode(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_timecode(rt)


def frames_to_seconds(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_seconds(rt)


def get_reformatted_filename(filename, padded=True):
    """
    Return fixed python expression path

    Args:
        filename (str): file name

    Returns:
        str: string with reformatted path

    Example:
        get_reformatted_filename("plate.1001.exr") > "plate.%04d.exr"

    """
    found = FRAME_PATTERN.search(filename)

    if not found:
        log.info("File name is not a sequence: {}".format(filename))
        return filename

    padding = get_padding_from_filename(filename)

    replacement = "%0{}d".format(padding) if padded else "%d"
    start_idx, end_idx = found.span(1)

    return replacement.join(
        [filename[:start_idx], filename[end_idx:]]
    )


def get_padding_from_filename(filename):
    """
    Return padding number from Flame path style

    Args:
        filename (str): file name

    Returns:
        int: padding number

    Example:
        get_padding_from_filename("plate.0001.exr") > 4

    """
    found = get_frame_from_filename(filename)

    return len(found) if found else None


def get_frame_from_filename(filename):
    """
    Return sequence frame number from Flame path style

    Args:
        filename (str): file name

    Returns:
        str: sequence frame number

    Example:
        get_frame_from_filename("plate.0001.exr") > "0001"

    """
    found = re.findall(FRAME_PATTERN, filename)

    return found.pop() if found else None
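
# A minimal usage sketch (not part of the original module) of how the three
# helpers above combine:
#
#     get_frame_from_filename("plate.1001.exr")    # -> "1001"
#     get_padding_from_filename("plate.1001.exr")  # -> 4
#     get_reformatted_filename("plate.1001.exr")   # -> "plate.%04d.exr"
#     get_reformatted_filename(
#         "plate.1001.exr", padded=False)          # -> "plate.%d.exr"
#
# FRAME_PATTERN requires the digits to sit between "." or "_" and ".", so a
# version token such as "plate_v001.mov" is not treated as a frame number.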

@@ -1,307 +0,0 @@
from copy import deepcopy
import ayon_flame.api as opfapi


class CreateShotClip(opfapi.Creator):
    """Publishable clip"""

    label = "Create Publishable Clip"
    product_type = "clip"
    icon = "film"
    defaults = ["Main"]

    presets = None

    def process(self):
        # Create a copy of object attributes that are modified
        # during `process`
        presets = deepcopy(self.presets)
        gui_inputs = self.get_gui_inputs()

        # get key pairs from presets and match them onto the ui inputs
        for k, v in gui_inputs.items():
            if v["type"] in ("dict", "section"):
                # nested dictionary (only one level allowed
                # for sections and dict)
                for _k, _v in v["value"].items():
                    if presets.get(_k) is not None:
                        gui_inputs[k][
                            "value"][_k]["value"] = presets[_k]

            if presets.get(k) is not None:
                gui_inputs[k]["value"] = presets[k]

        # open widget for plugin inputs
        results_back = self.create_widget(
            "AYON publish attributes creator",
            "Define sequential rename and fill hierarchy data.",
            gui_inputs
        )

        if len(self.selected) < 1:
            return

        if not results_back:
            print("Operation aborted")
            return

        # get ui output for track name for vertical sync
        v_sync_track = results_back["vSyncTrack"]["value"]

        # sort selected track items: hero track segments first
        sorted_selected_segments = []
        unsorted_selected_segments = []
        for _segment in self.selected:
            if _segment.parent.name.get_value() in v_sync_track:
                sorted_selected_segments.append(_segment)
            else:
                unsorted_selected_segments.append(_segment)

        sorted_selected_segments.extend(unsorted_selected_segments)

        kwargs = {
            "log": self.log,
            "ui_inputs": results_back,
            "avalon": self.data,
            "product_type": self.data["productType"]
        }

        for i, segment in enumerate(sorted_selected_segments):
            kwargs["rename_index"] = i
            # convert track item to timeline media pool item
            opfapi.PublishableClip(segment, **kwargs).convert()

    def get_gui_inputs(self):
        gui_tracks = self._get_video_track_names(
            opfapi.get_current_sequence(opfapi.CTX.selection)
        )
        return deepcopy({
            "renameHierarchy": {
                "type": "section",
                "label": "Shot Hierarchy And Rename Settings",
                "target": "ui",
                "order": 0,
                "value": {
                    "hierarchy": {
                        "value": "{folder}/{sequence}",
                        "type": "QLineEdit",
                        "label": "Shot Parent Hierarchy",
                        "target": "tag",
                        "toolTip": "Parent folder for the shot root folder, template filled with the `Shot Template Keywords` section",  # noqa
                        "order": 0},
                    "useShotName": {
                        "value": True,
                        "type": "QCheckBox",
                        "label": "Use Shot Name",
                        "target": "ui",
                        "toolTip": "Use name from the Shot name clip attribute",  # noqa
                        "order": 1},
                    "clipRename": {
                        "value": False,
                        "type": "QCheckBox",
                        "label": "Rename clips",
                        "target": "ui",
                        "toolTip": "Rename selected clips on the fly",  # noqa
                        "order": 2},
                    "clipName": {
                        "value": "{sequence}{shot}",
                        "type": "QLineEdit",
                        "label": "Clip Name Template",
                        "target": "ui",
                        "toolTip": "Template for creating shot names, used for renaming (switch `Rename clips` on)",  # noqa
                        "order": 3},
                    "segmentIndex": {
                        "value": True,
                        "type": "QCheckBox",
                        "label": "Segment index",
                        "target": "ui",
                        "toolTip": "Take number from segment index",  # noqa
                        "order": 4},
                    "countFrom": {
                        "value": 10,
                        "type": "QSpinBox",
                        "label": "Count sequence from",
                        "target": "ui",
                        "toolTip": "Set the number the sequence count starts from",  # noqa
                        "order": 5},
                    "countSteps": {
                        "value": 10,
                        "type": "QSpinBox",
                        "label": "Stepping number",
                        "target": "ui",
                        "toolTip": "Number added with every new step",  # noqa
                        "order": 6},
                }
            },
            "hierarchyData": {
                "type": "dict",
                "label": "Shot Template Keywords",
                "target": "tag",
                "order": 1,
                "value": {
                    "folder": {
                        "value": "shots",
                        "type": "QLineEdit",
                        "label": "{folder}",
                        "target": "tag",
                        "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                        "order": 0},
                    "episode": {
                        "value": "ep01",
                        "type": "QLineEdit",
                        "label": "{episode}",
                        "target": "tag",
                        "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                        "order": 1},
                    "sequence": {
                        "value": "sq01",
                        "type": "QLineEdit",
                        "label": "{sequence}",
                        "target": "tag",
                        "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                        "order": 2},
                    "track": {
                        "value": "{_track_}",
                        "type": "QLineEdit",
                        "label": "{track}",
                        "target": "tag",
                        "toolTip": "Name of track layer.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                        "order": 3},
                    "shot": {
                        "value": "sh###",
                        "type": "QLineEdit",
                        "label": "{shot}",
                        "target": "tag",
                        "toolTip": "Name of shot. `#` is converted to a padded number.\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                        "order": 4}
                }
            },
            "verticalSync": {
                "type": "section",
                "label": "Vertical Synchronization Of Attributes",
                "target": "ui",
                "order": 2,
                "value": {
                    "vSyncOn": {
                        "value": True,
                        "type": "QCheckBox",
                        "label": "Enable Vertical Sync",
                        "target": "ui",
                        "toolTip": "Switch on if you want clips above each other to share their attributes",  # noqa
                        "order": 0},
                    "vSyncTrack": {
                        "value": gui_tracks,  # noqa
                        "type": "QComboBox",
                        "label": "Hero track",
                        "target": "ui",
                        "toolTip": "Select the driving track name which should be hero for all others",  # noqa
                        "order": 1}
                }
            },
            "publishSettings": {
                "type": "section",
                "label": "Publish Settings",
                "target": "ui",
                "order": 3,
                "value": {
                    "productName": {
                        "value": ["[ track name ]", "main", "bg", "fg",
                                  "animatic"],
                        "type": "QComboBox",
                        "label": "Product Name",
                        "target": "ui",
                        "toolTip": "Choose the product name pattern; if [ track name ] is selected, the name of the track layer will be used",  # noqa
                        "order": 0},
                    "productType": {
                        "value": ["plate", "take"],
                        "type": "QComboBox",
                        "label": "Product Type",
                        "target": "ui", "toolTip": "What this product is used for",  # noqa
                        "order": 1},
                    "reviewTrack": {
                        "value": ["< none >"] + gui_tracks,
                        "type": "QComboBox",
                        "label": "Use Review Track",
                        "target": "ui",
                        "toolTip": "Generate preview videos on the fly; if `< none >` is defined nothing will be generated.",  # noqa
                        "order": 2},
                    "audio": {
                        "value": False,
                        "type": "QCheckBox",
                        "label": "Include audio",
                        "target": "tag",
                        "toolTip": "Process products with corresponding audio",  # noqa
                        "order": 3},
                    "sourceResolution": {
                        "value": False,
                        "type": "QCheckBox",
                        "label": "Source resolution",
                        "target": "tag",
                        "toolTip": "Is the resolution taken from the timeline or the source?",  # noqa
                        "order": 4},
                }
            },
            "frameRangeAttr": {
                "type": "section",
                "label": "Shot Attributes",
                "target": "ui",
                "order": 4,
                "value": {
                    "workfileFrameStart": {
                        "value": 1001,
                        "type": "QSpinBox",
                        "label": "Workfiles Start Frame",
                        "target": "tag",
                        "toolTip": "Set the workfile starting frame number",  # noqa
                        "order": 0
                    },
                    "handleStart": {
                        "value": 0,
                        "type": "QSpinBox",
                        "label": "Handle Start",
                        "target": "tag",
                        "toolTip": "Handle at start of clip",  # noqa
                        "order": 1
                    },
                    "handleEnd": {
                        "value": 0,
                        "type": "QSpinBox",
                        "label": "Handle End",
                        "target": "tag",
                        "toolTip": "Handle at end of clip",  # noqa
                        "order": 2
                    },
                    "includeHandles": {
                        "value": False,
                        "type": "QCheckBox",
                        "label": "Include handles",
                        "target": "tag",
                        "toolTip": "By default handles are excluded",  # noqa
                        "order": 3
                    },
                    "retimedHandles": {
                        "value": True,
                        "type": "QCheckBox",
                        "label": "Retimed handles",
                        "target": "tag",
                        "toolTip": "By default handles are retimed.",  # noqa
                        "order": 4
                    },
                    "retimedFramerange": {
                        "value": True,
                        "type": "QCheckBox",
                        "label": "Retimed framerange",
                        "target": "tag",
                        "toolTip": "By default the framerange is retimed.",  # noqa
                        "order": 5
                    }
                }
            }
        })

    def _get_video_track_names(self, sequence):
        track_names = []
        for ver in sequence.versions:
            for track in ver.tracks:
                track_names.append(track.name.get_value())

        return track_names
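
# A minimal sketch (not part of the original plugin): `process` overlays
# flat preset values onto the nested gui structure one level deep, so a
# hypothetical preset of {"countFrom": 20} would rewrite
# gui_inputs["renameHierarchy"]["value"]["countFrom"]["value"] to 20 before
# the widget is opened.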

@@ -1,274 +0,0 @@
from copy import deepcopy
import os
import flame
from pprint import pformat
import ayon_flame.api as opfapi
from ayon_core.lib import StringTemplate
from ayon_core.lib.transcoding import (
    VIDEO_EXTENSIONS,
    IMAGE_EXTENSIONS
)


class LoadClip(opfapi.ClipLoader):
    """Load a product to the timeline as a clip

    Place the clip on the timeline at its asset origin timings,
    collected during conforming to the project.
    """

    product_types = {"render2d", "source", "plate", "render", "review"}
    representations = {"*"}
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip"
    order = -10
    icon = "code-fork"
    color = "orange"

    # settings
    reel_group_name = "OpenPype_Reels"
    reel_name = "Loaded"
    clip_name_template = "{folder[name]}_{product[name]}<_{output}>"

    """ Anatomy keys from version context data and dynamically added:
    - {layerName} - original layer name token
    - {layerUID} - original layer UID token
    - {originalBasename} - original clip name taken from file
    """
    layer_rename_template = "{folder[name]}_{product[name]}<_{output}>"
    layer_rename_patterns = []

    def load(self, context, name, namespace, options):

        # get flame objects
        fproject = flame.project.current_project
        self.fpd = fproject.current_workspace.desktop

        # load clip to timeline and get main variables
        version_entity = context["version"]
        version_attributes = version_entity["attrib"]
        version_name = version_entity["version"]
        colorspace = self.get_colorspace(context)

        # in case output is not in context, replace the key
        # with representation
        if not context["representation"]["context"].get("output"):
            self.clip_name_template = self.clip_name_template.replace(
                "output", "representation")
            self.layer_rename_template = self.layer_rename_template.replace(
                "output", "representation")

        formatting_data = deepcopy(context["representation"]["context"])
        clip_name = StringTemplate(self.clip_name_template).format(
            formatting_data)

        # convert colorspace with ocio to flame mapping
        # in imageio flame section
        colorspace = self.get_native_colorspace(colorspace)
        self.log.info("Loading with colorspace: `{}`".format(colorspace))

        # create workfile path
        workfile_dir = os.environ["AYON_WORKDIR"]
        openclip_dir = os.path.join(
            workfile_dir, clip_name
        )
        openclip_path = os.path.join(
            openclip_dir, clip_name + ".clip"
        )
        if not os.path.exists(openclip_dir):
            os.makedirs(openclip_dir)

        # prepare clip data from context and send it to openClipLoader
        path = self.filepath_from_context(context)
        loading_context = {
            "path": path.replace("\\", "/"),
            "colorspace": colorspace,
            "version": "v{:0>3}".format(version_name),
            "layer_rename_template": self.layer_rename_template,
            "layer_rename_patterns": self.layer_rename_patterns,
            "context_data": formatting_data
        }
        self.log.debug(pformat(
            loading_context
        ))
        self.log.debug(openclip_path)

        # make openpype clip file
        opfapi.OpenClipSolver(
            openclip_path, loading_context, logger=self.log).make()

        # prepare Reel group in the current desktop
        opc = self._get_clip(
            clip_name,
            openclip_path
        )

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {
            key: version_attributes.get(key, str(None))
            for key in add_keys
        }

        # add variables related to version context
        data_imprint.update({
            "version": version_name,
            "colorspace": colorspace,
            "objectName": clip_name
        })

        # TODO: finish the containerisation
        # opc_segment = opfapi.get_clip_segment(opc)

        # return opfapi.containerise(
        #     opc_segment,
        #     name, namespace, context,
        #     self.__class__.__name__,
        #     data_imprint)

        return opc

    def _get_clip(self, name, clip_path):
        reel = self._get_reel()
        # with maintained openclip as opc
        matching_clip = [cl for cl in reel.clips
                         if cl.name.get_value() == name]
        if matching_clip:
            return matching_clip.pop()
        else:
            created_clips = flame.import_clips(str(clip_path), reel)
            return created_clips.pop()

    def _get_reel(self):

        matching_rgroup = [
            rg for rg in self.fpd.reel_groups
            if rg.name.get_value() == self.reel_group_name
        ]

        if not matching_rgroup:
            reel_group = self.fpd.create_reel_group(str(self.reel_group_name))
            for _r in reel_group.reels:
                if "reel" not in _r.name.get_value().lower():
                    continue
                self.log.debug("Removing: {}".format(_r.name))
                flame.delete(_r)
        else:
            reel_group = matching_rgroup.pop()

        matching_reel = [
            re for re in reel_group.reels
            if re.name.get_value() == self.reel_name
        ]

        if not matching_reel:
            reel_group = reel_group.create_reel(str(self.reel_name))
        else:
            reel_group = matching_reel.pop()

        return reel_group

    def _get_segment_from_clip(self, clip):
        # unwrapping segment from input clip
        pass

    # def switch(self, container, context):
    #     self.update(container, context)

    # def update(self, container, context):
    #     """ Updating previously loaded clips
    #     """
    #     # load clip to timeline and get main variables
    #     repre_entity = context['representation']
    #     name = container['name']
    #     namespace = container['namespace']
    #     track_item = phiero.get_track_items(
    #         track_item_name=namespace)
    #     version = io.find_one({
    #         "type": "version",
    #         "id": repre_entity["versionId"]
    #     })
    #     version_data = version.get("data", {})
    #     version_name = version.get("name", None)
    #     colorspace = version_data.get("colorSpace", None)
    #     object_name = "{}_{}".format(name, namespace)
    #     file = get_representation_path(repre_entity).replace("\\", "/")
    #     clip = track_item.source()

    #     # reconnect media to new path
    #     clip.reconnectMedia(file)

    #     # set colorspace
    #     if colorspace:
    #         clip.setSourceMediaColourTransform(colorspace)

    #     # add additional metadata from the version to imprint Avalon knob
    #     add_keys = [
    #         "frameStart", "frameEnd", "source", "author",
    #         "fps", "handleStart", "handleEnd"
    #     ]

    #     # move all version data keys to tag data
    #     data_imprint = {}
    #     for key in add_keys:
    #         data_imprint.update({
    #             key: version_data.get(key, str(None))
    #         })

    #     # add variables related to version context
    #     data_imprint.update({
    #         "representation": repre_entity["id"],
    #         "version": version_name,
    #         "colorspace": colorspace,
    #         "objectName": object_name
    #     })

    #     # update color of clip regarding the version order
    #     self.set_item_color(track_item, version)

    #     return phiero.update_container(track_item, data_imprint)

    # def remove(self, container):
    #     """ Removing previously loaded clips
    #     """
    #     # load clip to timeline and get main variables
    #     namespace = container['namespace']
    #     track_item = phiero.get_track_items(
    #         track_item_name=namespace)
    #     track = track_item.parent()

    #     # remove track item from track
    #     track.removeItem(track_item)

    # @classmethod
    # def multiselection(cls, track_item):
    #     if not cls.track:
    #         cls.track = track_item.parent()
    #         cls.sequence = cls.track.parent()

    # @classmethod
    # def set_item_color(cls, track_item, version):

    #     clip = track_item.source()
    #     # define version name
    #     version_name = version.get("name", None)
    #     # get all versions in list
    #     versions = io.find({
    #         "type": "version",
    #         "parent": version["parent"]
    #     }).distinct('name')

    #     max_version = max(versions)

    #     # set clip colour
    #     if version_name == max_version:
    #         clip.binItem().setColor(cls.clip_color_last)
    #     else:
    #         clip.binItem().setColor(cls.clip_color)

@@ -1,180 +0,0 @@
from copy import deepcopy
import os
import flame
from pprint import pformat
import ayon_flame.api as opfapi
from ayon_core.lib import StringTemplate
from ayon_core.lib.transcoding import (
    VIDEO_EXTENSIONS,
    IMAGE_EXTENSIONS
)


class LoadClipBatch(opfapi.ClipLoader):
    """Load a product to the current batch group as a clip

    Place the clip on the timeline at its asset origin timings,
    collected during conforming to the project.
    """

    product_types = {"render2d", "source", "plate", "render", "review"}
    representations = {"*"}
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip to current batch"
    order = -10
    icon = "code-fork"
    color = "orange"

    # settings
    reel_name = "OP_LoadedReel"
    clip_name_template = "{batch}_{folder[name]}_{product[name]}<_{output}>"

    """ Anatomy keys from version context data and dynamically added:
    - {layerName} - original layer name token
    - {layerUID} - original layer UID token
    - {originalBasename} - original clip name taken from file
    """
    layer_rename_template = "{folder[name]}_{product[name]}<_{output}>"
    layer_rename_patterns = []

    def load(self, context, name, namespace, options):

        # get flame objects
        self.batch = options.get("batch") or flame.batch

        # load clip to timeline and get main variables
        version_entity = context["version"]
        version_attributes = version_entity["attrib"]
        version_name = version_entity["version"]
        colorspace = self.get_colorspace(context)

        clip_name_template = self.clip_name_template
        layer_rename_template = self.layer_rename_template
        # in case output is not in context, replace the key
        # with representation
        if not context["representation"]["context"].get("output"):
            clip_name_template = clip_name_template.replace(
                "output", "representation")
            layer_rename_template = layer_rename_template.replace(
                "output", "representation")

        folder_entity = context["folder"]
        product_entity = context["product"]
        formatting_data = deepcopy(context["representation"]["context"])
        formatting_data["batch"] = self.batch.name.get_value()
        formatting_data.update({
            "asset": folder_entity["name"],
            "folder": {
                "name": folder_entity["name"],
            },
            "subset": product_entity["name"],
            "family": product_entity["productType"],
            "product": {
                "name": product_entity["name"],
                "type": product_entity["productType"],
            }
        })

        clip_name = StringTemplate(clip_name_template).format(
            formatting_data)

        # convert colorspace with ocio to flame mapping
        # in imageio flame section
        colorspace = self.get_native_colorspace(colorspace)
        self.log.info("Loading with colorspace: `{}`".format(colorspace))

        # create workfile path
        workfile_dir = options.get("workdir") or os.environ["AYON_WORKDIR"]
        openclip_dir = os.path.join(
            workfile_dir, clip_name
        )
        openclip_path = os.path.join(
            openclip_dir, clip_name + ".clip"
        )

        if not os.path.exists(openclip_dir):
            os.makedirs(openclip_dir)

        # prepare clip data from context and send it to openClipLoader
        path = self.filepath_from_context(context)
        loading_context = {
            "path": path.replace("\\", "/"),
            "colorspace": colorspace,
            "version": "v{:0>3}".format(version_name),
            "layer_rename_template": layer_rename_template,
            "layer_rename_patterns": self.layer_rename_patterns,
            "context_data": formatting_data
        }
        self.log.debug(pformat(
            loading_context
        ))
        self.log.debug(openclip_path)

        # make openpype clip file
        opfapi.OpenClipSolver(
            openclip_path, loading_context, logger=self.log).make()

        # prepare Reel group in the current desktop
        opc = self._get_clip(
            clip_name,
            openclip_path
        )

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {
            key: version_attributes.get(key, str(None))
            for key in add_keys
        }
        # add variables related to version context
        data_imprint.update({
            "version": version_name,
            "colorspace": colorspace,
            "objectName": clip_name
        })

        # TODO: finish the containerisation
        # opc_segment = opfapi.get_clip_segment(opc)

        # return opfapi.containerise(
        #     opc_segment,
        #     name, namespace, context,
        #     self.__class__.__name__,
        #     data_imprint)

        return opc

    def _get_clip(self, name, clip_path):
        reel = self._get_reel()

        # with maintained openclip as opc
        matching_clip = None
        for cl in reel.clips:
            if cl.name.get_value() != name:
                continue
            matching_clip = cl

        if not matching_clip:
            created_clips = flame.import_clips(str(clip_path), reel)
            return created_clips.pop()

        return matching_clip

    def _get_reel(self):

        matching_reel = [
            rg for rg in self.batch.reels
            if rg.name.get_value() == self.reel_name
        ]

        return (
            matching_reel.pop()
            if matching_reel
            else self.batch.create_reel(str(self.reel_name))
        )
|
||||
|
|
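A standalone sketch of the clip-name resolution above: plain `str.format` resolves the `{folder[name]}`-style keys the same way, while the optional `<_{output}>` section is specific to ayon_core's StringTemplate and is omitted here; all values are made up.

template = "{folder[name]}_{product[name]}"
formatting_data = {
    "folder": {"name": "sh010"},
    "product": {"name": "plateMain"},
}
# nested keys resolve the same way StringTemplate handles plain tokens
clip_name = template.format(**formatting_data)
print(clip_name)  # sh010_plateMain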
@ -1,64 +0,0 @@
import os
import pyblish.api
import tempfile
import ayon_flame.api as opfapi
from ayon_flame.otio import flame_export as otio_export
import opentimelineio as otio
from pprint import pformat
from importlib import reload  # noqa

reload(otio_export)  # noqa


@pyblish.api.log
class CollectTestSelection(pyblish.api.ContextPlugin):
    """Testing selection sharing."""

    order = pyblish.api.CollectorOrder
    label = "test selection"
    hosts = ["flame"]
    active = False

    def process(self, context):
        self.log.info(
            "Active Selection: {}".format(opfapi.CTX.selection))

        sequence = opfapi.get_current_sequence(opfapi.CTX.selection)

        self.test_imprint_data(sequence)
        self.test_otio_export(sequence)

    def test_otio_export(self, sequence):
        test_dir = os.path.normpath(
            tempfile.mkdtemp(prefix="test_pyblish_tmp_")
        )
        export_path = os.path.normpath(
            os.path.join(
                test_dir, "otio_timeline_export.otio"
            )
        )
        otio_timeline = otio_export.create_otio_timeline(sequence)
        otio_export.write_to_file(
            otio_timeline, export_path
        )
        read_timeline_otio = otio.adapters.read_from_file(export_path)

        if otio_timeline != read_timeline_otio:
            raise Exception("Exported timeline is different from original")

        self.log.info(pformat(otio_timeline))
        self.log.info("Otio exported to: {}".format(export_path))

    def test_imprint_data(self, sequence):
        with opfapi.maintained_segment_selection(sequence) as sel_segments:
            for segment in sel_segments:
                if str(segment.name)[1:-1] == "":
                    continue

                self.log.debug("Segment with OpenPypeData: {}".format(
                    segment.name))

                opfapi.imprint(segment, {
                    'asset': segment.name.get_value(),
                    'productType': 'render',
                    'productName': 'productMain'
                })
@ -1,419 +0,0 @@
import re
import pyblish.api
import ayon_flame.api as opfapi
from ayon_flame.otio import flame_export
from ayon_core.pipeline import AYON_INSTANCE_ID, AVALON_INSTANCE_ID
from ayon_core.pipeline.editorial import (
    is_overlapping_otio_ranges,
    get_media_range_with_retimes
)

# developer reload modules
from pprint import pformat

# constants
NUM_PATTERN = re.compile(r"([0-9\.]+)")
TXT_PATTERN = re.compile(r"([a-zA-Z]+)")


class CollectTimelineInstances(pyblish.api.ContextPlugin):
    """Collect all Timeline segment selection."""

    order = pyblish.api.CollectorOrder - 0.09
    label = "Collect timeline Instances"
    hosts = ["flame"]

    settings_category = "flame"

    audio_track_items = []

    # settings
    xml_preset_attrs_from_comments = []
    add_tasks = []

    def process(self, context):
        selected_segments = context.data["flameSelectedSegments"]
        self.log.debug("__ selected_segments: {}".format(selected_segments))

        self.otio_timeline = context.data["otioTimeline"]
        self.fps = context.data["fps"]

        # process all selected
        for segment in selected_segments:
            # get openpype tag data
            marker_data = opfapi.get_segment_data_marker(segment)

            self.log.debug("__ marker_data: {}".format(
                pformat(marker_data)))

            if not marker_data:
                continue

            if marker_data.get("id") not in {
                AYON_INSTANCE_ID, AVALON_INSTANCE_ID
            }:
                continue

            self.log.debug("__ segment.name: {}".format(
                segment.name
            ))

            comment_attributes = self._get_comment_attributes(segment)

            self.log.debug("_ comment_attributes: {}".format(
                pformat(comment_attributes)))

            clip_data = opfapi.get_segment_attributes(segment)
            clip_name = clip_data["segment_name"]
            self.log.debug("clip_name: {}".format(clip_name))

            # get otio clip data
            otio_data = self._get_otio_clip_instance_data(clip_data) or {}
            self.log.debug("__ otio_data: {}".format(pformat(otio_data)))

            # get file path
            file_path = clip_data["fpath"]

            first_frame = opfapi.get_frame_from_filename(file_path) or 0

            head, tail = self._get_head_tail(
                clip_data,
                otio_data["otioClip"],
                marker_data["handleStart"],
                marker_data["handleEnd"]
            )

            # make sure there is no None value, rather 0
            if head is None:
                head = 0
            if tail is None:
                tail = 0

            # make sure value is absolute
            if head != 0:
                head = abs(head)
            if tail != 0:
                tail = abs(tail)

            # solve handles length
            marker_data["handleStart"] = min(
                marker_data["handleStart"], head)
            marker_data["handleEnd"] = min(
                marker_data["handleEnd"], tail)

            # Backward compatibility fix of 'entity_type' > 'folder_type'
            if "parents" in marker_data:
                for parent in marker_data["parents"]:
                    if "entity_type" in parent:
                        parent["folder_type"] = parent.pop("entity_type")

            workfile_start = self._set_workfile_start(marker_data)

            with_audio = bool(marker_data.pop("audio"))

            # add marker data to instance data
            inst_data = dict(marker_data.items())

            # add otio_data to instance data
            inst_data.update(otio_data)

            folder_path = marker_data["folderPath"]
            folder_name = folder_path.rsplit("/")[-1]
            product_name = marker_data["productName"]

            # insert product type into families
            product_type = marker_data["productType"]
            families = [str(f) for f in marker_data["families"]]
            families.insert(0, str(product_type))

            # form label
            label = folder_name
            if folder_name != clip_name:
                label += " ({})".format(clip_name)
            label += " {} [{}]".format(product_name, ", ".join(families))

            inst_data.update({
                "name": "{}_{}".format(folder_name, product_name),
                "label": label,
                "folderPath": folder_path,
                "item": segment,
                "families": families,
                "publish": marker_data["publish"],
                "fps": self.fps,
                "workfileFrameStart": workfile_start,
                "sourceFirstFrame": int(first_frame),
                "retimedHandles": marker_data.get("retimedHandles"),
                "shotDurationFromSource": (
                    not marker_data.get("retimedFramerange")),
                "path": file_path,
                "flameAddTasks": self.add_tasks,
                "tasks": {
                    task["name"]: {"type": task["type"]}
                    for task in self.add_tasks},
                "representations": [],
                "newHierarchyIntegration": True,
                # Backwards compatible (Deprecated since 24/06/06)
                "newAssetPublishing": True,
            })
            self.log.debug("__ inst_data: {}".format(pformat(inst_data)))

            # add resolution
            self._get_resolution_to_data(inst_data, context)

            # add comment attributes if any
            inst_data.update(comment_attributes)

            # create instance
            instance = context.create_instance(**inst_data)

            # add colorspace data
            instance.data.update({
                "versionData": {
                    "colorspace": clip_data["colour_space"],
                }
            })

            # create shot instance for shot attributes create/update
            self._create_shot_instance(context, clip_name, **inst_data)

            self.log.info("Creating instance: {}".format(instance))
            self.log.info(
                "_ instance.data: {}".format(pformat(instance.data)))

            if not with_audio:
                continue

            # add audioReview attribute to plate instance data
            # if reviewTrack is on
            if marker_data.get("reviewTrack") is not None:
                instance.data["reviewAudio"] = True

    @staticmethod
    def _set_workfile_start(data):
        include_handles = data.get("includeHandles")
        workfile_start = data["workfileFrameStart"]
        handle_start = data["handleStart"]

        if include_handles:
            workfile_start += handle_start

        return workfile_start

    def _get_comment_attributes(self, segment):
        comment = segment.comment.get_value()

        # try to find attributes
        attributes = {
            "xml_overrides": {
                "pixelRatio": 1.00}
        }
        # search for `:`
        for split in self._split_comments(comment):
            # make sure we ignore if not `:` in key
            if ":" not in split:
                continue

            self._get_xml_preset_attrs(
                attributes, split)

        # add xml overrides resolution to instance data
        xml_overrides = attributes["xml_overrides"]
        if xml_overrides.get("width"):
            attributes.update({
                "resolutionWidth": xml_overrides["width"],
                "resolutionHeight": xml_overrides["height"],
                "pixelAspect": xml_overrides["pixelRatio"]
            })

        return attributes

    def _get_xml_preset_attrs(self, attributes, split):

        # split to key and value
        key, value = split.split(":")

        for attr_data in self.xml_preset_attrs_from_comments:
            a_name = attr_data["name"]
            a_type = attr_data["type"]

            # exclude all not related attributes
            if a_name.lower() not in key.lower():
                continue

            # get pattern defined by type
            pattern = TXT_PATTERN
            if a_type in ("number", "float"):
                pattern = NUM_PATTERN

            res_group = pattern.findall(value)

            # raise if nothing is found as it is not correctly defined
            if not res_group:
                raise ValueError((
                    "Value for `{}` attribute is not "
                    "set correctly: `{}`").format(a_name, split))

            if "string" in a_type:
                _value = res_group[0]
            if "float" in a_type:
                _value = float(res_group[0])
            if "number" in a_type:
                _value = int(res_group[0])

            attributes["xml_overrides"][a_name] = _value

        # condition for resolution in key
        if "resolution" in key.lower():
            res_group = NUM_PATTERN.findall(value)
            # check if aspect was also defined
            # 1920x1080x1.5
            aspect = res_group[2] if len(res_group) > 2 else 1

            width = int(res_group[0])
            height = int(res_group[1])
            pixel_ratio = float(aspect)
            attributes["xml_overrides"].update({
                "width": width,
                "height": height,
                "pixelRatio": pixel_ratio
            })

    def _split_comments(self, comment_string):
        # first split comment by comma
        split_comments = []
        if "," in comment_string:
            split_comments.extend(comment_string.split(","))
        elif ";" in comment_string:
            split_comments.extend(comment_string.split(";"))
        else:
            split_comments.append(comment_string)

        return split_comments

    def _get_head_tail(self, clip_data, otio_clip, handle_start, handle_end):
        # calculate head and tail with forward compatibility
        head = clip_data.get("segment_head")
        tail = clip_data.get("segment_tail")
        self.log.debug("__ head: `{}`".format(head))
        self.log.debug("__ tail: `{}`".format(tail))

        # HACK: it is here to serve for versions below 2021.1
        if not any([head, tail]):
            retimed_attributes = get_media_range_with_retimes(
                otio_clip, handle_start, handle_end)
            self.log.debug(
                ">> retimed_attributes: {}".format(retimed_attributes))

            # retimed head and tail
            head = int(retimed_attributes["handleStart"])
            tail = int(retimed_attributes["handleEnd"])

        return head, tail

    def _get_resolution_to_data(self, data, context):
        assert data.get("otioClip"), "Missing `otioClip` data"

        # solve source resolution option
        if data.get("sourceResolution", None):
            otio_clip_metadata = data[
                "otioClip"].media_reference.metadata
            data.update({
                "resolutionWidth": otio_clip_metadata[
                    "openpype.source.width"],
                "resolutionHeight": otio_clip_metadata[
                    "openpype.source.height"],
                "pixelAspect": otio_clip_metadata[
                    "openpype.source.pixelAspect"]
            })
        else:
            otio_tl_metadata = context.data["otioTimeline"].metadata
            data.update({
                "resolutionWidth": otio_tl_metadata["openpype.timeline.width"],
                "resolutionHeight": otio_tl_metadata[
                    "openpype.timeline.height"],
                "pixelAspect": otio_tl_metadata[
                    "openpype.timeline.pixelAspect"]
            })

    def _create_shot_instance(self, context, clip_name, **data):
        master_layer = data.get("heroTrack")
        hierarchy_data = data.get("hierarchyData")

        if not master_layer:
            return

        if not hierarchy_data:
            return

        folder_path = data["folderPath"]
        folder_name = folder_path.rsplit("/")[-1]
        product_name = "shotMain"

        # insert product type into families
        product_type = "shot"

        # form label
        label = folder_name
        if folder_name != clip_name:
            label += " ({}) ".format(clip_name)
        label += " {}".format(product_name)
        label += " [{}]".format(product_type)

        data.update({
            "name": "{}_{}".format(folder_name, product_name),
            "label": label,
            "productName": product_name,
            "folderPath": folder_path,
            "productType": product_type,
            "family": product_type,
            "families": [product_type]
        })

        instance = context.create_instance(**data)
        self.log.info("Creating instance: {}".format(instance))
        self.log.debug(
            "_ instance.data: {}".format(pformat(instance.data)))

    def _get_otio_clip_instance_data(self, clip_data):
        """Return otio clip object matching the given segment.

        Args:
            clip_data (dict): segment attributes as returned by
                opfapi.get_segment_attributes()

        Returns:
            dict: otio clip object

        """
        segment = clip_data["PySegment"]
        s_track_name = segment.parent.name.get_value()
        timeline_range = self._create_otio_time_range_from_timeline_item_data(
            clip_data)

        for otio_clip in self.otio_timeline.each_clip():
            track_name = otio_clip.parent().name
            parent_range = otio_clip.range_in_parent()
            if s_track_name not in track_name:
                continue
            if otio_clip.name not in segment.name.get_value():
                continue
            if is_overlapping_otio_ranges(
                    parent_range, timeline_range, strict=True):

                # add pypedata marker to otio_clip metadata
                for marker in otio_clip.markers:
                    if opfapi.MARKER_NAME in marker.name:
                        otio_clip.metadata.update(marker.metadata)
                return {"otioClip": otio_clip}

        return None

    def _create_otio_time_range_from_timeline_item_data(self, clip_data):
        frame_start = int(clip_data["record_in"])
        frame_duration = int(clip_data["record_duration"])

        return flame_export.create_otio_time_range(
            frame_start, frame_duration, self.fps)
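A self-contained sketch of the comment parsing above: pulling resolution overrides like "resolution: 1920x1080x1.5" out of a segment comment. The regex mirrors NUM_PATTERN; the comment string is invented for illustration.

import re

NUM_PATTERN = re.compile(r"([0-9\.]+)")

comment = "resolution: 1920x1080x1.5, note: approved"
for part in comment.split(","):
    if ":" not in part:
        continue
    key, value = part.split(":")
    if "resolution" not in key.lower():
        continue
    numbers = NUM_PATTERN.findall(value)
    # third number is the optional pixel aspect ratio
    aspect = numbers[2] if len(numbers) > 2 else 1
    print(int(numbers[0]), int(numbers[1]), float(aspect))  # 1920 1080 1.5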
@ -1,67 +0,0 @@
import pyblish.api

import ayon_flame.api as opfapi
from ayon_flame.otio import flame_export
from ayon_core.pipeline.create import get_product_name


class CollectTimelineOTIO(pyblish.api.ContextPlugin):
    """Inject the current working context into publish context."""

    label = "Collect Timeline OTIO"
    order = pyblish.api.CollectorOrder - 0.099

    def process(self, context):
        # plugin defined
        product_type = "workfile"
        variant = "otioTimeline"

        # main
        folder_entity = context.data["folderEntity"]
        project = opfapi.get_current_project()
        sequence = opfapi.get_current_sequence(opfapi.CTX.selection)

        # create product name
        task_entity = context.data["taskEntity"]
        task_name = task_type = None
        if task_entity:
            task_name = task_entity["name"]
            task_type = task_entity["taskType"]
        product_name = get_product_name(
            context.data["projectName"],
            task_name,
            task_type,
            context.data["hostName"],
            product_type,
            variant,
            project_settings=context.data["project_settings"]
        )

        # adding otio timeline to context
        with opfapi.maintained_segment_selection(sequence) as selected_seg:
            otio_timeline = flame_export.create_otio_timeline(sequence)

            instance_data = {
                "name": product_name,
                "folderPath": folder_entity["path"],
                "productName": product_name,
                "productType": product_type,
                "family": product_type,
                "families": [product_type]
            }

            # create instance with workfile
            instance = context.create_instance(**instance_data)
            self.log.info("Creating instance: {}".format(instance))

            # update context with main project attributes
            context.data.update({
                "flameProject": project,
                "flameSequence": sequence,
                "otioTimeline": otio_timeline,
                "currentFile": "Flame/{}/{}".format(
                    project.name, sequence.name
                ),
                "flameSelectedSegments": selected_seg,
                "fps": float(str(sequence.frame_rate)[:-4])
            })
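The `fps` value above relies on Flame reporting the frame rate as a string with a trailing " fps" unit, which the `[:-4]` slice strips off; a tiny sketch of that conversion, with a made-up value:

frame_rate = "23.976 fps"  # example of the string Flame returns
fps = float(str(frame_rate)[:-4])
print(fps)  # 23.976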
@ -1,43 +0,0 @@
import os
import pyblish.api
import opentimelineio as otio
from ayon_core.pipeline import publish


class ExtractOTIOFile(publish.Extractor):
    """Extractor exporting the OTIO file."""

    label = "Extract OTIO file"
    order = pyblish.api.ExtractorOrder - 0.45
    families = ["workfile"]
    hosts = ["flame"]

    def process(self, instance):
        # create representation data
        if "representations" not in instance.data:
            instance.data["representations"] = []

        name = instance.data["name"]
        staging_dir = self.staging_dir(instance)

        otio_timeline = instance.context.data["otioTimeline"]
        # create otio timeline representation
        otio_file_name = name + ".otio"
        otio_file_path = os.path.join(staging_dir, otio_file_name)

        # export otio file to temp dir
        otio.adapters.write_to_file(otio_timeline, otio_file_path)

        representation_otio = {
            'name': "otio",
            'ext': "otio",
            'files': otio_file_name,
            "stagingDir": staging_dir,
        }

        instance.data["representations"].append(representation_otio)

        self.log.info("Added OTIO file representation: {}".format(
            representation_otio))
@ -1,560 +0,0 @@
import os
import re
from copy import deepcopy

import pyblish.api

from ayon_core.pipeline import publish
from ayon_flame import api as opfapi
from ayon_flame.api import MediaInfoFile
from ayon_core.pipeline.editorial import (
    get_media_range_with_retimes
)

import flame


class ExtractProductResources(publish.Extractor):
    """Extractor for transcoding files from Flame clip."""

    label = "Extract product resources"
    order = pyblish.api.ExtractorOrder
    families = ["clip"]
    hosts = ["flame"]

    settings_category = "flame"

    # plugin defaults
    keep_original_representation = False

    default_presets = {
        "thumbnail": {
            "active": True,
            "ext": "jpg",
            "xml_preset_file": "Jpeg (8-bit).xml",
            "xml_preset_dir": "",
            "export_type": "File Sequence",
            "parsed_comment_attrs": False,
            "colorspace_out": "Output - sRGB",
            "representation_add_range": False,
            "representation_tags": ["thumbnail"],
            "path_regex": ".*"
        }
    }

    # hide publisher during exporting
    hide_ui_on_process = True

    # settings
    export_presets_mapping = []

    def process(self, instance):
        if not self.keep_original_representation:
            # remove previous representation if not needed
            instance.data["representations"] = []

        # flame objects
        segment = instance.data["item"]
        folder_path = instance.data["folderPath"]
        segment_name = segment.name.get_value()
        clip_path = instance.data["path"]
        sequence_clip = instance.context.data["flameSequence"]

        # segment's parent track name
        s_track_name = segment.parent.name.get_value()

        # get configured workfile frame start/end (handles excluded)
        frame_start = instance.data["frameStart"]
        # get media source first frame
        source_first_frame = instance.data["sourceFirstFrame"]

        self.log.debug("_ frame_start: {}".format(frame_start))
        self.log.debug("_ source_first_frame: {}".format(source_first_frame))

        # get timeline in/out of segment
        clip_in = instance.data["clipIn"]
        clip_out = instance.data["clipOut"]

        # get retimed attributes
        retimed_data = self._get_retimed_attributes(instance)

        # get individual keys
        retimed_handle_start = retimed_data["handle_start"]
        retimed_handle_end = retimed_data["handle_end"]
        retimed_source_duration = retimed_data["source_duration"]
        retimed_speed = retimed_data["speed"]

        # get handles value - take only the max from both
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]
        handles = max(handle_start, handle_end)
        include_handles = instance.data.get("includeHandles")
        retimed_handles = instance.data.get("retimedHandles")

        # get media source range with handles
        source_start_handles = instance.data["sourceStartH"]
        source_end_handles = instance.data["sourceEndH"]

        # retime if needed
        if retimed_speed != 1.0:
            if retimed_handles:
                # handles are retimed
                source_start_handles = (
                    instance.data["sourceStart"] - retimed_handle_start)
                source_end_handles = (
                    source_start_handles
                    + (retimed_source_duration - 1)
                    + retimed_handle_start
                    + retimed_handle_end
                )

            else:
                # handles are not retimed
                source_end_handles = (
                    source_start_handles
                    + (retimed_source_duration - 1)
                    + handle_start
                    + handle_end
                )

        # get frame range with handles for representation range
        frame_start_handle = frame_start - handle_start
        repre_frame_start = frame_start_handle
        if include_handles:
            if retimed_speed == 1.0 or not retimed_handles:
                frame_start_handle = frame_start
            else:
                frame_start_handle = (
                    frame_start - handle_start) + retimed_handle_start

        self.log.debug("_ frame_start_handle: {}".format(
            frame_start_handle))
        self.log.debug("_ repre_frame_start: {}".format(
            repre_frame_start))

        # calculate duration with handles
        source_duration_handles = (
            source_end_handles - source_start_handles) + 1

        self.log.debug("_ source_duration_handles: {}".format(
            source_duration_handles))

        # create staging dir path
        staging_dir = self.staging_dir(instance)

        # append staging dir for later cleanup
        instance.context.data["cleanupFullPaths"].append(staging_dir)

        export_presets_mapping = {}
        for preset_mapping in deepcopy(self.export_presets_mapping):
            name = preset_mapping.pop("name")
            export_presets_mapping[name] = preset_mapping

        # add default preset type for thumbnail and reviewable video
        # update them with settings and override in case the same
        # are found in there
        _preset_keys = [k.split('_')[0] for k in export_presets_mapping]
        export_presets = {
            k: v
            for k, v in deepcopy(self.default_presets).items()
            if k not in _preset_keys
        }
        export_presets.update(export_presets_mapping)

        if not instance.data.get("versionData"):
            instance.data["versionData"] = {}

        # set versiondata if any retime
        version_data = retimed_data.get("version_data")
        self.log.debug("_ version_data: {}".format(version_data))

        if version_data:
            instance.data["versionData"].update(version_data)

        # version data start frame
        version_frame_start = frame_start
        if include_handles:
            version_frame_start = frame_start_handle
        if retimed_speed != 1.0:
            if retimed_handles:
                instance.data["versionData"].update({
                    "frameStart": version_frame_start,
                    "frameEnd": (
                        (version_frame_start + source_duration_handles - 1)
                        - (retimed_handle_start + retimed_handle_end)
                    )
                })
            else:
                instance.data["versionData"].update({
                    "handleStart": handle_start,
                    "handleEnd": handle_end,
                    "frameStart": version_frame_start,
                    "frameEnd": (
                        (version_frame_start + source_duration_handles - 1)
                        - (handle_start + handle_end)
                    )
                })
        self.log.debug("_ version_data: {}".format(
            instance.data["versionData"]
        ))

        # loop all preset names and process the exports
        for unique_name, preset_config in export_presets.items():
            modify_xml_data = {}

            if self._should_skip(preset_config, clip_path, unique_name):
                continue

            # get all presets attributes
            extension = preset_config["ext"]
            preset_file = preset_config["xml_preset_file"]
            preset_dir = preset_config["xml_preset_dir"]
            export_type = preset_config["export_type"]
            repre_tags = preset_config["representation_tags"]
            parsed_comment_attrs = preset_config["parsed_comment_attrs"]
            color_out = preset_config["colorspace_out"]

            self.log.info(
                "Processing `{}` as `{}` to `{}` type...".format(
                    preset_file, export_type, extension
                )
            )

            exporting_clip = None
            name_pattern_xml = "<name>_{}.".format(
                unique_name)

            if export_type == "Sequence Publish":
                # change export clip to sequence
                exporting_clip = flame.duplicate(sequence_clip)

                # only keep visible layer where instance segment is child
                self.hide_others(
                    exporting_clip, segment_name, s_track_name)

                # change name pattern
                name_pattern_xml = (
                    "<segment name>_<shot name>_{}.").format(
                        unique_name)

                # only for h264 with baked retime
                in_mark = clip_in
                out_mark = clip_out + 1
                modify_xml_data.update({
                    "exportHandles": True,
                    "nbHandles": handles
                })
            else:
                in_mark = (source_start_handles - source_first_frame) + 1
                out_mark = in_mark + source_duration_handles
                exporting_clip = self.import_clip(clip_path)
                exporting_clip.name.set_value("{}_{}".format(
                    folder_path, segment_name))

            # add xml tags modifications
            modify_xml_data.update({
                # enum position low start from 0
                "frameIndex": 0,
                "startFrame": repre_frame_start,
                "namePattern": name_pattern_xml
            })

            if parsed_comment_attrs:
                # add any xml overrides collected from segment.comment
                modify_xml_data.update(instance.data["xml_overrides"])

            self.log.debug("_ in_mark: {}".format(in_mark))
            self.log.debug("_ out_mark: {}".format(out_mark))

            export_kwargs = {}
            # validate xml preset file is filled
            if preset_file == "":
                raise ValueError(
                    ("Check Settings for {} preset: "
                     "`XML preset file` is not filled").format(
                        unique_name)
                )

            # resolve xml preset dir if not filled
            if preset_dir == "":
                preset_dir = opfapi.get_preset_path_by_xml_name(
                    preset_file)

                if not preset_dir:
                    raise ValueError(
                        ("Check Settings for {} preset: "
                         "`XML preset file` {} is not found").format(
                            unique_name, preset_file)
                    )

            # create preset path
            preset_orig_xml_path = str(os.path.join(
                preset_dir, preset_file
            ))

            # define kwargs based on preset type
            if "thumbnail" in unique_name:
                modify_xml_data.update({
                    "video/posterFrame": True,
                    "video/useFrameAsPoster": 1,
                    "namePattern": "__thumbnail"
                })
                thumb_frame_number = int(in_mark + (
                    (out_mark - in_mark + 1) / 2))

                self.log.debug("__ thumb_frame_number: {}".format(
                    thumb_frame_number
                ))

                export_kwargs["thumb_frame_number"] = thumb_frame_number
            else:
                export_kwargs.update({
                    "in_mark": in_mark,
                    "out_mark": out_mark
                })

            preset_path = opfapi.modify_preset_file(
                preset_orig_xml_path, staging_dir, modify_xml_data)

            # get and make export dir paths
            export_dir_path = str(os.path.join(
                staging_dir, unique_name
            ))
            os.makedirs(export_dir_path)

            # export
            opfapi.export_clip(
                export_dir_path, exporting_clip, preset_path, **export_kwargs)

            repr_name = unique_name
            # make sure only first segment is used if underscore in name
            # HACK: `ftrackreview_withLUT` will result only in `ftrackreview`
            if (
                "thumbnail" in unique_name
                or "ftrackreview" in unique_name
            ):
                repr_name = unique_name.split("_")[0]

            # create representation data
            representation_data = {
                "name": repr_name,
                "outputName": repr_name,
                "ext": extension,
                "stagingDir": export_dir_path,
                "tags": repre_tags,
                "data": {
                    "colorspace": color_out
                },
                "load_to_batch_group": preset_config.get(
                    "load_to_batch_group"),
                "batch_group_loader_name": preset_config.get(
                    "batch_group_loader_name") or None
            }

            # collect all available content of export dir
            files = os.listdir(export_dir_path)

            # make sure no nested folders inside
            n_stage_dir, n_files = self._unfolds_nested_folders(
                export_dir_path, files, extension)

            # fix representation in case of nested folders
            if n_stage_dir:
                representation_data["stagingDir"] = n_stage_dir
                files = n_files

            # add files to representation but add
            # imagesequence as list
            if (
                # first check if path in files is not mov extension
                [
                    f for f in files
                    if os.path.splitext(f)[-1] == ".mov"
                ]
                # then try if thumbnail is not in unique name
                or repr_name == "thumbnail"
            ):
                representation_data["files"] = files.pop()
            else:
                representation_data["files"] = files

            # add frame range
            if preset_config["representation_add_range"]:
                representation_data.update({
                    "frameStart": repre_frame_start,
                    "frameEnd": (
                        repre_frame_start + source_duration_handles) - 1,
                    "fps": instance.data["fps"]
                })

            instance.data["representations"].append(representation_data)

            # add review family if found in tags
            if "review" in repre_tags:
                instance.data["families"].append("review")

            self.log.info("Added representation: {}".format(
                representation_data))

            if export_type == "Sequence Publish":
                # at the end remove the duplicated clip
                flame.delete(exporting_clip)

    def _get_retimed_attributes(self, instance):
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        # get basic variables
        otio_clip = instance.data["otioClip"]

        # get available range trimmed with processed retimes
        retimed_attributes = get_media_range_with_retimes(
            otio_clip, handle_start, handle_end)
        self.log.debug(
            ">> retimed_attributes: {}".format(retimed_attributes))

        r_media_in = int(retimed_attributes["mediaIn"])
        r_media_out = int(retimed_attributes["mediaOut"])
        version_data = retimed_attributes.get("versionData")

        return {
            "version_data": version_data,
            "handle_start": int(retimed_attributes["handleStart"]),
            "handle_end": int(retimed_attributes["handleEnd"]),
            "source_duration": (
                (r_media_out - r_media_in) + 1
            ),
            "speed": float(retimed_attributes["speed"])
        }

    def _should_skip(self, preset_config, clip_path, unique_name):
        # get activating attributes
        activated_preset = preset_config["active"]
        filter_path_regex = preset_config.get("filter_path_regex")

        self.log.info(
            "Preset `{}` is active `{}` with filter `{}`".format(
                unique_name, activated_preset, filter_path_regex
            )
        )

        # skip if preset is not activated
        if not activated_preset:
            return True

        # exclude by regex filter if any
        if (
            filter_path_regex
            and not re.search(filter_path_regex, clip_path)
        ):
            return True

    def _unfolds_nested_folders(self, stage_dir, files_list, ext):
        """Unfolds nested folders.

        Args:
            stage_dir (str): path string with directory
            files_list (list): list of file names
            ext (str): extension (jpg)[without dot]

        Raises:
            AssertionError: in case no files were collected from any directory

        Returns:
            str, list: new staging dir path, new list of file names
            or
            None, None: in case of a single file in `files_list`
        """
        # exclude single files which have the same extension
        # as the input ext attr
        if (
            # only one file in list
            len(files_list) == 1
            # file has the input extension
            and ext in os.path.splitext(files_list[0])[-1]
        ):
            return None, None
        elif (
            # more than one file in list
            len(files_list) >= 1
            # extension is correct
            and ext in os.path.splitext(files_list[0])[-1]
            # test file exists
            and os.path.exists(
                os.path.join(stage_dir, files_list[0])
            )
        ):
            return None, None

        new_stage_dir = None
        new_files_list = []
        for file in files_list:
            search_path = os.path.join(stage_dir, file)
            if not os.path.isdir(search_path):
                continue
            for root, _dirs, files in os.walk(search_path):
                for _file in files:
                    _fn, _ext = os.path.splitext(_file)
                    if ext.lower() != _ext[1:].lower():
                        continue
                    new_files_list.append(_file)
                    if not new_stage_dir:
                        new_stage_dir = root

        if not new_stage_dir:
            raise AssertionError(
                "Files in `{}` are not correct! Check `{}`".format(
                    files_list, stage_dir)
            )

        return new_stage_dir, new_files_list

    def hide_others(self, sequence_clip, segment_name, track_name):
        """Helper method used only if sequence clip is used.

        Args:
            sequence_clip (flame.Clip): sequence clip
            segment_name (str): segment name
            track_name (str): track name
        """
        # create otio tracks and clips
        for ver in sequence_clip.versions:
            for track in ver.tracks:
                if len(track.segments) == 0 and track.hidden.get_value():
                    continue

                # hide tracks which are not the parent track
                if track.name.get_value() != track_name:
                    track.hidden = True
                    continue

                # hide all other segments
                for segment in track.segments:
                    if segment.name.get_value() != segment_name:
                        segment.hidden = True

    def import_clip(self, path):
        """Import clip from path."""
        dir_path = os.path.dirname(path)
        media_info = MediaInfoFile(path, logger=self.log)
        file_pattern = media_info.file_pattern
        self.log.debug("__ file_pattern: {}".format(file_pattern))

        # rejoin the pattern to dir path
        new_path = os.path.join(dir_path, file_pattern)

        clips = flame.import_clips(new_path)
        self.log.info("Clips [{}] imported from `{}`".format(clips, path))

        if not clips:
            self.log.warning("Path `{}` has no clips".format(path))
            return None
        elif len(clips) > 1:
            self.log.warning(
                "Path `{}` contains more than one clip".format(path)
            )
        return clips[0]
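A worked sketch of the in/out mark arithmetic used by the non-sequence export branch above, with made-up frame numbers; marks are 1-based offsets into the source media:

source_first_frame = 1001      # first frame of the media on disk
source_start_handles = 1011    # source cut-in including handles
source_duration_handles = 120  # duration including handles

in_mark = (source_start_handles - source_first_frame) + 1
out_mark = in_mark + source_duration_handles
print(in_mark, out_mark)  # 11 131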
@ -1,339 +0,0 @@
import os
import copy
from collections import OrderedDict
from pprint import pformat
import pyblish.api
import ayon_flame.api as opfapi
import ayon_core.pipeline as op_pipeline
from ayon_core.pipeline.workfile import get_workdir


class IntegrateBatchGroup(pyblish.api.InstancePlugin):
    """Integrate published shot to batch group."""

    order = pyblish.api.IntegratorOrder + 0.45
    label = "Integrate Batch Groups"
    hosts = ["flame"]
    families = ["clip"]

    settings_category = "flame"

    # settings
    default_loader = "LoadClip"

    def process(self, instance):
        add_tasks = instance.data["flameAddTasks"]

        # iterate all tasks from settings
        for task_data in add_tasks:
            # exclude batch group
            if not task_data["create_batch_group"]:
                continue

            # create or get already created batch group
            bgroup = self._get_batch_group(instance, task_data)

            # add batch group content
            all_batch_nodes = self._add_nodes_to_batch_with_links(
                instance, task_data, bgroup)

            for name, node in all_batch_nodes.items():
                self.log.debug("name: {}, dir: {}".format(
                    name, dir(node)
                ))
                self.log.debug("__ node.attributes: {}".format(
                    node.attributes
                ))

            # load plate to batch group
            self.log.info("Loading product `{}` into batch `{}`".format(
                instance.data["productName"], bgroup.name.get_value()
            ))
            self._load_clip_to_context(instance, bgroup)

    def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group):
        # get write file node properties > OrderedDict because order matters
        write_pref_data = self._get_write_prefs(instance, task_data)

        batch_nodes = [
            {
                "type": "comp",
                "properties": {},
                "id": "comp_node01"
            },
            {
                "type": "Write File",
                "properties": write_pref_data,
                "id": "write_file_node01"
            }
        ]
        batch_links = [
            {
                "from_node": {
                    "id": "comp_node01",
                    "connector": "Result"
                },
                "to_node": {
                    "id": "write_file_node01",
                    "connector": "Front"
                }
            }
        ]

        # add nodes into batch group
        return opfapi.create_batch_group_conent(
            batch_nodes, batch_links, batch_group)

    def _load_clip_to_context(self, instance, bgroup):
        # get all loaders for host
        loaders_by_name = {
            loader.__name__: loader
            for loader in op_pipeline.discover_loader_plugins()
        }

        # get all published representations
        published_representations = instance.data["published_representations"]
        repres_db_id_by_name = {
            repre_info["representation"]["name"]: repre_id
            for repre_id, repre_info in published_representations.items()
        }

        # get all loadable representations
        repres_by_name = {
            repre["name"]: repre for repre in instance.data["representations"]
        }

        # get repre_id for the loadable representations
        loader_name_by_repre_id = {
            repres_db_id_by_name[repr_name]: {
                "loader": repr_data["batch_group_loader_name"],
                # add repre data for exception logging
                "_repre_data": repr_data
            }
            for repr_name, repr_data in repres_by_name.items()
            if repr_data.get("load_to_batch_group")
        }

        self.log.debug("__ loader_name_by_repre_id: {}".format(pformat(
            loader_name_by_repre_id)))

        # get representation context from the repre_id
        repre_contexts = op_pipeline.load.get_repres_contexts(
            loader_name_by_repre_id.keys())

        self.log.debug("__ repre_contexts: {}".format(pformat(
            repre_contexts)))

        # loop all returned repres from repre_context dict
        for repre_id, repre_context in repre_contexts.items():
            self.log.debug("__ repre_id: {}".format(repre_id))
            # get loader name by representation id
            loader_name = (
                loader_name_by_repre_id[repre_id]["loader"]
                # if nothing was added to settings fallback to default
                or self.default_loader
            )

            # get loader plugin
            loader_plugin = loaders_by_name.get(loader_name)
            if loader_plugin:
                # load to flame by representation context
                try:
                    op_pipeline.load.load_with_repre_context(
                        loader_plugin, repre_context, **{
                            "data": {
                                "workdir": self.task_workdir,
                                "batch": bgroup
                            }
                        })
                except op_pipeline.load.IncompatibleLoaderError as msg:
                    self.log.error(
                        "Check allowed representations for Loader `{}` "
                        "in settings > error: {}".format(
                            loader_plugin.__name__, msg))
                    self.log.error(
                        "Representation context >>{}<< is not compatible "
                        "with loader `{}`".format(
                            pformat(repre_context), loader_plugin.__name__
                        )
                    )
            else:
                self.log.warning(
                    "Something went wrong and no Loader was found for "
                    "the following data: {}".format(
                        pformat(loader_name_by_repre_id))
                )

    def _get_batch_group(self, instance, task_data):
        frame_start = instance.data["frameStart"]
        frame_end = instance.data["frameEnd"]
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]
        frame_duration = (frame_end - frame_start) + 1
        folder_path = instance.data["folderPath"]

        task_name = task_data["name"]
        batchgroup_name = "{}_{}".format(folder_path, task_name)

        batch_data = {
            "shematic_reels": [
                "OP_LoadedReel"
            ],
            "handleStart": handle_start,
            "handleEnd": handle_end
        }
        self.log.debug(
            "__ batch_data: {}".format(pformat(batch_data)))

        # check if the batch group already exists
        bgroup = opfapi.get_batch_group_from_desktop(batchgroup_name)

        if not bgroup:
            self.log.info(
                "Creating new batch group: {}".format(batchgroup_name))
            # create batch with utils
            bgroup = opfapi.create_batch_group(
                batchgroup_name,
                frame_start,
                frame_duration,
                **batch_data
            )

        else:
            self.log.info(
                "Updating batch group: {}".format(batchgroup_name))
            # update already created batch group
            bgroup = opfapi.create_batch_group(
                batchgroup_name,
                frame_start,
                frame_duration,
                update_batch_group=bgroup,
                **batch_data
            )

        return bgroup

    def _get_anatomy_data_with_current_task(self, instance, task_data):
        anatomy_data = copy.deepcopy(instance.data["anatomyData"])
        task_name = task_data["name"]
        task_type = task_data["type"]
        anatomy_obj = instance.context.data["anatomy"]

        # update task data in anatomy data
        project_task_types = anatomy_obj["tasks"]
        task_code = project_task_types.get(task_type, {}).get("shortName")
        anatomy_data.update({
            "task": {
                "name": task_name,
                "type": task_type,
                "short": task_code
            }
        })
        return anatomy_data

    def _get_write_prefs(self, instance, task_data):
        # update task in anatomy data
        anatomy_data = self._get_anatomy_data_with_current_task(
            instance, task_data)

        self.task_workdir = self._get_shot_task_dir_path(
            instance, task_data)
        self.log.debug("__ task_workdir: {}".format(
            self.task_workdir))

        # TODO: this might be done with template in settings
        render_dir_path = os.path.join(
            self.task_workdir, "render", "flame")

        if not os.path.exists(render_dir_path):
            os.makedirs(render_dir_path, mode=0o777)

        # TODO: add most of these to `imageio/flame/batch/write_node`
        name = "{project[code]}_{folder[name]}_{task[name]}".format(
            **anatomy_data
        )

        # The path attribute where the rendered clip is exported
        # /path/to/file.[0001-0010].exr
        media_path = render_dir_path
        # name of file represented by tokens
        media_path_pattern = (
            "<name>_v<iteration###>/<name>_v<iteration###>.<frame><ext>")
        # The Create Open Clip attribute of the Write File node.
        # Determines if an Open Clip is created by the Write File node.
        create_clip = True
        # The Include Setup attribute of the Write File node.
        # Determines if a Batch Setup file is created by the Write File node.
        include_setup = True
        # The path attribute where the Open Clip file is exported by
        # the Write File node.
        create_clip_path = "<name>"
        # The path attribute where the Batch setup file
        # is exported by the Write File node.
        include_setup_path = "./<name>_v<iteration###>"
        # The file type for the files written by the Write File node.
        # Setting this attribute also overwrites format_extension,
        # bit_depth and compress_mode to match the defaults for
        # this file type.
        file_type = "OpenEXR"
        # The file extension for the files written by the Write File node.
        # This attribute resets to match file_type whenever file_type
        # is set. If you require a specific extension, you must
        # set format_extension after setting file_type.
        format_extension = "exr"
        # The bit depth for the files written by the Write File node.
        # This attribute resets to match file_type whenever file_type is set.
        bit_depth = "16"
        # The compressing attribute for the files exported by the Write
        # File node. Only relevant when file_type in 'OpenEXR', 'Sgi', 'Tiff'
        compress = True
        # The compression format attribute for the specific File Types
        # export by the Write File node. You must set compress_mode
        # after setting file_type.
        compress_mode = "DWAB"
        # The frame index mode attribute of the Write File node.
        # Value range: `Use Timecode` or `Use Start Frame`
        frame_index_mode = "Use Start Frame"
        frame_padding = 6
        # The versioning mode of the Open Clip exported by the Write File
        # node. Only available if create_clip = True.
        version_mode = "Follow Iteration"
        version_name = "v<version>"
        version_padding = 3

        # need to make sure the order of keys is correct
        return OrderedDict((
            ("name", name),
            ("media_path", media_path),
            ("media_path_pattern", media_path_pattern),
            ("create_clip", create_clip),
            ("include_setup", include_setup),
            ("create_clip_path", create_clip_path),
            ("include_setup_path", include_setup_path),
            ("file_type", file_type),
            ("format_extension", format_extension),
            ("bit_depth", bit_depth),
            ("compress", compress),
            ("compress_mode", compress_mode),
            ("frame_index_mode", frame_index_mode),
            ("frame_padding", frame_padding),
            ("version_mode", version_mode),
            ("version_name", version_name),
            ("version_padding", version_padding)
        ))

    def _get_shot_task_dir_path(self, instance, task_data):
        project_entity = instance.data["projectEntity"]
        folder_entity = instance.data["folderEntity"]
        task_entity = instance.data["taskEntity"]
        anatomy = instance.context.data["anatomy"]
        project_settings = instance.context.data["project_settings"]

        return get_workdir(
            project_entity,
            folder_entity,
            task_entity,
            "flame",
            anatomy=anatomy,
            project_settings=project_settings
        )
@ -1,58 +0,0 @@
<?xml version="1.0"?>
<preset version="9">
  <type>sequence</type>
  <comment>Creates an 8-bit Jpeg file per segment.</comment>
  <sequence>
    <fileType>NONE</fileType>
    <namePattern></namePattern>
    <composition><name></composition>
    <includeVideo>True</includeVideo>
    <exportVideo>True</exportVideo>
    <videoMedia>
      <mediaFileType>image</mediaFileType>
      <commit>FX</commit>
      <flatten>NoChange</flatten>
      <exportHandles>False</exportHandles>
      <nbHandles>10</nbHandles>
    </videoMedia>
    <includeAudio>True</includeAudio>
    <exportAudio>False</exportAudio>
    <audioMedia>
      <mediaFileType>audio</mediaFileType>
      <commit>FX</commit>
      <flatten>FlattenTracks</flatten>
      <exportHandles>True</exportHandles>
      <nbHandles>10</nbHandles>
    </audioMedia>
  </sequence>
  <video>
    <fileType>Jpeg</fileType>
    <codec>923688</codec>
    <codecProfile></codecProfile>
    <namePattern><shot name></namePattern>
    <compressionQuality>100</compressionQuality>
    <transferCharacteristic>2</transferCharacteristic>
    <colorimetricSpecification>4</colorimetricSpecification>
    <includeAlpha>False</includeAlpha>
    <overwriteWithVersions>False</overwriteWithVersions>
    <posterFrame>True</posterFrame>
    <useFrameAsPoster>1</useFrameAsPoster>
    <resize>
      <resizeType>fit</resizeType>
      <resizeFilter>lanczos</resizeFilter>
      <width>1920</width>
      <height>1080</height>
      <bitsPerChannel>8</bitsPerChannel>
      <numChannels>3</numChannels>
      <floatingPoint>False</floatingPoint>
      <bigEndian>True</bigEndian>
      <pixelRatio>1</pixelRatio>
      <scanFormat>P</scanFormat>
    </resize>
  </video>
  <name>
    <framePadding>4</framePadding>
    <startFrame>1</startFrame>
    <frameIndex>2</frameIndex>
  </name>
</preset>
@ -1,72 +0,0 @@
<?xml version="1.0"?>
<preset version="10">
  <type>sequence</type>
  <comment>Create MOV H264 files per segment with thumbnail</comment>
  <sequence>
    <fileType>NONE</fileType>
    <namePattern></namePattern>
    <composition><name></composition>
    <includeVideo>True</includeVideo>
    <exportVideo>True</exportVideo>
    <videoMedia>
      <mediaFileType>movie</mediaFileType>
      <commit>FX</commit>
      <flatten>FlattenTracks</flatten>
      <exportHandles>True</exportHandles>
      <nbHandles>5</nbHandles>
    </videoMedia>
    <includeAudio>True</includeAudio>
    <exportAudio>False</exportAudio>
    <audioMedia>
      <mediaFileType>audio</mediaFileType>
      <commit>Original</commit>
      <flatten>NoChange</flatten>
      <exportHandles>True</exportHandles>
      <nbHandles>5</nbHandles>
    </audioMedia>
  </sequence>
  <movie>
    <fileType>QuickTime</fileType>
    <namePattern><shot name></namePattern>
    <yuvHeadroom>0</yuvHeadroom>
    <yuvColourSpace>PCS_709</yuvColourSpace>
    <operationalPattern>None</operationalPattern>
    <companyName>Autodesk</companyName>
    <productName>Flame</productName>
    <versionName>2021</versionName>
  </movie>
  <video>
    <fileType>QuickTime</fileType>
    <codec>33622016</codec>
    <codecProfile>
      <rootPath>/opt/Autodesk/mediaconverter/</rootPath>
      <targetVersion>2021</targetVersion>
      <pathSuffix>/profiles/.33622016/HDTV_720p_8Mbits.cdxprof</pathSuffix>
    </codecProfile>
    <namePattern><shot name>_<video codec></namePattern>
    <compressionQuality>50</compressionQuality>
    <transferCharacteristic>2</transferCharacteristic>
    <colorimetricSpecification>4</colorimetricSpecification>
    <includeAlpha>False</includeAlpha>
    <overwriteWithVersions>False</overwriteWithVersions>
    <posterFrame>False</posterFrame>
    <useFrameAsPoster>1</useFrameAsPoster>
    <resize>
      <resizeType>fit</resizeType>
      <resizeFilter>gaussian</resizeFilter>
      <width>1920</width>
      <height>1080</height>
      <bitsPerChannel>8</bitsPerChannel>
      <numChannels>3</numChannels>
      <floatingPoint>False</floatingPoint>
      <bigEndian>True</bigEndian>
      <pixelRatio>1</pixelRatio>
      <scanFormat>P</scanFormat>
    </resize>
  </video>
  <name>
    <framePadding>4</framePadding>
    <startFrame>1</startFrame>
    <frameIndex>2</frameIndex>
  </name>
</preset>
@ -1,162 +0,0 @@
|
|||
import os
import io
import ConfigParser as CP
from xml.etree import ElementTree as ET
from contextlib import contextmanager

PLUGIN_DIR = os.path.dirname(os.path.dirname(__file__))
EXPORT_PRESETS_DIR = os.path.join(PLUGIN_DIR, "export_preset")

CONFIG_DIR = os.path.join(os.path.expanduser(
    "~/.openpype"), "openpype_babypublisher")


@contextmanager
def make_temp_dir():
    import tempfile

    try:
        dirpath = tempfile.mkdtemp()

        yield dirpath

    except IOError as _error:
        raise IOError("Not able to create temp dir file: {}".format(_error))

    finally:
        pass


@contextmanager
def get_config(section=None):
    cfg_file_path = os.path.join(CONFIG_DIR, "settings.ini")

    # create config dir
    if not os.path.exists(CONFIG_DIR):
        print("making dirs at: `{}`".format(CONFIG_DIR))
        os.makedirs(CONFIG_DIR, mode=0o777)

    # write default data to settings.ini
    if not os.path.exists(cfg_file_path):
        default_cfg = cfg_default()
        config = CP.RawConfigParser()
        config.readfp(io.BytesIO(default_cfg))
        with open(cfg_file_path, 'wb') as cfg_file:
            config.write(cfg_file)

    try:
        config = CP.RawConfigParser()
        config.read(cfg_file_path)
        if section:
            _cfg_data = {
                k: v
                for s in config.sections()
                for k, v in config.items(s)
                if s == section
            }
        else:
            _cfg_data = {s: dict(config.items(s)) for s in config.sections()}

        yield _cfg_data

    except IOError as _error:
        raise IOError('Not able to read settings.ini file: {}'.format(_error))

    finally:
        pass
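# Usage sketch (illustrative): read the whole [main] section back as a
# flat dict; the keys match the defaults written by cfg_default() below.
#
#     with get_config("main") as cfg:
#         print(cfg["shot_name_template"])  # -> "{sequence}_{shot}"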


def set_config(cfg_data, section=None):
    cfg_file_path = os.path.join(CONFIG_DIR, "settings.ini")

    config = CP.RawConfigParser()
    config.read(cfg_file_path)

    try:
        if not section:
            for section in cfg_data:
                for key, value in cfg_data[section].items():
                    config.set(section, key, value)
        else:
            for key, value in cfg_data.items():
                config.set(section, key, value)

        with open(cfg_file_path, 'wb') as cfg_file:
            config.write(cfg_file)

    except IOError as _error:
        raise IOError('Not able to write settings.ini file: {}'.format(_error))


def cfg_default():
    return """
[main]
workfile_start_frame = 1001
shot_handles = 0
shot_name_template = {sequence}_{shot}
hierarchy_template = shots[Folder]/{sequence}[Sequence]
create_task_type = Compositing
"""


def configure_preset(file_path, data):
    split_fp = os.path.splitext(file_path)
    new_file_path = split_fp[0] + "_tmp" + split_fp[-1]
    with open(file_path, "r") as datafile:
        tree = ET.parse(datafile)
        for key, value in data.items():
            for element in tree.findall(".//{}".format(key)):
                print(element)
                element.text = str(value)
        tree.write(new_file_path)

    return new_file_path
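# Usage sketch (illustrative): override a single XML element before an
# export, e.g. the handle count in an export preset; the tweaked copy is
# written next to the source file with a "_tmp" suffix.
#
#     new_preset = configure_preset(
#         os.path.join(EXPORT_PRESETS_DIR, "openpype_seg_video_h264.xml"),
#         {"nbHandles": 10}
#     )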


def export_thumbnail(sequence, tempdir_path, data):
    import flame
    export_preset = os.path.join(
        EXPORT_PRESETS_DIR,
        "openpype_seg_thumbnails_jpg.xml"
    )
    new_path = configure_preset(export_preset, data)
    poster_frame_exporter = flame.PyExporter()
    poster_frame_exporter.foreground = True
    poster_frame_exporter.export(sequence, new_path, tempdir_path)


def export_video(sequence, tempdir_path, data):
    import flame
    export_preset = os.path.join(
        EXPORT_PRESETS_DIR,
        "openpype_seg_video_h264.xml"
    )
    new_path = configure_preset(export_preset, data)
    poster_frame_exporter = flame.PyExporter()
    poster_frame_exporter.foreground = True
    poster_frame_exporter.export(sequence, new_path, tempdir_path)


def timecode_to_frames(timecode, framerate):
    def _seconds(value):
        if isinstance(value, str):
            _zip_ft = zip((3600, 60, 1, 1 / framerate), value.split(':'))
            return sum(f * float(t) for f, t in _zip_ft)
        elif isinstance(value, (int, float)):
            return value / framerate
        return 0

    def _frames(seconds):
        return seconds * framerate

    def tc_to_frames(_timecode, start=None):
        return _frames(_seconds(_timecode) - _seconds(start))

    if '+' in timecode:
        timecode = timecode.replace('+', ':')
    elif '#' in timecode:
        timecode = timecode.replace('#', ':')

    frames = int(round(tc_to_frames(timecode, start='00:00:00:00')))

    return frames
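# Worked example: with a 24 fps sequence, "00:00:04#12" is first
# normalized to "00:00:04:12", i.e. 4 seconds plus 12 frames:
#
#     timecode_to_frames("00:00:04#12", 24.0)  # -> 4 * 24 + 12 == 108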
|
|
@ -1,459 +0,0 @@
|
|||
import os
import sys
import six
import re
import json

import app_utils

# Fill following constants or set them via environment variable
FTRACK_MODULE_PATH = None
FTRACK_API_KEY = None
FTRACK_API_USER = None
FTRACK_SERVER = None


def import_ftrack_api():
    try:
        import ftrack_api
        return ftrack_api
    except ImportError:
        import sys
        ftrk_m_p = FTRACK_MODULE_PATH or os.getenv("FTRACK_MODULE_PATH")
        sys.path.append(ftrk_m_p)
        import ftrack_api
        return ftrack_api


def get_ftrack_session():
    import os
    ftrack_api = import_ftrack_api()

    # fill your own credentials
    url = FTRACK_SERVER or os.getenv("FTRACK_SERVER") or ""
    user = FTRACK_API_USER or os.getenv("FTRACK_API_USER") or ""
    api = FTRACK_API_KEY or os.getenv("FTRACK_API_KEY") or ""

    first_validation = True
    if not user:
        print('- Ftrack Username is not set')
        first_validation = False
    if not api:
        print('- Ftrack API key is not set')
        first_validation = False
    if not first_validation:
        return False

    try:
        return ftrack_api.Session(
            server_url=url,
            api_user=user,
            api_key=api
        )
    except Exception as _e:
        print("Can't log into Ftrack with used credentials: {}".format(_e))
        ftrack_cred = {
            'Ftrack server': str(url),
            'Username': str(user),
            'API key': str(api),
        }

        item_lens = [len(key) + 1 for key in ftrack_cred]
        justify_len = max(*item_lens)
        for key, value in ftrack_cred.items():
            print('{} {}'.format((key + ':').ljust(justify_len, ' '), value))
        return False


def get_project_task_types(project_entity):
    tasks = {}
    proj_template = project_entity['project_schema']
    temp_task_types = proj_template['_task_type_schema']['types']

    for type in temp_task_types:
        if type['name'] not in tasks:
            tasks[type['name']] = type

    return tasks


class FtrackComponentCreator:
    default_location = "ftrack.server"
    ftrack_locations = {}
    thumbnails = []
    videos = []
    temp_dir = None

    def __init__(self, session):
        self.session = session
        self._get_ftrack_location()

    def generate_temp_data(self, selection, change_preset_data):
        with app_utils.make_temp_dir() as tempdir_path:
            for seq in selection:
                app_utils.export_thumbnail(
                    seq, tempdir_path, change_preset_data)
                app_utils.export_video(seq, tempdir_path, change_preset_data)

        return tempdir_path

    def collect_generated_data(self, tempdir_path):
        temp_files = os.listdir(tempdir_path)
        self.thumbnails = [f for f in temp_files if "jpg" in f]
        self.videos = [f for f in temp_files if "mov" in f]
        self.temp_dir = tempdir_path

    def get_thumb_path(self, shot_name):
        # get component files
        thumb_f = next((f for f in self.thumbnails if shot_name in f), None)
        return os.path.join(self.temp_dir, thumb_f)

    def get_video_path(self, shot_name):
        # get component files
        video_f = next((f for f in self.videos if shot_name in f), None)
        return os.path.join(self.temp_dir, video_f)

    def close(self):
        self.ftrack_locations = {}
        self.session = None

    def create_component(self, shot_entity, data, assetversion_entity=None):
        self.shot_entity = shot_entity
        location = self._get_ftrack_location()

        file_path = data["file_path"]

        # get extension
        file = os.path.basename(file_path)
        _n, ext = os.path.splitext(file)

        name = "ftrackreview-mp4" if "mov" in ext else "thumbnail"

        component_data = {
            "name": name,
            "file_path": file_path,
            "file_type": ext,
            "location": location
        }

        if name == "ftrackreview-mp4":
            duration = data["duration"]
            handles = data["handles"]
            fps = data["fps"]
            component_data["metadata"] = {
                'ftr_meta': json.dumps({
                    'frameIn': int(0),
                    'frameOut': int(duration + (handles * 2)),
                    'frameRate': float(fps)
                })
            }
        if not assetversion_entity:
            # get assettype entity from session
            assettype_entity = self._get_assettype({"short": "reference"})

            # get or create asset entity from session
            asset_entity = self._get_asset({
                "name": "plateReference",
                "type": assettype_entity,
                "parent": self.shot_entity
            })

            # get or create assetversion entity from session
            assetversion_entity = self._get_assetversion({
                "version": 0,
                "asset": asset_entity
            })

        # get or create component entity
        self._set_component(component_data, {
            "name": name,
            "version": assetversion_entity,
        })

        return assetversion_entity

    def _overwrite_members(self, entity, data):
        origin_location = self._get_ftrack_location("ftrack.origin")
        location = data.pop("location")

        self._remove_component_from_location(entity, location)

        entity["file_type"] = data["file_type"]

        try:
            origin_location.add_component(
                entity, data["file_path"]
            )
            # Add components to location.
            location.add_component(
                entity, origin_location, recursive=True)
        except Exception as __e:
            print("Error: {}".format(__e))
            self._remove_component_from_location(entity, origin_location)
            origin_location.add_component(
                entity, data["file_path"]
            )
            # Add components to location.
            location.add_component(
                entity, origin_location, recursive=True)

    def _remove_component_from_location(self, entity, location):
        print(location)
        # Removing existing members from location
        components = list(entity.get("members", []))
        components += [entity]
        for component in components:
            for loc in component.get("component_locations", []):
                if location["id"] == loc["location_id"]:
                    print("<< Removing component: {}".format(component))
                    location.remove_component(
                        component, recursive=False
                    )

        # Deleting existing members on component entity
        for member in entity.get("members", []):
            self.session.delete(member)
            print("<< Deleting member: {}".format(member))
            del member

        self._commit()

        # Reset members in memory
        if "members" in entity.keys():
            entity["members"] = []

    def _get_assettype(self, data):
        return self.session.query(
            self._query("AssetType", data)).first()

    def _set_component(self, comp_data, base_data):
        component_metadata = comp_data.pop("metadata", {})

        component_entity = self.session.query(
            self._query("Component", base_data)
        ).first()

        if component_entity:
            # overwrite existing members in component entity
            # - get data for member from `ftrack.origin` location
            self._overwrite_members(component_entity, comp_data)

            # Adding metadata
            existing_component_metadata = component_entity["metadata"]
            existing_component_metadata.update(component_metadata)
            component_entity["metadata"] = existing_component_metadata
            return

        assetversion_entity = base_data["version"]
        location = comp_data.pop("location")

        component_entity = assetversion_entity.create_component(
            comp_data["file_path"],
            data=comp_data,
            location=location
        )

        # Adding metadata
        existing_component_metadata = component_entity["metadata"]
        existing_component_metadata.update(component_metadata)
        component_entity["metadata"] = existing_component_metadata

        if comp_data["name"] == "thumbnail":
            self.shot_entity["thumbnail_id"] = component_entity["id"]
            assetversion_entity["thumbnail_id"] = component_entity["id"]

        self._commit()

    def _get_asset(self, data):
        # first find already created
        asset_entity = self.session.query(
            self._query("Asset", data)
        ).first()

        if asset_entity:
            return asset_entity

        asset_entity = self.session.create("Asset", data)

        # _commit if created
        self._commit()

        return asset_entity

    def _get_assetversion(self, data):
        assetversion_entity = self.session.query(
            self._query("AssetVersion", data)
        ).first()

        if assetversion_entity:
            return assetversion_entity

        assetversion_entity = self.session.create("AssetVersion", data)

        # _commit if created
        self._commit()

        return assetversion_entity

    def _commit(self):
        try:
            self.session.commit()
        except Exception:
            tp, value, tb = sys.exc_info()
            # self.session.rollback()
            # self.session._configure_locations()
            six.reraise(tp, value, tb)

    def _get_ftrack_location(self, name=None):
        name = name or self.default_location

        if name in self.ftrack_locations:
            return self.ftrack_locations[name]

        location = self.session.query(
            'Location where name is "{}"'.format(name)
        ).one()
        self.ftrack_locations[name] = location
        return location

    def _query(self, entitytype, data):
        """ Generate a query expression from data supplied.

        If a value is not a string, we'll add the id of the entity to the
        query.

        Args:
            entitytype (str): The type of entity to query.
            data (dict): The data to identify the entity.
            exclusions (list): All keys to exclude from the query.

        Returns:
            str: String query to use with "session.query"
        """
        queries = []
        # NOTE: the original py2/py3 branches were identical, so a single
        # loop with `six.string_types` covers both interpreter versions;
        # plain string/int values are formatted directly, entity-like
        # values are matched by their id
        for key, value in data.items():
            if isinstance(value, six.string_types + (int,)):
                queries.append("{0} is \"{1}\"".format(key, value))
            else:
                print("value: {}".format(value))
                if "id" in value.keys():
                    queries.append(
                        "{0}.id is \"{1}\"".format(key, value["id"])
                    )
                else:
                    queries.append("{0} is \"{1}\"".format(key, value))

        query = (
            "select id from " + entitytype + " where " + " and ".join(queries)
        )
        print(query)
        return query
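# Illustrative query built by FtrackComponentCreator._query for a dict
# mixing plain values and entities (assumed shot_entity is an ftrack
# entity with an "id" key):
#
#     _query("Asset", {"name": "plateReference", "parent": shot_entity})
#     # -> select id from Asset where name is "plateReference"
#     #      and parent.id is "<shot entity id>"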


class FtrackEntityOperator:
    existing_tasks = []

    def __init__(self, session, project_entity):
        self.session = session
        self.project_entity = project_entity

    def commit(self):
        try:
            self.session.commit()
        except Exception:
            tp, value, tb = sys.exc_info()
            self.session.rollback()
            self.session._configure_locations()
            six.reraise(tp, value, tb)

    def create_ftrack_entity(self, session, type, name, parent=None):
        parent = parent or self.project_entity
        entity = session.create(type, {
            'name': name,
            'parent': parent
        })
        try:
            session.commit()
        except Exception:
            tp, value, tb = sys.exc_info()
            session.rollback()
            session._configure_locations()
            six.reraise(tp, value, tb)
        return entity

    def get_ftrack_entity(self, session, type, name, parent):
        query = '{} where name is "{}" and project_id is "{}"'.format(
            type, name, self.project_entity["id"])

        entity = session.query(query).first()

        # if entity doesn't exist then create one
        if not entity:
            entity = self.create_ftrack_entity(
                session,
                type,
                name,
                parent
            )

        return entity

    def create_parents(self, template):
        parents = []
        t_split = template.split("/")
        replace_pattern = re.compile(r"(\[.*\])")
        type_pattern = re.compile(r"\[(.*)\]")

        for t_s in t_split:
            match_type = type_pattern.findall(t_s)
            if not match_type:
                raise Exception((
                    "Missing correct type flag in : {}"
                    "\n Example: name[Type]").format(
                        t_s)
                )
            new_name = re.sub(replace_pattern, "", t_s)
            f_type = match_type.pop()

            parents.append((new_name, f_type))

        return parents
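    # Example: a formatted hierarchy template is split into (name, type)
    # pairs that get_ftrack_entity() can resolve top-down:
    #
    #     create_parents("shots[Folder]/sq01[Sequence]")
    #     # -> [("shots", "Folder"), ("sq01", "Sequence")]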

    def create_task(self, task_type, task_types, parent):
        _existing_tasks = [
            child for child in parent['children']
            if child.entity_type.lower() == 'task'
        ]

        # add tasks into existing tasks if they are not already there
        for _t in _existing_tasks:
            if _t in self.existing_tasks:
                continue
            self.existing_tasks.append(_t)

        existing_task = [
            task for task in self.existing_tasks
            if task['name'].lower() in task_type.lower()
            if task['parent'] == parent
        ]

        if existing_task:
            return existing_task.pop()

        task = self.session.create('Task', {
            "name": task_type.lower(),
            "parent": parent
        })
        task["type"] = task_types[task_type]

        self.existing_tasks.append(task)
        return task
|
|
@ -1,529 +0,0 @@
|
|||
from qtpy import QtWidgets, QtCore

import uiwidgets
import app_utils
import ftrack_lib


def clear_inner_modules():
    import sys

    if "ftrack_lib" in sys.modules.keys():
        del sys.modules["ftrack_lib"]
        print("Ftrack Lib module removed from sys.modules")

    if "app_utils" in sys.modules.keys():
        del sys.modules["app_utils"]
        print("app_utils module removed from sys.modules")

    if "uiwidgets" in sys.modules.keys():
        del sys.modules["uiwidgets"]
        print("uiwidgets module removed from sys.modules")


class MainWindow(QtWidgets.QWidget):

    def __init__(self, klass, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.panel_class = klass

    def closeEvent(self, event):
        # clear all temp data
        print("Removing temp data")
        self.panel_class.clear_temp_data()
        self.panel_class.close()
        clear_inner_modules()
        ftrack_lib.FtrackEntityOperator.existing_tasks = []
        # now the panel can be closed
        event.accept()


class FlameBabyPublisherPanel(object):
    session = None
    temp_data_dir = None
    processed_components = []
    project_entity = None
    task_types = {}
    all_task_types = {}

    # TreeWidget
    columns = {
        "Sequence name": {
            "columnWidth": 200,
            "order": 0
        },
        "Shot name": {
            "columnWidth": 200,
            "order": 1
        },
        "Clip duration": {
            "columnWidth": 100,
            "order": 2
        },
        "Shot description": {
            "columnWidth": 500,
            "order": 3
        },
        "Task description": {
            "columnWidth": 500,
            "order": 4
        },
    }

    def __init__(self, selection):
        print(selection)

        self.session = ftrack_lib.get_ftrack_session()
        self.selection = selection
        self.window = MainWindow(self)

        # creating ui
        self.window.setMinimumSize(1500, 600)
        self.window.setWindowTitle('AYON: Baby-publisher')
        self.window.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
        self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.window.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.window.setStyleSheet('background-color: #313131')

        self._create_project_widget()
        self._create_tree_widget()
        self._set_sequence_params()
        self._generate_widgets()
        self._generate_layouts()
        self._timeline_info()
        self._fix_resolution()

        self.window.show()

    def _generate_widgets(self):
        with app_utils.get_config("main") as cfg_data:
            cfg_d = cfg_data

        self._create_task_type_widget(cfg_d)

        # input fields
        self.shot_name_label = uiwidgets.FlameLabel(
            'Shot name template', 'normal', self.window)
        self.shot_name_template_input = uiwidgets.FlameLineEdit(
            cfg_d["shot_name_template"], self.window)

        self.hierarchy_label = uiwidgets.FlameLabel(
            'Parents template', 'normal', self.window)
        self.hierarchy_template_input = uiwidgets.FlameLineEdit(
            cfg_d["hierarchy_template"], self.window)

        self.start_frame_label = uiwidgets.FlameLabel(
            'Workfile start frame', 'normal', self.window)
        self.start_frame_input = uiwidgets.FlameLineEdit(
            cfg_d["workfile_start_frame"], self.window)

        self.handles_label = uiwidgets.FlameLabel(
            'Shot handles', 'normal', self.window)
        self.handles_input = uiwidgets.FlameLineEdit(
            cfg_d["shot_handles"], self.window)

        self.width_label = uiwidgets.FlameLabel(
            'Sequence width', 'normal', self.window)
        self.width_input = uiwidgets.FlameLineEdit(
            str(self.seq_width), self.window)

        self.height_label = uiwidgets.FlameLabel(
            'Sequence height', 'normal', self.window)
        self.height_input = uiwidgets.FlameLineEdit(
            str(self.seq_height), self.window)

        self.pixel_aspect_label = uiwidgets.FlameLabel(
            'Pixel aspect ratio', 'normal', self.window)
        self.pixel_aspect_input = uiwidgets.FlameLineEdit(
            str(1.00), self.window)

        self.fps_label = uiwidgets.FlameLabel(
            'Frame rate', 'normal', self.window)
        self.fps_input = uiwidgets.FlameLineEdit(
            str(self.fps), self.window)

        # Button
        self.select_all_btn = uiwidgets.FlameButton(
            'Select All', self.select_all, self.window)

        self.remove_temp_data_btn = uiwidgets.FlameButton(
            'Remove temp data', self.clear_temp_data, self.window)

        self.ftrack_send_btn = uiwidgets.FlameButton(
            'Send to Ftrack', self._send_to_ftrack, self.window)

    def _generate_layouts(self):
        # left props
        v_shift = 0
        prop_layout_l = QtWidgets.QGridLayout()
        prop_layout_l.setHorizontalSpacing(30)
        if self.project_selector_enabled:
            prop_layout_l.addWidget(self.project_select_label, v_shift, 0)
            prop_layout_l.addWidget(self.project_select_input, v_shift, 1)
            v_shift += 1
        prop_layout_l.addWidget(self.shot_name_label, (v_shift + 0), 0)
        prop_layout_l.addWidget(
            self.shot_name_template_input, (v_shift + 0), 1)
        prop_layout_l.addWidget(self.hierarchy_label, (v_shift + 1), 0)
        prop_layout_l.addWidget(
            self.hierarchy_template_input, (v_shift + 1), 1)
        prop_layout_l.addWidget(self.start_frame_label, (v_shift + 2), 0)
        prop_layout_l.addWidget(self.start_frame_input, (v_shift + 2), 1)
        prop_layout_l.addWidget(self.handles_label, (v_shift + 3), 0)
        prop_layout_l.addWidget(self.handles_input, (v_shift + 3), 1)
        prop_layout_l.addWidget(self.task_type_label, (v_shift + 4), 0)
        prop_layout_l.addWidget(
            self.task_type_input, (v_shift + 4), 1)

        # right props
        prop_widget_r = QtWidgets.QWidget(self.window)
        prop_layout_r = QtWidgets.QGridLayout(prop_widget_r)
        prop_layout_r.setHorizontalSpacing(30)
        prop_layout_r.setAlignment(
            QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
        prop_layout_r.setContentsMargins(0, 0, 0, 0)
        prop_layout_r.addWidget(self.width_label, 1, 0)
        prop_layout_r.addWidget(self.width_input, 1, 1)
        prop_layout_r.addWidget(self.height_label, 2, 0)
        prop_layout_r.addWidget(self.height_input, 2, 1)
        prop_layout_r.addWidget(self.pixel_aspect_label, 3, 0)
        prop_layout_r.addWidget(self.pixel_aspect_input, 3, 1)
        prop_layout_r.addWidget(self.fps_label, 4, 0)
        prop_layout_r.addWidget(self.fps_input, 4, 1)

        # prop layout
        prop_main_layout = QtWidgets.QHBoxLayout()
        prop_main_layout.addLayout(prop_layout_l, 1)
        prop_main_layout.addSpacing(20)
        prop_main_layout.addWidget(prop_widget_r, 1)

        # buttons layout
        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(self.remove_temp_data_btn)
        hbox.addWidget(self.select_all_btn)
        hbox.addWidget(self.ftrack_send_btn)

        # put all layouts together
        main_frame = QtWidgets.QVBoxLayout(self.window)
        main_frame.setMargin(20)
        main_frame.addLayout(prop_main_layout)
        main_frame.addWidget(self.tree)
        main_frame.addLayout(hbox)

    def _set_sequence_params(self):
        for select in self.selection:
            self.seq_height = select.height
            self.seq_width = select.width
            self.fps = float(str(select.frame_rate)[:-4])
            break

    def _create_task_type_widget(self, cfg_d):
        print(self.project_entity)
        self.task_types = ftrack_lib.get_project_task_types(
            self.project_entity)

        self.task_type_label = uiwidgets.FlameLabel(
            'Create Task (type)', 'normal', self.window)
        self.task_type_input = uiwidgets.FlamePushButtonMenu(
            cfg_d["create_task_type"], self.task_types.keys(), self.window)

    def _create_project_widget(self):
        import flame
        # get project name from flame current project
        self.project_name = flame.project.current_project.name

        # get project from ftrack -
        # ftrack project name has to be the same as flame project!
        query = 'Project where full_name is "{}"'.format(self.project_name)

        # globally used variables
        self.project_entity = self.session.query(query).first()

        self.project_selector_enabled = bool(not self.project_entity)

        if self.project_selector_enabled:
            self.all_projects = self.session.query(
                "Project where status is active").all()
            self.project_entity = self.all_projects[0]
            project_names = [p["full_name"] for p in self.all_projects]
            self.all_task_types = {
                p["full_name"]: ftrack_lib.get_project_task_types(p).keys()
                for p in self.all_projects
            }
            self.project_select_label = uiwidgets.FlameLabel(
                'Select Ftrack project', 'normal', self.window)
            self.project_select_input = uiwidgets.FlamePushButtonMenu(
                self.project_entity["full_name"], project_names, self.window)
            self.project_select_input.selection_changed.connect(
                self._on_project_changed)

    def _create_tree_widget(self):
        ordered_column_labels = list(self.columns.keys())
        for _name, _value in self.columns.items():
            ordered_column_labels.pop(_value["order"])
            ordered_column_labels.insert(_value["order"], _name)

        self.tree = uiwidgets.FlameTreeWidget(
            ordered_column_labels, self.window)

        # Allow multiple items in tree to be selected
        self.tree.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)

        # Set tree column width
        for _name, _val in self.columns.items():
            self.tree.setColumnWidth(
                _val["order"],
                _val["columnWidth"]
            )

        # Prevent weird characters when shrinking tree columns
        self.tree.setTextElideMode(QtCore.Qt.ElideNone)

    def _resolve_project_entity(self):
        if self.project_selector_enabled:
            selected_project_name = self.project_select_input.text()
            self.project_entity = next(
                (p for p in self.all_projects
                 if p["full_name"] in selected_project_name),
                None
            )

    def _save_ui_state_to_cfg(self):
        _cfg_data_back = {
            "shot_name_template": self.shot_name_template_input.text(),
            "workfile_start_frame": self.start_frame_input.text(),
            "shot_handles": self.handles_input.text(),
            "hierarchy_template": self.hierarchy_template_input.text(),
            "create_task_type": self.task_type_input.text()
        }

        # add cfg data back to settings.ini
        app_utils.set_config(_cfg_data_back, "main")

    def _send_to_ftrack(self):
        # resolve active project and add it to self.project_entity
        self._resolve_project_entity()
        self._save_ui_state_to_cfg()

        # get handles from gui input
        handles = self.handles_input.text()

        # get frame start from gui input
        frame_start = int(self.start_frame_input.text())

        # get task type from gui input
        task_type = self.task_type_input.text()

        # get frame rate from gui input
        fps = self.fps_input.text()

        entity_operator = ftrack_lib.FtrackEntityOperator(
            self.session, self.project_entity)
        component_creator = ftrack_lib.FtrackComponentCreator(self.session)

        if not self.temp_data_dir:
            self.window.hide()
            self.temp_data_dir = component_creator.generate_temp_data(
                self.selection,
                {
                    "nbHandles": handles
                }
            )
            self.window.show()

        # collect generated files to list data for further use
        component_creator.collect_generated_data(self.temp_data_dir)

        # Get all selected items from treewidget
        for item in self.tree.selectedItems():
            # frame ranges
            frame_duration = int(item.text(2))
            frame_end = frame_start + frame_duration

            # description
            shot_description = item.text(3)
            task_description = item.text(4)

            # other
            sequence_name = item.text(0)
            shot_name = item.text(1)

            thumb_fp = component_creator.get_thumb_path(shot_name)
            video_fp = component_creator.get_video_path(shot_name)

            print("processed comps: {}".format(self.processed_components))
            print("processed thumb_fp: {}".format(thumb_fp))

            processed = False
            if thumb_fp not in self.processed_components:
                self.processed_components.append(thumb_fp)
            else:
                processed = True

            print("processed: {}".format(processed))

            # populate full shot info
            shot_attributes = {
                "sequence": sequence_name,
                "shot": shot_name,
                "task": task_type
            }

            # format shot name template
            _shot_name = self.shot_name_template_input.text().format(
                **shot_attributes)

            # format hierarchy template
            _hierarchy_text = self.hierarchy_template_input.text().format(
                **shot_attributes)
            print(_hierarchy_text)

            # solve parents
            parents = entity_operator.create_parents(_hierarchy_text)
            print(parents)

            # obtain shot parents entities
            _parent = None
            for _name, _type in parents:
                p_entity = entity_operator.get_ftrack_entity(
                    self.session,
                    _type,
                    _name,
                    _parent
                )
                print(p_entity)
                _parent = p_entity

            # obtain shot ftrack entity
            f_s_entity = entity_operator.get_ftrack_entity(
                self.session,
                "Shot",
                _shot_name,
                _parent
            )
            print("Shot entity is: {}".format(f_s_entity))

            if not processed:
                # first create thumbnail and get version entity
                assetversion_entity = component_creator.create_component(
                    f_s_entity, {
                        "file_path": thumb_fp
                    }
                )

                # secondly add video to version entity
                component_creator.create_component(
                    f_s_entity, {
                        "file_path": video_fp,
                        "duration": frame_duration,
                        "handles": int(handles),
                        "fps": float(fps)
                    }, assetversion_entity
                )

            # create custom attributes
            custom_attrs = {
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "handleStart": int(handles),
                "handleEnd": int(handles),
                "resolutionWidth": int(self.width_input.text()),
                "resolutionHeight": int(self.height_input.text()),
                "pixelAspect": float(self.pixel_aspect_input.text()),
                "fps": float(fps)
            }

            # update custom attributes on shot entity
            for key in custom_attrs:
                f_s_entity['custom_attributes'][key] = custom_attrs[key]

            task_entity = entity_operator.create_task(
                task_type, self.task_types, f_s_entity)

            # Create notes.
            user = self.session.query(
                "User where username is \"{}\"".format(self.session.api_user)
            ).first()

            f_s_entity.create_note(shot_description, author=user)

            if task_description:
                task_entity.create_note(task_description, user)

        entity_operator.commit()

        component_creator.close()

    def _fix_resolution(self):
        # Center window in linux
        resolution = QtWidgets.QDesktopWidget().screenGeometry()
        self.window.move(
            (resolution.width() / 2) - (self.window.frameSize().width() / 2),
            (resolution.height() / 2) - (self.window.frameSize().height() / 2))

    def _on_project_changed(self):
        # look up task types for the project picked in the selector
        task_types = self.all_task_types[self.project_select_input.text()]
        self.task_type_input.set_menu_options(task_types)

    def _timeline_info(self):
        # identify segment information on the timeline
        for sequence in self.selection:
            frame_rate = float(str(sequence.frame_rate)[:-4])
            for ver in sequence.versions:
                for track in ver.tracks:
                    if len(track.segments) == 0 and track.hidden:
                        continue
                    for segment in track.segments:
                        print(segment.attributes)
                        if segment.name.get_value() == "":
                            continue
                        if segment.hidden.get_value() is True:
                            continue
                        # get clip frame duration
                        record_duration = str(segment.record_duration)[1:-1]
                        clip_duration = app_utils.timecode_to_frames(
                            record_duration, frame_rate)

                        # populate shot source metadata
                        shot_description = ""
                        for attr in ["tape_name", "source_name", "head",
                                     "tail", "file_path"]:
                            if not hasattr(segment, attr):
                                continue
                            _value = getattr(segment, attr)
                            _label = attr.replace("_", " ").capitalize()
                            row = "{}: {}\n".format(_label, _value)
                            shot_description += row

                        # Add timeline segment to tree
                        QtWidgets.QTreeWidgetItem(self.tree, [
                            sequence.name.get_value(),  # seq name
                            segment.shot_name.get_value(),  # shot name
                            str(clip_duration),  # clip duration
                            shot_description,  # shot description
                            segment.comment.get_value()  # task description
                        ]).setFlags(
                            QtCore.Qt.ItemIsEditable
                            | QtCore.Qt.ItemIsEnabled
                            | QtCore.Qt.ItemIsSelectable
                        )

        # Select top item in tree
        self.tree.setCurrentItem(self.tree.topLevelItem(0))

    def select_all(self):
        self.tree.selectAll()

    def clear_temp_data(self):
        import shutil

        self.processed_components = []

        if self.temp_data_dir:
            shutil.rmtree(self.temp_data_dir)
            self.temp_data_dir = None
            print("All temp data was removed ...")

    def close(self):
        self._save_ui_state_to_cfg()
        self.session.close()
|
|
@ -1,212 +0,0 @@
|
|||
from qtpy import QtWidgets, QtCore


class FlameLabel(QtWidgets.QLabel):
    """
    Custom Qt Flame Label Widget

    For different label looks set label_type as:
    'normal', 'background', or 'outline'

    To use:

    label = FlameLabel('Label Name', 'normal', window)
    """

    def __init__(self, label_name, label_type, parent_window, *args, **kwargs):
        super(FlameLabel, self).__init__(*args, **kwargs)

        self.setText(label_name)
        self.setParent(parent_window)
        self.setMinimumSize(130, 28)
        self.setMaximumHeight(28)
        self.setFocusPolicy(QtCore.Qt.NoFocus)

        # Set label stylesheet based on label_type

        if label_type == 'normal':
            self.setStyleSheet(
                'QLabel {color: #9a9a9a; border-bottom: 1px inset #282828; font: 14px "Discreet"}'  # noqa
                'QLabel:disabled {color: #6a6a6a}'
            )
        elif label_type == 'background':
            self.setAlignment(QtCore.Qt.AlignCenter)
            self.setStyleSheet(
                'color: #9a9a9a; background-color: #393939; font: 14px "Discreet"'  # noqa
            )
        elif label_type == 'outline':
            self.setAlignment(QtCore.Qt.AlignCenter)
            self.setStyleSheet(
                'color: #9a9a9a; background-color: #212121; border: 1px solid #404040; font: 14px "Discreet"'  # noqa
            )


class FlameLineEdit(QtWidgets.QLineEdit):
    """
    Custom Qt Flame Line Edit Widget

    Main window should include this:
    window.setFocusPolicy(QtCore.Qt.StrongFocus)

    To use:

    line_edit = FlameLineEdit('Some text here', window)
    """

    def __init__(self, text, parent_window, *args, **kwargs):
        super(FlameLineEdit, self).__init__(*args, **kwargs)

        self.setText(text)
        self.setParent(parent_window)
        self.setMinimumHeight(28)
        self.setMinimumWidth(110)
        self.setStyleSheet(
            'QLineEdit {color: #9a9a9a; background-color: #373e47; selection-color: #262626; selection-background-color: #b8b1a7; font: 14px "Discreet"}'  # noqa
            'QLineEdit:focus {background-color: #474e58}'  # noqa
            'QLineEdit:disabled {color: #6a6a6a; background-color: #373737}'
        )


class FlameTreeWidget(QtWidgets.QTreeWidget):
    """
    Custom Qt Flame Tree Widget

    To use:

    tree_headers = ['Header1', 'Header2', 'Header3', 'Header4']
    tree = FlameTreeWidget(tree_headers, window)
    """

    def __init__(self, tree_headers, parent_window, *args, **kwargs):
        super(FlameTreeWidget, self).__init__(*args, **kwargs)

        self.setMinimumWidth(1000)
        self.setMinimumHeight(300)
        self.setSortingEnabled(True)
        self.sortByColumn(0, QtCore.Qt.AscendingOrder)
        self.setAlternatingRowColors(True)
        self.setFocusPolicy(QtCore.Qt.NoFocus)
        self.setStyleSheet(
            'QTreeWidget {color: #9a9a9a; background-color: #2a2a2a; alternate-background-color: #2d2d2d; font: 14px "Discreet"}'  # noqa
            'QTreeWidget::item:selected {color: #d9d9d9; background-color: #474747; border: 1px solid #111111}'  # noqa
            'QHeaderView {color: #9a9a9a; background-color: #393939; font: 14px "Discreet"}'  # noqa
            'QTreeWidget::item:selected {selection-background-color: #111111}'
            'QMenu {color: #9a9a9a; background-color: #24303d; font: 14px "Discreet"}'  # noqa
            'QMenu::item:selected {color: #d9d9d9; background-color: #3a4551}'
        )
        self.verticalScrollBar().setStyleSheet('color: #818181')
        self.horizontalScrollBar().setStyleSheet('color: #818181')
        self.setHeaderLabels(tree_headers)


class FlameButton(QtWidgets.QPushButton):
    """
    Custom Qt Flame Button Widget

    To use:

    button = FlameButton('Button Name', do_this_when_pressed, window)
    """

    def __init__(self, button_name, do_when_pressed, parent_window,
                 *args, **kwargs):
        super(FlameButton, self).__init__(*args, **kwargs)

        self.setText(button_name)
        self.setParent(parent_window)
        self.setMinimumSize(QtCore.QSize(110, 28))
        self.setMaximumSize(QtCore.QSize(110, 28))
        self.setFocusPolicy(QtCore.Qt.NoFocus)
        self.clicked.connect(do_when_pressed)
        self.setStyleSheet(
            'QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black; font: 14px "Discreet"}'  # noqa
            'QPushButton:pressed {color: #d9d9d9; background-color: #4f4f4f; border-top: 1px inset #666666; font: italic}'  # noqa
            'QPushButton:disabled {color: #747474; background-color: #353535; border-top: 1px solid #444444; border-bottom: 1px solid #242424}'  # noqa
        )


class FlamePushButton(QtWidgets.QPushButton):
    """
    Custom Qt Flame Push Button Widget

    To use:

    pushbutton = FlamePushButton(' Button Name', True_or_False, window)
    """

    def __init__(self, button_name, button_checked, parent_window,
                 *args, **kwargs):
        super(FlamePushButton, self).__init__(*args, **kwargs)

        self.setText(button_name)
        self.setParent(parent_window)
        self.setCheckable(True)
        self.setChecked(button_checked)
        self.setMinimumSize(155, 28)
        self.setMaximumSize(155, 28)
        self.setFocusPolicy(QtCore.Qt.NoFocus)
        self.setStyleSheet(
            'QPushButton {color: #9a9a9a; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #424142, stop: .94 #2e3b48); text-align: left; border-top: 1px inset #555555; border-bottom: 1px inset black; font: 14px "Discreet"}'  # noqa
            'QPushButton:checked {color: #d9d9d9; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #4f4f4f, stop: .94 #5a7fb4); font: italic; border: 1px inset black; border-bottom: 1px inset #404040; border-right: 1px inset #404040}'  # noqa
            'QPushButton:disabled {color: #6a6a6a; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #383838, stop: .94 #353535); font: light; border-top: 1px solid #575757; border-bottom: 1px solid #242424; border-right: 1px solid #353535; border-left: 1px solid #353535}'  # noqa
            'QToolTip {color: black; background-color: #ffffde; border: black solid 1px}'  # noqa
        )


class FlamePushButtonMenu(QtWidgets.QPushButton):
    """
    Custom Qt Flame Menu Push Button Widget

    To use:

    push_button_menu_options = ['Item 1', 'Item 2', 'Item 3', 'Item 4']
    menu_push_button = FlamePushButtonMenu('push_button_name',
                                           push_button_menu_options, window)

    or

    push_button_menu_options = ['Item 1', 'Item 2', 'Item 3', 'Item 4']
    menu_push_button = FlamePushButtonMenu(push_button_menu_options[0],
                                           push_button_menu_options, window)
    """
    selection_changed = QtCore.Signal(str)

    def __init__(self, button_name, menu_options, parent_window,
                 *args, **kwargs):
        super(FlamePushButtonMenu, self).__init__(*args, **kwargs)

        self.setParent(parent_window)
        self.setMinimumHeight(28)
        self.setMinimumWidth(110)
        self.setFocusPolicy(QtCore.Qt.NoFocus)
        self.setStyleSheet(
            'QPushButton {color: #9a9a9a; background-color: #24303d; font: 14px "Discreet"}'  # noqa
            'QPushButton:disabled {color: #747474; background-color: #353535; border-top: 1px solid #444444; border-bottom: 1px solid #242424}'  # noqa
        )

        pushbutton_menu = QtWidgets.QMenu(parent_window)
        pushbutton_menu.setFocusPolicy(QtCore.Qt.NoFocus)
        pushbutton_menu.setStyleSheet(
            'QMenu {color: #9a9a9a; background-color:#24303d; font: 14px "Discreet"}'  # noqa
            'QMenu::item:selected {color: #d9d9d9; background-color: #3a4551}'
        )

        self._pushbutton_menu = pushbutton_menu
        self.setMenu(pushbutton_menu)
        self.set_menu_options(menu_options, button_name)

    def set_menu_options(self, menu_options, current_option=None):
        self._pushbutton_menu.clear()
        current_option = current_option or menu_options[0]

        for option in menu_options:
            action = self._pushbutton_menu.addAction(option)
            action.triggered.connect(self._on_action_trigger)

        if current_option is not None:
            self.setText(current_option)

    def _on_action_trigger(self):
        action = self.sender()
        self.setText(action.text())
        self.selection_changed.emit(action.text())
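# Usage sketch (illustrative) for the selection_changed signal:
#
#     menu_btn = FlamePushButtonMenu('Item 1', ['Item 1', 'Item 2'], window)
#     menu_btn.selection_changed.connect(lambda text: print(text))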
|
|
@ -1,43 +0,0 @@
|
|||
from __future__ import print_function

import os
import sys

# only testing dependency for nested modules in package
import six  # noqa


SCRIPT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.join(SCRIPT_DIR, "modules")
sys.path.append(PACKAGE_DIR)


def flame_panel_executor(selection):
    if "panel_app" in sys.modules.keys():
        print("panel_app module is already loaded")
        del sys.modules["panel_app"]
        print("panel_app module removed from sys.modules")

    # (re)import so code changes are picked up on every run
    import panel_app
    reload(panel_app)  # noqa

    panel_app.FlameBabyPublisherPanel(selection)


def scope_sequence(selection):
    import flame
    return any(isinstance(item, flame.PySequence) for item in selection)


def get_media_panel_custom_ui_actions():
    return [
        {
            "name": "AYON: Baby-publisher",
            "actions": [
                {
                    "name": "Create Shots",
                    "isVisible": scope_sequence,
                    "execute": flame_panel_executor
                }
            ]
        }
    ]
|
@ -1,219 +0,0 @@
|
|||
from __future__ import print_function
import sys
from qtpy import QtWidgets
from pprint import pformat
import atexit

import ayon_flame.api as opfapi
from ayon_core.pipeline import (
    install_host,
    registered_host,
)


def openpype_install():
    """Registering AYON in context
    """
    install_host(opfapi)
    print("Registered host: {}".format(registered_host()))


# Exception handler
def exception_handler(exctype, value, _traceback):
    """Exception handler for improving UX

    Args:
        exctype (str): type of exception
        value (str): exception value
        tb (str): traceback to show
    """
    import traceback
    msg = "AYON: Python exception {} in {}".format(value, exctype)
    mbox = QtWidgets.QMessageBox()
    mbox.setText(msg)
    mbox.setDetailedText(
        pformat(traceback.format_exception(exctype, value, _traceback)))
    mbox.setStyleSheet('QLabel{min-width: 800px;}')
    mbox.exec_()
    sys.__excepthook__(exctype, value, _traceback)


# add exception handler into sys module
sys.excepthook = exception_handler


# register clean up logic to be called at Flame exit
def cleanup():
    """Cleaning up Flame framework context
    """
    if opfapi.CTX.flame_apps:
        print('`{}` cleaning up flame_apps:\n {}\n'.format(
            __file__, pformat(opfapi.CTX.flame_apps)))
        while len(opfapi.CTX.flame_apps):
            app = opfapi.CTX.flame_apps.pop()
            print('`{}` removing : {}'.format(__file__, app.name))
            del app
        opfapi.CTX.flame_apps = []

    if opfapi.CTX.app_framework:
        print('openpype\t: {} cleaning up'.format(
            opfapi.CTX.app_framework.bundle_name)
        )
        opfapi.CTX.app_framework.save_prefs()
        opfapi.CTX.app_framework = None


atexit.register(cleanup)


def load_apps():
    """Load available flame_apps into Flame framework
    """
    opfapi.CTX.flame_apps.append(
        opfapi.FlameMenuProjectConnect(opfapi.CTX.app_framework))
    opfapi.CTX.flame_apps.append(
        opfapi.FlameMenuTimeline(opfapi.CTX.app_framework))
    opfapi.CTX.flame_apps.append(
        opfapi.FlameMenuUniversal(opfapi.CTX.app_framework))
    opfapi.CTX.app_framework.log.info("Apps are loaded")


def project_changed_dict(info):
    """Hook for project change action

    Args:
        info (str): info text
    """
    cleanup()


def app_initialized(parent=None):
    """Initialization of the Framework

    Args:
        parent (obj, optional): Parent object. Defaults to None.
    """
    opfapi.CTX.app_framework = opfapi.FlameAppFramework()

    print("{} initializing".format(
        opfapi.CTX.app_framework.bundle_name))

    load_apps()


"""
Initialisation of the hook starts from here.

First it needs to test if it can import the flame module.
This will happen only in case a project has been loaded.
Then `app_initialized` will load the main Framework which will load
all menu objects as flame_apps.
"""

try:
    import flame  # noqa
    app_initialized(parent=None)
except ImportError:
    print("!!!! not able to import flame module !!!!")


def rescan_hooks():
    import flame  # noqa
    flame.execute_shortcut('Rescan Python Hooks')


def _build_app_menu(app_name):
    """Flame menu object generator

    Args:
        app_name (str): name of menu object app

    Returns:
        list: menu object
    """
    menu = []

    # first find the relative appname
    app = None
    for _app in opfapi.CTX.flame_apps:
        if _app.__class__.__name__ == app_name:
            app = _app

    if app:
        menu.append(app.build_menu())

    if opfapi.CTX.app_framework:
        menu_auto_refresh = opfapi.CTX.app_framework.prefs_global.get(
            'menu_auto_refresh', {})
        if menu_auto_refresh.get('timeline_menu', True):
            try:
                import flame  # noqa
                flame.schedule_idle_event(rescan_hooks)
            except ImportError:
                print("!!!! not able to import flame module !!!!")

    return menu


""" Flame hooks are starting here
"""


def project_saved(project_name, save_time, is_auto_save):
    """Hook to activate when project is saved

    Args:
        project_name (str): name of project
        save_time (str): time when it was saved
        is_auto_save (bool): autosave is on or off
    """
    if opfapi.CTX.app_framework:
        opfapi.CTX.app_framework.save_prefs()


def get_main_menu_custom_ui_actions():
    """Hook to create submenu in start menu

    Returns:
        list: menu object
    """
    # install openpype and the host
    openpype_install()

    return _build_app_menu("FlameMenuProjectConnect")


def get_timeline_custom_ui_actions():
    """Hook to create submenu in timeline

    Returns:
        list: menu object
    """
    # install openpype and the host
    openpype_install()

    return _build_app_menu("FlameMenuTimeline")


def get_batch_custom_ui_actions():
    """Hook to create submenu in batch

    Returns:
        list: menu object
    """
    # install openpype and the host
    openpype_install()

    return _build_app_menu("FlameMenuUniversal")


def get_media_panel_custom_ui_actions():
    """Hook to create submenu in desktop

    Returns:
        list: menu object
    """
    # install openpype and the host
    openpype_install()

    return _build_app_menu("FlameMenuUniversal")
|
|
@ -1,3 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'flame' version."""
__version__ = "0.2.1"
|
|
@ -1,10 +0,0 @@
|
|||
name = "flame"
title = "Flame"
version = "0.2.1"

client_dir = "ayon_flame"

ayon_required_addons = {
    "core": ">0.3.2",
}
ayon_compatible_addons = {}
|
|
@ -1,13 +0,0 @@
|
|||
from typing import Type

from ayon_server.addons import BaseServerAddon

from .settings import FlameSettings, DEFAULT_VALUES


class FlameAddon(BaseServerAddon):
    settings_model: Type[FlameSettings] = FlameSettings

    async def get_default_settings(self):
        settings_model_cls = self.get_settings_model()
        return settings_model_cls(**DEFAULT_VALUES)
|
|
@ -1,10 +0,0 @@
|
|||
from .main import (
    FlameSettings,
    DEFAULT_VALUES,
)


__all__ = (
    "FlameSettings",
    "DEFAULT_VALUES",
)
|
|
@ -1,119 +0,0 @@
|
|||
from ayon_server.settings import BaseSettingsModel, SettingsField


class CreateShotClipModel(BaseSettingsModel):
    hierarchy: str = SettingsField(
        "shot",
        title="Shot parent hierarchy",
        section="Shot Hierarchy And Rename Settings"
    )
    useShotName: bool = SettingsField(
        True,
        title="Use Shot Name",
    )
    clipRename: bool = SettingsField(
        False,
        title="Rename clips",
    )
    clipName: str = SettingsField(
        "{sequence}{shot}",
        title="Clip name template"
    )
    segmentIndex: bool = SettingsField(
        True,
        title="Accept segment order"
    )
    countFrom: int = SettingsField(
        10,
        title="Count sequence from"
    )
    countSteps: int = SettingsField(
        10,
        title="Stepping number"
    )

    folder: str = SettingsField(
        "shots",
        title="{folder}",
        section="Shot Template Keywords"
    )
    episode: str = SettingsField(
        "ep01",
        title="{episode}"
    )
    sequence: str = SettingsField(
        "a",
        title="{sequence}"
    )
    track: str = SettingsField(
        "{_track_}",
        title="{track}"
    )
    shot: str = SettingsField(
        "####",
        title="{shot}"
    )

    vSyncOn: bool = SettingsField(
        False,
        title="Enable Vertical Sync",
        section="Vertical Synchronization Of Attributes"
    )

    workfileFrameStart: int = SettingsField(
        1001,
        title="Workfiles Start Frame",
        section="Shot Attributes"
    )
    handleStart: int = SettingsField(
        10,
        title="Handle start (head)"
    )
    handleEnd: int = SettingsField(
        10,
        title="Handle end (tail)"
    )
    includeHandles: bool = SettingsField(
        False,
        title="Enable handles including"
    )
    retimedHandles: bool = SettingsField(
        True,
        title="Enable retimed handles"
    )
    retimedFramerange: bool = SettingsField(
        True,
        title="Enable retimed shot frameranges"
    )


class CreatePluginsModel(BaseSettingsModel):
    CreateShotClip: CreateShotClipModel = SettingsField(
        default_factory=CreateShotClipModel,
        title="Create Shot Clip"
    )


DEFAULT_CREATE_SETTINGS = {
    "CreateShotClip": {
        "hierarchy": "{folder}/{sequence}",
        "useShotName": True,
        "clipRename": False,
        "clipName": "{sequence}{shot}",
        "segmentIndex": True,
        "countFrom": 10,
        "countSteps": 10,
        "folder": "shots",
        "episode": "ep01",
        "sequence": "a",
        "track": "{_track_}",
        "shot": "####",
        "vSyncOn": False,
        "workfileFrameStart": 1001,
        "handleStart": 5,
        "handleEnd": 5,
        "includeHandles": False,
        "retimedHandles": True,
        "retimedFramerange": True
    }
}
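# Illustrative note: with the defaults above ("sequence": "a",
# "shot": "####" counting from 10 in steps of 10), the clip name
# template "{sequence}{shot}" resolves to names like "a0010", "a0020".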
|
|
@ -1,149 +0,0 @@
|
|||
from pydantic import validator
from ayon_server.settings import (
    BaseSettingsModel,
    SettingsField,
    ensure_unique_names,
)


class ImageIOFileRuleModel(BaseSettingsModel):
    name: str = SettingsField("", title="Rule name")
    pattern: str = SettingsField("", title="Regex pattern")
    colorspace: str = SettingsField("", title="Colorspace name")
    ext: str = SettingsField("", title="File extension")


class ImageIOFileRulesModel(BaseSettingsModel):
    activate_host_rules: bool = SettingsField(False)
    rules: list[ImageIOFileRuleModel] = SettingsField(
        default_factory=list,
        title="Rules"
    )

    @validator("rules")
    def validate_unique_outputs(cls, value):
        ensure_unique_names(value)
        return value


class ImageIORemappingRulesModel(BaseSettingsModel):
    host_native_name: str = SettingsField(
        title="Application native colorspace name"
    )
    ocio_name: str = SettingsField(title="OCIO colorspace name")


class ImageIORemappingModel(BaseSettingsModel):
    rules: list[ImageIORemappingRulesModel] = SettingsField(
        default_factory=list
    )


class ImageIOConfigModel(BaseSettingsModel):
    """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config
    path in the Core addon profiles here
    (ayon+settings://core/imageio/ocio_config_profiles).
    """

    override_global_config: bool = SettingsField(
        False,
        title="Override global OCIO config",
        description=(
            "DEPRECATED functionality. Please set the OCIO config path in the "
            "Core addon profiles here (ayon+settings://core/imageio/"
            "ocio_config_profiles)."
        ),
    )
    filepath: list[str] = SettingsField(
        default_factory=list,
        title="Config path",
        description=(
            "DEPRECATED functionality. Please set the OCIO config path in the "
            "Core addon profiles here (ayon+settings://core/imageio/"
            "ocio_config_profiles)."
        ),
    )


class ProfileNamesMappingInputsModel(BaseSettingsModel):
    _layout = "expanded"

    flameName: str = SettingsField("", title="Flame name")
    ocioName: str = SettingsField("", title="OCIO name")


class ProfileNamesMappingModel(BaseSettingsModel):
    _layout = "expanded"

    inputs: list[ProfileNamesMappingInputsModel] = SettingsField(
        default_factory=list,
        title="Profile names mapping"
    )


class ImageIOProjectModel(BaseSettingsModel):
    colourPolicy: str = SettingsField(
        "ACES 1.1",
        title="Colour Policy (name or path)",
        section="Project"
    )
    frameDepth: str = SettingsField(
        "16-bit fp",
        title="Image Depth"
    )
    fieldDominance: str = SettingsField(
        "PROGRESSIVE",
        title="Field Dominance"
    )


class FlameImageIOModel(BaseSettingsModel):
    _isGroup = True
    activate_host_color_management: bool = SettingsField(
        True, title="Enable Color Management"
    )
    remapping: ImageIORemappingModel = SettingsField(
        title="Remapping colorspace names",
        default_factory=ImageIORemappingModel
    )
    ocio_config: ImageIOConfigModel = SettingsField(
        default_factory=ImageIOConfigModel,
        title="OCIO config"
    )
    file_rules: ImageIOFileRulesModel = SettingsField(
        default_factory=ImageIOFileRulesModel,
        title="File Rules"
    )
    # NOTE 'project' attribute was expanded to this model but that caused
    # inconsistency with v3 settings and harder conversion handling
    # - it can be moved back but keep in mind that it must be handled in v3
    # conversion script too
    project: ImageIOProjectModel = SettingsField(
        default_factory=ImageIOProjectModel,
        title="Project"
    )
    profilesMapping: ProfileNamesMappingModel = SettingsField(
        default_factory=ProfileNamesMappingModel,
        title="Profile names mapping"
    )


DEFAULT_IMAGEIO_SETTINGS = {
    "project": {
        "colourPolicy": "ACES 1.1",
        "frameDepth": "16-bit fp",
        "fieldDominance": "PROGRESSIVE"
    },
    "profilesMapping": {
        "inputs": [
            {
                "flameName": "ACEScg",
                "ocioName": "ACES - ACEScg"
            },
            {
                "flameName": "Rec.709 video",
                "ocioName": "Output - Rec.709"
            }
        ]
    }
}
|
|
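The `profilesMapping` defaults above pair Flame-native colorspace names with OCIO names. A minimal sketch of the lookup such a table implies (the helper name and the fall-through behavior are assumptions, not the addon's code):

```python
# Sketch only: build a Flame-name -> OCIO-name lookup from the structure above.
def remap_colorspace(flame_name, profiles_mapping):
    lookup = {
        rule["flameName"]: rule["ocioName"]
        for rule in profiles_mapping["inputs"]
    }
    # Assumed fallback: keep the original name when no rule matches.
    return lookup.get(flame_name, flame_name)


print(remap_colorspace("ACEScg", DEFAULT_IMAGEIO_SETTINGS["profilesMapping"]))
# ACES - ACEScg
```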
@ -1,103 +0,0 @@
from ayon_server.settings import SettingsField, BaseSettingsModel


class LoadClipModel(BaseSettingsModel):
    enabled: bool = SettingsField(True)

    product_types: list[str] = SettingsField(
        default_factory=list,
        title="Product types"
    )
    reel_group_name: str = SettingsField(
        "OpenPype_Reels",
        title="Reel group name"
    )
    reel_name: str = SettingsField(
        "Loaded",
        title="Reel name"
    )

    clip_name_template: str = SettingsField(
        "{folder[name]}_{product[name]}<_{output}>",
        title="Clip name template"
    )
    layer_rename_template: str = SettingsField(
        "", title="Layer name template"
    )
    layer_rename_patterns: list[str] = SettingsField(
        default_factory=list,
        title="Layer rename patterns",
    )


class LoadClipBatchModel(BaseSettingsModel):
    enabled: bool = SettingsField(True)
    product_types: list[str] = SettingsField(
        default_factory=list,
        title="Product types"
    )
    reel_name: str = SettingsField(
        "OP_LoadedReel",
        title="Reel name"
    )
    clip_name_template: str = SettingsField(
        "{batch}_{folder[name]}_{product[name]}<_{output}>",
        title="Clip name template"
    )
    layer_rename_template: str = SettingsField(
        "", title="Layer name template"
    )
    layer_rename_patterns: list[str] = SettingsField(
        default_factory=list,
        title="Layer rename patterns",
    )


class LoaderPluginsModel(BaseSettingsModel):
    LoadClip: LoadClipModel = SettingsField(
        default_factory=LoadClipModel,
        title="Load Clip"
    )
    LoadClipBatch: LoadClipBatchModel = SettingsField(
        default_factory=LoadClipBatchModel,
        title="Load as clip to current batch"
    )


DEFAULT_LOADER_SETTINGS = {
    "LoadClip": {
        "enabled": True,
        "product_types": [
            "render2d",
            "source",
            "plate",
            "render",
            "review"
        ],
        "reel_group_name": "OpenPype_Reels",
        "reel_name": "Loaded",
        "clip_name_template": "{folder[name]}_{product[name]}<_{output}>",
        "layer_rename_template": "{folder[name]}_{product[name]}<_{output}>",
        "layer_rename_patterns": [
            "rgb",
            "rgba"
        ]
    },
    "LoadClipBatch": {
        "enabled": True,
        "product_types": [
            "render2d",
            "source",
            "plate",
            "render",
            "review"
        ],
        "reel_name": "OP_LoadedReel",
        "clip_name_template": "{batch}_{folder[name]}_{product[name]}<_{output}>",
        "layer_rename_template": "{folder[name]}_{product[name]}<_{output}>",
        "layer_rename_patterns": [
            "rgb",
            "rgba"
        ]
    }
}
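The clip name templates above mark an optional section with angle brackets: `<_{output}>` is kept only when `output` is available. A minimal sketch of that convention, assuming the bracketed part is simply dropped when a key inside it is missing (the real formatting lives in the loader plugins):

```python
import re


# Sketch only: <...> marks an optional section dropped when its keys are
# missing. The helper name is illustrative.
def format_clip_name(template, data):
    def resolve_optional(match):
        try:
            return match.group(1).format(**data)
        except (KeyError, IndexError):
            return ""

    # Resolve optional <...> sections first, then the required tokens.
    return re.sub(r"<([^<>]+)>", resolve_optional, template).format(**data)


data = {"folder": {"name": "sh010"}, "product": {"name": "plate"}}
print(format_clip_name("{folder[name]}_{product[name]}<_{output}>", data))
# sh010_plate
data["output"] = "h264"
print(format_clip_name("{folder[name]}_{product[name]}<_{output}>", data))
# sh010_plate_h264
```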
@ -1,33 +0,0 @@
from ayon_server.settings import BaseSettingsModel, SettingsField

from .imageio import FlameImageIOModel, DEFAULT_IMAGEIO_SETTINGS
from .create_plugins import CreatePluginsModel, DEFAULT_CREATE_SETTINGS
from .publish_plugins import PublishPluginsModel, DEFAULT_PUBLISH_SETTINGS
from .loader_plugins import LoaderPluginsModel, DEFAULT_LOADER_SETTINGS


class FlameSettings(BaseSettingsModel):
    imageio: FlameImageIOModel = SettingsField(
        default_factory=FlameImageIOModel,
        title="Color Management (ImageIO)"
    )
    create: CreatePluginsModel = SettingsField(
        default_factory=CreatePluginsModel,
        title="Create plugins"
    )
    publish: PublishPluginsModel = SettingsField(
        default_factory=PublishPluginsModel,
        title="Publish plugins"
    )
    load: LoaderPluginsModel = SettingsField(
        default_factory=LoaderPluginsModel,
        title="Loader plugins"
    )


DEFAULT_VALUES = {
    "imageio": DEFAULT_IMAGEIO_SETTINGS,
    "create": DEFAULT_CREATE_SETTINGS,
    "publish": DEFAULT_PUBLISH_SETTINGS,
    "load": DEFAULT_LOADER_SETTINGS
}
@ -1,196 +0,0 @@
from ayon_server.settings import (
    BaseSettingsModel,
    SettingsField,
    task_types_enum,
)


class XMLPresetAttrsFromCommentsModel(BaseSettingsModel):
    _layout = "expanded"
    name: str = SettingsField("", title="Attribute name")
    type: str = SettingsField(
        default_factory=str,
        title="Attribute type",
        enum_resolver=lambda: ["number", "float", "string"]
    )


class AddTasksModel(BaseSettingsModel):
    _layout = "expanded"
    name: str = SettingsField("", title="Task name")
    type: str = SettingsField(
        default_factory=str,
        title="Task type",
        enum_resolver=task_types_enum
    )
    create_batch_group: bool = SettingsField(
        True,
        title="Create batch group"
    )


class CollectTimelineInstancesModel(BaseSettingsModel):
    _isGroup = True

    xml_preset_attrs_from_comments: list[XMLPresetAttrsFromCommentsModel] = (
        SettingsField(
            default_factory=list,
            title="XML presets attributes parsable from segment comments"
        )
    )
    add_tasks: list[AddTasksModel] = SettingsField(
        default_factory=list,
        title="Add tasks"
    )


class ExportPresetsMappingModel(BaseSettingsModel):
    _layout = "expanded"

    name: str = SettingsField(
        ...,
        title="Name"
    )
    active: bool = SettingsField(True, title="Is active")
    export_type: str = SettingsField(
        "File Sequence",
        title="Export clip type",
        enum_resolver=lambda: ["Movie", "File Sequence", "Sequence Publish"]
    )
    ext: str = SettingsField("exr", title="Output extension")
    xml_preset_file: str = SettingsField(
        "OpenEXR (16-bit fp DWAA).xml",
        title="XML preset file (with ext)"
    )
    colorspace_out: str = SettingsField(
        "ACES - ACEScg",
        title="Output color (imageio)"
    )
    # TODO remove when resolved or v3 is not a thing anymore
    # NOTE next 4 attributes were grouped under 'other_parameters' but that
    #   created inconsistency with v3 settings and harder conversion handling
    #   - it can be moved back but keep in mind that it must be handled in
    #   the v3 conversion script too
    xml_preset_dir: str = SettingsField(
        "",
        title="XML preset directory"
    )
    parsed_comment_attrs: bool = SettingsField(
        True,
        title="Parsed comment attributes"
    )
    representation_add_range: bool = SettingsField(
        True,
        title="Add range to representation name"
    )
    representation_tags: list[str] = SettingsField(
        default_factory=list,
        title="Representation tags"
    )
    load_to_batch_group: bool = SettingsField(
        True,
        title="Load to batch group reel"
    )
    batch_group_loader_name: str = SettingsField(
        "LoadClipBatch",
        title="Use loader name"
    )
    filter_path_regex: str = SettingsField(
        ".*",
        title="Regex in clip path"
    )


class ExtractProductResourcesModel(BaseSettingsModel):
    _isGroup = True

    keep_original_representation: bool = SettingsField(
        False,
        title="Publish clip's original media"
    )
    export_presets_mapping: list[ExportPresetsMappingModel] = SettingsField(
        default_factory=list,
        title="Export presets mapping"
    )


class IntegrateBatchGroupModel(BaseSettingsModel):
    enabled: bool = SettingsField(
        False,
        title="Enabled"
    )


class PublishPluginsModel(BaseSettingsModel):
    CollectTimelineInstances: CollectTimelineInstancesModel = SettingsField(
        default_factory=CollectTimelineInstancesModel,
        title="Collect Timeline Instances"
    )

    ExtractProductResources: ExtractProductResourcesModel = SettingsField(
        default_factory=ExtractProductResourcesModel,
        title="Extract Product Resources"
    )

    IntegrateBatchGroup: IntegrateBatchGroupModel = SettingsField(
        default_factory=IntegrateBatchGroupModel,
        title="IntegrateBatchGroup"
    )


DEFAULT_PUBLISH_SETTINGS = {
    "CollectTimelineInstances": {
        "xml_preset_attrs_from_comments": [
            {
                "name": "width",
                "type": "number"
            },
            {
                "name": "height",
                "type": "number"
            },
            {
                "name": "pixelRatio",
                "type": "float"
            },
            {
                "name": "resizeType",
                "type": "string"
            },
            {
                "name": "resizeFilter",
                "type": "string"
            }
        ],
        "add_tasks": [
            {
                "name": "compositing",
                "type": "Compositing",
                "create_batch_group": True
            }
        ]
    },
    "ExtractProductResources": {
        "keep_original_representation": False,
        "export_presets_mapping": [
            {
                "name": "exr16fpdwaa",
                "active": True,
                "export_type": "File Sequence",
                "ext": "exr",
                "xml_preset_file": "OpenEXR (16-bit fp DWAA).xml",
                "colorspace_out": "ACES - ACEScg",
                "xml_preset_dir": "",
                "parsed_comment_attrs": True,
                "representation_add_range": True,
                "representation_tags": [],
                "load_to_batch_group": True,
                "batch_group_loader_name": "LoadClipBatch",
                "filter_path_regex": ".*"
            }
        ]
    },
    "IntegrateBatchGroup": {
        "enabled": False
    }
}
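`xml_preset_attrs_from_comments` above declares a name and a type for every attribute parsed out of segment comments. A minimal sketch of the type coercion this implies, assuming `number` maps to `int` and `float`/`string` to their Python builtins (an assumption, not the collector's actual code):

```python
# Sketch only: coerce raw comment values according to the declared types.
_CONVERTERS = {"number": int, "float": float, "string": str}


def coerce_comment_attrs(raw_attrs, attr_defs):
    types = {attr["name"]: attr["type"] for attr in attr_defs}
    return {
        name: _CONVERTERS[types[name]](value)
        for name, value in raw_attrs.items()
        if name in types
    }


attr_defs = [
    {"name": "width", "type": "number"},
    {"name": "pixelRatio", "type": "float"},
]
print(coerce_comment_attrs({"width": "1920", "pixelRatio": "1.0"}, attr_defs))
# {'width': 1920, 'pixelRatio': 1.0}
```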
@ -1,10 +1,13 @@
import tempfile
import pyblish.api

from ayon_core.pipeline import OptionalPyblishPluginMixin
from ayon_houdini.api import lib, plugin
from ayon_houdini.api.pipeline import IS_HEADLESS


class ExtractActiveViewThumbnail(plugin.HoudiniExtractorPlugin):
class ExtractActiveViewThumbnail(plugin.HoudiniExtractorPlugin,
                                 OptionalPyblishPluginMixin):
    """Set instance thumbnail to a screengrab of current active viewport.

    This makes it so that if an instance does not have a thumbnail set yet that

@ -17,6 +20,9 @@ class ExtractActiveViewThumbnail(plugin.HoudiniExtractorPlugin):
    families = ["workfile"]

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        if IS_HEADLESS:
            self.log.debug(
                "Skip extraction of active view thumbnail, due to being in"
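The hunk above mixes in `OptionalPyblishPluginMixin` so that `is_active` can gate the extractor. A rough sketch of the usual shape of that gate, assumed from how it is called here rather than copied from ayon-core:

```python
# Sketch only: the assumed optional-plugin gate behind is_active().
class OptionalMixinSketch:
    optional = True
    active = True  # default state of the toggle in the publisher UI

    def is_active(self, instance_data):
        if not self.optional:
            # Non-optional plugins always run.
            return True
        # Optional plugins respect the per-instance toggle.
        return instance_data.get("active", self.active)
```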
@ -58,7 +58,7 @@ class ValidateWorkfilePathsModel(BaseSettingsModel):
    )


class BasicValidateModel(BaseSettingsModel):
class BasicEnabledStatesModel(BaseSettingsModel):
    enabled: bool = SettingsField(title="Enabled")
    optional: bool = SettingsField(title="Optional")
    active: bool = SettingsField(title="Active")

@ -78,25 +78,30 @@ class PublishPluginsModel(BaseSettingsModel):
        default_factory=CollectLocalRenderInstancesModel,
        title="Collect Local Render Instances"
    )
    ValidateInstanceInContextHoudini: BasicValidateModel = SettingsField(
        default_factory=BasicValidateModel,
    ValidateInstanceInContextHoudini: BasicEnabledStatesModel = SettingsField(
        default_factory=BasicEnabledStatesModel,
        title="Validate Instance is in same Context",
        section="Validators")
    ValidateMeshIsStatic: BasicValidateModel = SettingsField(
        default_factory=BasicValidateModel,
    ValidateMeshIsStatic: BasicEnabledStatesModel = SettingsField(
        default_factory=BasicEnabledStatesModel,
        title="Validate Mesh is Static")
    ValidateReviewColorspace: BasicValidateModel = SettingsField(
        default_factory=BasicValidateModel,
    ValidateReviewColorspace: BasicEnabledStatesModel = SettingsField(
        default_factory=BasicEnabledStatesModel,
        title="Validate Review Colorspace")
    ValidateSubsetName: BasicValidateModel = SettingsField(
        default_factory=BasicValidateModel,
    ValidateSubsetName: BasicEnabledStatesModel = SettingsField(
        default_factory=BasicEnabledStatesModel,
        title="Validate Subset Name")
    ValidateUnrealStaticMeshName: BasicValidateModel = SettingsField(
        default_factory=BasicValidateModel,
    ValidateUnrealStaticMeshName: BasicEnabledStatesModel = SettingsField(
        default_factory=BasicEnabledStatesModel,
        title="Validate Unreal Static Mesh Name")
    ValidateWorkfilePaths: ValidateWorkfilePathsModel = SettingsField(
        default_factory=ValidateWorkfilePathsModel,
        title="Validate workfile paths settings")
    ExtractActiveViewThumbnail: BasicEnabledStatesModel = SettingsField(
        default_factory=BasicEnabledStatesModel,
        title="Extract Active View Thumbnail",
        section="Extractors"
    )


DEFAULT_HOUDINI_PUBLISH_SETTINGS = {

@ -153,5 +158,10 @@ DEFAULT_HOUDINI_PUBLISH_SETTINGS = {
            "$HIP",
            "$JOB"
        ]
    },
    "ExtractActiveViewThumbnail": {
        "enabled": True,
        "optional": False,
        "active": True
    }
}
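The `BasicEnabledStatesModel` triple above is what ends up on the plugin classes at publish time. A rough sketch of how such a settings block is typically applied (illustrative; ayon-core does this through its settings machinery):

```python
# Sketch only: copy an enabled/optional/active block onto a plugin class.
def apply_plugin_settings(plugin_cls, settings):
    for attr in ("enabled", "optional", "active"):
        if attr in settings:
            setattr(plugin_cls, attr, settings[attr])
    return plugin_cls
```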
@ -561,7 +561,7 @@ def read_avalon_data(node):
        node (nuke.Node): Nuke node object

    Returns:
        list: A list of nuke.Knob object
        Dict[str, nuke.Knob]: A dictionary of knob name to nuke.Knob objects

    """
    def compat_prefixed(knob_name):

@ -613,7 +613,7 @@ def get_node_path(path, padding=4):
        path (str): The path to render to.

    Returns:
        tuple: head, padding, tail (extension)
        Tuple[str, int, str]: head, padding, tail (extension)

    Examples:
        >>> get_frame_path("test.exr")
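For reference, a minimal re-implementation sketch of the head/padding/tail split that the corrected `Tuple[str, int, str]` annotation above describes (assumptions: a `%04d`-style frame token and the fallback padding; this is not the ayon-nuke source):

```python
import re


# Sketch only: split "render.%04d.exr" style paths into head, padding, tail.
def split_frame_path(path, padding=4):
    match = re.search(r"%(\d+)d\.(\w+)$", path)
    if not match:
        # No frame token: fall back to the default padding.
        head, _, ext = path.rpartition(".")
        return head, padding, "." + ext
    return (
        path[: match.start()].rstrip("."),
        int(match.group(1)),
        "." + match.group(2),
    )


print(split_frame_path("render.%04d.exr"))  # ('render', 4, '.exr')
print(split_frame_path("test.exr"))         # ('test', 4, '.exr')
```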
@ -655,8 +655,7 @@ def get_nuke_imageio_settings():


def get_imageio_node_setting(node_class, plugin_name, product_name):
    ''' Get preset data for dataflow (fileType, compression, bitDepth)
    '''
    """Get preset data for dataflow (fileType, compression, bitDepth)"""
    imageio_nodes = get_nuke_imageio_settings()["nodes"]
    required_nodes = imageio_nodes["required_nodes"]


@ -686,8 +685,8 @@ def get_imageio_node_setting(node_class, plugin_name, product_name):
def get_imageio_node_override_setting(
    node_class, plugin_name, product_name, knobs_settings
):
    ''' Get imageio node overrides from settings
    '''
    """ Get imageio node overrides from settings
    """
    imageio_nodes = get_nuke_imageio_settings()["nodes"]
    override_nodes = imageio_nodes["override_nodes"]


@ -745,8 +744,7 @@ def get_imageio_node_override_setting(


def get_imageio_input_colorspace(filename):
    ''' Get input file colorspace based on regex in settings.
    '''
    """Get input file colorspace based on regex in settings."""
    imageio_regex_inputs = (
        get_nuke_imageio_settings()["regex_inputs"]["inputs"])


@ -791,8 +789,7 @@ def get_view_process_node():


def on_script_load():
    ''' Callback for ffmpeg support
    '''
    """Callback for ffmpeg support"""
    if nuke.env["LINUX"]:
        nuke.tcl('load ffmpegReader')
        nuke.tcl('load ffmpegWriter')

@ -815,7 +812,7 @@ def check_inventory_versions():
    # get all Loader nodes by avalon attribute metadata
    node_with_repre_id = []
    repre_ids = set()
    # Find all containers and collect it's node and representation ids
    # Find all containers and collect its node and representation ids
    for node in nuke.allNodes():
        container = parse_container(node)


@ -896,8 +893,7 @@ def check_inventory_versions():


def writes_version_sync():
    ''' Callback synchronizing version of publishable write nodes
    '''
    """Callback synchronizing version of publishable write nodes"""
    try:
        rootVersion = get_version_from_path(nuke.root().name())
        padding = len(rootVersion)

@ -934,8 +930,7 @@ def writes_version_sync():


def version_up_script():
    ''' Raising working script's version
    '''
    """Raising working script's version"""
    import nukescripts
    nukescripts.script_and_write_nodes_version_up()


@ -957,14 +952,14 @@ def check_product_name_exists(nodes, product_name):


def format_anatomy(data):
    ''' Helping function for formatting of anatomy paths
    """Helping function for formatting of anatomy paths

    Arguments:
        data (dict): dictionary with attributes used for formatting

    Return:
        path (str)
    '''
        str: Formatted path.
    """

    project_name = get_current_project_name()
    anatomy = Anatomy(project_name)

@ -996,9 +991,8 @@ def format_anatomy(data):
    return anatomy.format(data)


def script_name():
    ''' Returns nuke script path
    '''
def script_name() -> str:
    """Returns nuke script path"""
    return nuke.root().knob("name").value()



@ -1100,7 +1094,7 @@ def create_write_node(
    linked_knobs=None,
    **kwargs
):
    ''' Creating write node which is group node
    """Creating write node which is group node

    Arguments:
        name (str): name of node

@ -1134,8 +1128,8 @@ def create_write_node(

    Return:
        node (obj): group node with avalon data as Knobs
    '''
        node (nuke.Node): group node with avalon data as Knobs
    """
    # Ensure name does not contain any invalid characters.
    special_chars = re.escape("!@#$%^&*()=[]{}|\\;',.<>/?~+-")
    special_chars_regex = re.compile(f"[{special_chars}]")

@ -1300,7 +1294,7 @@ def create_write_node(


def set_node_knobs_from_settings(node, knob_settings, **kwargs):
    """ Overriding knob values from settings
    """Overriding knob values from settings

    Using `schema_nuke_knob_inputs` for knob type definitions.


@ -1393,8 +1387,7 @@ def color_gui_to_int(color_gui):

def create_backdrop(label="", color=None, layer=0,
                    nodes=None):
    """
    Create Backdrop node
    """Create Backdrop node

    Arguments:
        color (str): nuke compatible string with color code

@ -1402,6 +1395,9 @@ def create_backdrop(label="", color=None, layer=0,
        label (str): the message
        nodes (list): list of nodes to be wrapped into backdrop

    Returns:
        nuke.Node: The created backdrop node.

    """
    assert isinstance(nodes, list), "`nodes` should be a list of nodes"


@ -1491,12 +1487,12 @@ class WorkfileSettings(object):
        return [n for n in self._nodes if filter in n.Class()]

    def set_viewers_colorspace(self, imageio_nuke):
        ''' Adds correct colorspace to viewer
        """Adds correct colorspace to viewer

        Arguments:
            imageio_nuke (dict): nuke colorspace configurations

        '''
        """
        filter_knobs = [
            "viewerProcess",
            "wipe_position",

@ -1560,12 +1556,12 @@ class WorkfileSettings(object):
        return StringTemplate(display_view).format_strict(self.formatting_data)

    def set_root_colorspace(self, imageio_host):
        ''' Adds correct colorspace to root
        """Adds correct colorspace to root

        Arguments:
            imageio_host (dict): host colorspace configurations

        '''
        """
        config_data = get_current_context_imageio_config_preset()

        workfile_settings = imageio_host["workfile"]

@ -1819,9 +1815,8 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
        return new_path

    def set_writes_colorspace(self):
        ''' Adds correct colorspace to write node dict

        '''
        """ Adds correct colorspace to write node dict
        """
        for node in nuke.allNodes(filter="Group", group=self._root_node):
            log.info("Setting colorspace to `{}`".format(node.name()))


@ -1943,8 +1938,8 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
                    knobs["to"]))

    def set_colorspace(self):
        ''' Setting colorspace following presets
        '''
        """ Setting colorspace following presets
        """
        # get imageio
        nuke_colorspace = get_nuke_imageio_settings()


@ -2152,9 +2147,8 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.


def get_write_node_template_attr(node):
    ''' Gets all defined data from presets

    '''
    """ Gets all defined data from presets
    """

    # TODO: add identifiers to settings and rename settings key
    plugin_names_mapping = {
@ -1,202 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -1,4 +0,0 @@
Photoshop Addon
===============

Integration with Adobe Photoshop.
@ -1,15 +0,0 @@
from .version import __version__
from .addon import (
    PHOTOSHOP_ADDON_ROOT,
    PhotoshopAddon,
    get_launch_script_path,
)


__all__ = (
    "__version__",

    "PHOTOSHOP_ADDON_ROOT",
    "PhotoshopAddon",
    "get_launch_script_path",
)
@ -1,38 +0,0 @@
import os
from ayon_core.addon import AYONAddon, IHostAddon

from .version import __version__

PHOTOSHOP_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))


class PhotoshopAddon(AYONAddon, IHostAddon):
    name = "photoshop"
    version = __version__
    host_name = "photoshop"

    def add_implementation_envs(self, env, _app):
        """Modify environments to contain all required for implementation."""
        defaults = {
            "AYON_LOG_NO_COLORS": "1",
            "WEBSOCKET_URL": "ws://localhost:8099/ws/"
        }
        for key, value in defaults.items():
            if not env.get(key):
                env[key] = value

    def get_workfile_extensions(self):
        return [".psd", ".psb"]

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(PHOTOSHOP_ADDON_ROOT, "hooks")
        ]


def get_launch_script_path():
    return os.path.join(
        PHOTOSHOP_ADDON_ROOT, "api", "launch_script.py"
    )
@ -1,257 +0,0 @@
# Photoshop Integration

## Setup

The Photoshop integration requires two components to work: `extension` and `server`.

### Extension

To install the extension, download the [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).

```
ExManCmd /install {path to addon}/api/extension.zxp
```

### Server

The easiest way to launch the server and Photoshop is with:

```
python -c ^"import ayon_photoshop;ayon_photoshop.launch(""C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe"")^"
```

`ayon_photoshop.launch` launches the application and server, and also closes the server when Photoshop exits.

## Usage

The Photoshop extension can be found under `Window > Extensions > Ayon`. Once launched you should be presented with a panel like this:



## Developing

### Extension
When developing the extension you can load it [unsigned](https://github.com/Adobe-CEP/CEP-Resources/blob/master/CEP_9.x/Documentation/CEP%209.0%20HTML%20Extension%20Cookbook.md#debugging-unsigned-extensions).

When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).

```
ZXPSignCmd -selfSignedCert NA NA Ayon Ayon-Photoshop Ayon extension.p12
ZXPSignCmd -sign {path to avalon-core}\avalon\photoshop\extension {path to avalon-core}\avalon\photoshop\extension.zxp extension.p12 avalon
```

### Plugin Examples

These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py).

#### Creator Plugin
```python
from avalon import photoshop


class CreateImage(photoshop.Creator):
    """Image folder for publish."""

    name = "imageDefault"
    label = "Image"
    product_type = "image"

    def __init__(self, *args, **kwargs):
        super(CreateImage, self).__init__(*args, **kwargs)
```

#### Collector Plugin
```python
import pythoncom

import pyblish.api


class CollectInstances(pyblish.api.ContextPlugin):
    """Gather instances by LayerSet and file metadata

    This collector takes into account assets that are associated with
    a LayerSet and marked with a unique identifier;

    Identifier:
        id (str): "ayon.create.instance"
    """

    label = "Instances"
    order = pyblish.api.CollectorOrder
    hosts = ["photoshop"]
    families_mapping = {
        "image": []
    }

    def process(self, context):
        # Necessary call when running in a different thread which pyblish-qml
        # can be.
        pythoncom.CoInitialize()

        # PhotoshopClientStub is provided by the addon's API (the stub
        # that talks to the extension over websockets).
        photoshop_client = PhotoshopClientStub()
        layers = photoshop_client.get_layers()
        layers_meta = photoshop_client.get_layers_metadata()
        for layer in layers:
            layer_data = photoshop_client.read(layer, layers_meta)

            # Skip layers without metadata.
            if layer_data is None:
                continue

            # Skip containers.
            if "container" in layer_data["id"]:
                continue

            # child_layers = [*layer.Layers]
            # self.log.debug("child_layers {}".format(child_layers))
            # if not child_layers:
            #     self.log.info("%s skipped, it was empty." % layer.Name)
            #     continue

            instance = context.create_instance(layer.name)
            instance.append(layer)
            instance.data.update(layer_data)
            instance.data["families"] = self.families_mapping[
                layer_data["productType"]
            ]
            instance.data["publish"] = layer.visible

            # Produce diagnostic message for any graphical
            # user interface interested in visualising it.
            self.log.info("Found: \"%s\" " % instance.data["name"])
```

#### Extractor Plugin
```python
import os

from ayon_core.pipeline import publish
from ayon_photoshop import api as photoshop


class ExtractImage(publish.Extractor):
    """Produce a flattened image file from instance

    This plug-in takes into account only the layers in the group.
    """

    label = "Extract Image"
    hosts = ["photoshop"]
    families = ["image"]
    formats = ["png", "jpg"]

    def process(self, instance):

        staging_dir = self.staging_dir(instance)
        self.log.info("Outputting image to {}".format(staging_dir))

        # Perform extraction
        stub = photoshop.stub()
        files = {}
        with photoshop.maintained_selection():
            self.log.info("Extracting %s" % str(list(instance)))
            with photoshop.maintained_visibility():
                # Hide all other layers.
                extract_ids = set(
                    ll.id
                    for ll in stub.get_layers_in_layers([instance[0]])
                )

                for layer in stub.get_layers():
                    # limit unnecessary calls to client
                    if layer.visible and layer.id not in extract_ids:
                        stub.set_visible(layer.id, False)

                save_options = []
                if "png" in self.formats:
                    save_options.append('png')
                if "jpg" in self.formats:
                    save_options.append('jpg')

                file_basename = os.path.splitext(
                    stub.get_active_document_name()
                )[0]
                for extension in save_options:
                    _filename = "{}.{}".format(file_basename, extension)
                    files[extension] = _filename

                    full_filename = os.path.join(staging_dir, _filename)
                    stub.saveAs(full_filename, extension, True)

        representations = []
        for extension, filename in files.items():
            representations.append({
                "name": extension,
                "ext": extension,
                "files": filename,
                "stagingDir": staging_dir
            })
        instance.data["representations"] = representations
        instance.data["stagingDir"] = staging_dir

        self.log.info(f"Extracted {instance} to {staging_dir}")
```

#### Loader Plugin
```python
from avalon import photoshop
from ayon_core.pipeline import load, get_representation_path

stub = photoshop.stub()


class ImageLoader(load.LoaderPlugin):
    """Load images

    Stores the imported asset in a container named after the asset.
    """

    families = ["image"]
    representations = {"*"}

    def load(self, context, name=None, namespace=None, data=None):
        path = self.filepath_from_context(context)
        with photoshop.maintained_selection():
            layer = stub.import_smart_object(path)

        self[:] = [layer]

        return photoshop.containerise(
            name,
            namespace,
            layer,
            context,
            self.__class__.__name__
        )

    def update(self, container, context):
        layer = container.pop("layer")
        repre_entity = context["representation"]
        with photoshop.maintained_selection():
            stub.replace_smart_object(
                layer, get_representation_path(repre_entity)
            )

        stub.imprint(
            layer, {"representation": repre_entity["id"]}
        )

    def remove(self, container):
        container["layer"].Delete()

    def switch(self, container, context):
        self.update(container, context)
```

For easier debugging of JavaScript, see
https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1:
add `--enable-blink-features=ShadowDOMV0,CustomElementsV0` when starting Chrome,
then open `localhost:8078` (the port is set in `photoshop\extension\.debug`).

Or use Visual Studio Code: https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01

Or install the CEF client from https://github.com/Adobe-CEP/CEP-Resources/tree/master/CEP_9.x

## Resources
  - https://github.com/lohriialo/photoshop-scripting-python
  - https://www.adobe.com/devnet/photoshop/scripting.html
  - https://github.com/Adobe-CEP/Getting-Started-guides
  - https://github.com/Adobe-CEP/CEP-Resources
@ -1,41 +0,0 @@
"""Public API

Anything that isn't defined here is INTERNAL and unreliable for external use.

"""

from .launch_logic import stub

from .pipeline import (
    PhotoshopHost,
    ls,
    containerise
)
from .plugin import (
    PhotoshopLoader,
    get_unique_layer_name
)


from .lib import (
    maintained_selection,
    maintained_visibility
)

__all__ = [
    # launch_logic
    "stub",

    # pipeline
    "PhotoshopHost",
    "ls",
    "containerise",

    # Plugin
    "PhotoshopLoader",
    "get_unique_layer_name",

    # lib
    "maintained_selection",
    "maintained_visibility",
]
Binary file not shown.
@ -1,9 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionList>
    <Extension Id="io.ynput.PS.panel">
        <HostList>
            <Host Name="PHXS" Port="8078"/>
            <Host Name="FLPR" Port="8078"/>
        </HostList>
    </Extension>
</ExtensionList>
@ -1,53 +0,0 @@
<?xml version='1.0' encoding='UTF-8'?>
<ExtensionManifest ExtensionBundleId="io.ynput.PS.panel" ExtensionBundleVersion="1.1.0" Version="7.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <ExtensionList>
        <Extension Id="io.ynput.PS.panel" Version="1.0.1" />
    </ExtensionList>
    <ExecutionEnvironment>
        <HostList>
            <Host Name="PHSP" Version="19" />
            <Host Name="PHXS" Version="19" />
        </HostList>
        <LocaleList>
            <Locale Code="All" />
        </LocaleList>
        <RequiredRuntimeList>
            <RequiredRuntime Name="CSXS" Version="7.0" />
        </RequiredRuntimeList>
    </ExecutionEnvironment>
    <DispatchInfoList>
        <Extension Id="io.ynput.PS.panel">
            <DispatchInfo>
                <Resources>
                    <MainPath>./index.html</MainPath>
                    <CEFCommandLine />
                </Resources>
                <Lifecycle>
                    <AutoVisible>true</AutoVisible>
                    <StartOn>
                        <!-- Photoshop dispatches this event on startup -->
                        <Event>applicationActivate</Event>
                        <Event>com.adobe.csxs.events.ApplicationInitialized</Event>
                    </StartOn>
                </Lifecycle>
                <UI>
                    <Type>Panel</Type>
                    <Menu>AYON</Menu>
                    <Geometry>
                        <Size>
                            <Width>300</Width>
                            <Height>140</Height>
                        </Size>
                        <MaxSize>
                            <Width>400</Width>
                            <Height>200</Height>
                        </MaxSize>
                    </Geometry>
                    <Icons>
                        <Icon Type="Normal">./icons/ayon_logo.png</Icon>
                    </Icons>
                </UI>
            </DispatchInfo>
        </Extension>
    </DispatchInfoList>
</ExtensionManifest>
File diff suppressed because it is too large
@ -1,300 +0,0 @@
|
|||
// client facing part of extension, creates WSRPC client (jsx cannot
|
||||
// do that)
|
||||
// consumes RPC calls from server (OpenPype) calls ./host/index.jsx and
|
||||
// returns values back (in json format)
|
||||
|
||||
var logReturn = function(result){ log.warn('Result: ' + result);};
|
||||
|
||||
var csInterface = new CSInterface();
|
||||
|
||||
log.warn("script start");
|
||||
|
||||
WSRPC.DEBUG = false;
|
||||
WSRPC.TRACE = false;
|
||||
|
||||
function myCallBack(){
|
||||
log.warn("Triggered index.jsx");
|
||||
}
|
||||
// importing through manifest.xml isn't working because relative paths
|
||||
// possibly TODO
|
||||
jsx.evalFile('./host/index.jsx', myCallBack);
|
||||
|
||||
function runEvalScript(script) {
|
||||
// because of asynchronous nature of functions in jsx
|
||||
// this waits for response
|
||||
return new Promise(function(resolve, reject){
|
||||
csInterface.evalScript(script, resolve);
|
||||
});
|
||||
}
|
||||
|
||||
/** main entry point **/
|
||||
startUp("WEBSOCKET_URL");
|
||||
|
||||
// get websocket server url from environment value
|
||||
async function startUp(url){
|
||||
log.warn("url", url);
|
||||
promis = runEvalScript("getEnv('" + url + "')");
|
||||
|
||||
var res = await promis;
|
||||
// run rest only after resolved promise
|
||||
main(res);
|
||||
}
|
||||
|
||||
function get_extension_version(){
|
||||
/** Returns version number from extension manifest.xml **/
|
||||
log.debug("get_extension_version")
|
||||
var path = csInterface.getSystemPath(SystemPath.EXTENSION);
|
||||
log.debug("extension path " + path);
|
||||
|
||||
var result = window.cep.fs.readFile(path + "/CSXS/manifest.xml");
|
||||
var version = undefined;
|
||||
if(result.err === 0){
|
||||
if (window.DOMParser) {
|
||||
const parser = new DOMParser();
|
||||
const xmlDoc = parser.parseFromString(result.data.toString(), 'text/xml');
|
||||
const children = xmlDoc.children;
|
||||
|
||||
for (let i = 0; i <= children.length; i++) {
|
||||
if (children[i] && children[i].getAttribute('ExtensionBundleVersion')) {
|
||||
version = children[i].getAttribute('ExtensionBundleVersion');
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return version
|
||||
}
|
||||
|
||||
function main(websocket_url){
|
||||
// creates connection to 'websocket_url', registers routes
|
||||
log.warn("websocket_url", websocket_url);
|
||||
var default_url = 'ws://localhost:8099/ws/';
|
||||
|
||||
if (websocket_url == ''){
|
||||
websocket_url = default_url;
|
||||
}
|
||||
log.warn("connecting to:", websocket_url);
|
||||
RPC = new WSRPC(websocket_url, 5000); // spin connection
|
||||
|
||||
RPC.connect();
|
||||
|
||||
log.warn("connected");
|
||||
|
||||
function EscapeStringForJSX(str){
|
||||
// Replaces:
|
||||
// \ with \\
|
||||
// ' with \'
|
||||
// " with \"
|
||||
// See: https://stackoverflow.com/a/3967927/5285364
|
||||
return str.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/"/g, '\\"');
|
||||
}
|
||||
|
||||
RPC.addRoute('Photoshop.open', function (data) {
|
||||
log.warn('Server called client route "open":', data);
|
||||
var escapedPath = EscapeStringForJSX(data.path);
|
||||
return runEvalScript("fileOpen('" + escapedPath +"')")
|
||||
.then(function(result){
|
||||
log.warn("open: " + result);
|
||||
return result;
|
||||
});
|
||||
});
|
||||
|
||||
RPC.addRoute('Photoshop.read', function (data) {
|
||||
log.warn('Server called client route "read":', data);
|
||||
return runEvalScript("getHeadline()")
|
||||
.then(function(result){
|
||||
log.warn("getHeadline: " + result);
|
                return result;
            });
    });

    RPC.addRoute('Photoshop.get_layers', function (data) {
        log.warn('Server called client route "get_layers":', data);
        return runEvalScript("getLayers()")
            .then(function(result){
                log.warn("getLayers: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.set_visible', function (data) {
        log.warn('Server called client route "set_visible":', data);
        return runEvalScript("setVisible(" + data.layer_id + ", " +
                             data.visibility + ")")
            .then(function(result){
                log.warn("setVisible: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.get_active_document_name', function (data) {
        log.warn('Server called client route "get_active_document_name":',
                 data);
        return runEvalScript("getActiveDocumentName()")
            .then(function(result){
                log.warn("getActiveDocumentName: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.get_active_document_full_name', function (data) {
        log.warn('Server called client route ' +
                 '"get_active_document_full_name":', data);
        return runEvalScript("getActiveDocumentFullName()")
            .then(function(result){
                log.warn("getActiveDocumentFullName: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.save', function (data) {
        log.warn('Server called client route "save":', data);

        return runEvalScript("save()")
            .then(function(result){
                log.warn("save: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.get_selected_layers', function (data) {
        log.warn('Server called client route "get_selected_layers":', data);

        return runEvalScript("getSelectedLayers()")
            .then(function(result){
                log.warn("get_selected_layers: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.create_group', function (data) {
        log.warn('Server called client route "create_group":', data);

        return runEvalScript("createGroup('" + data.name + "')")
            .then(function(result){
                log.warn("createGroup: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.group_selected_layers', function (data) {
        log.warn('Server called client route "group_selected_layers":',
                 data);

        return runEvalScript("groupSelectedLayers(null, " +
                             "'" + data.name + "')")
            .then(function(result){
                log.warn("group_selected_layers: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.import_smart_object', function (data) {
        log.warn('Server called client route "import_smart_object":', data);
        var escapedPath = EscapeStringForJSX(data.path);
        return runEvalScript("importSmartObject('" + escapedPath + "', " +
                             "'" + data.name + "', " +
                             data.as_reference + ")")
            .then(function(result){
                log.warn("import_smart_object: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.replace_smart_object', function (data) {
        log.warn('Server called client route "replace_smart_object":', data);
        var escapedPath = EscapeStringForJSX(data.path);
        return runEvalScript("replaceSmartObjects(" + data.layer_id + "," +
                             "'" + escapedPath + "'," +
                             "'" + data.name + "')")
            .then(function(result){
                log.warn("replaceSmartObjects: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.delete_layer', function (data) {
        log.warn('Server called client route "delete_layer":', data);
        return runEvalScript("deleteLayer(" + data.layer_id + ")")
            .then(function(result){
                log.warn("delete_layer: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.rename_layer', function (data) {
        log.warn('Server called client route "rename_layer":', data);
        return runEvalScript("renameLayer(" + data.layer_id + ", " +
                             "'" + data.name + "')")
            .then(function(result){
                log.warn("rename_layer: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.select_layers', function (data) {
        log.warn('Server called client route "select_layers":', data);

        return runEvalScript("selectLayers('" + data.layers + "')")
            .then(function(result){
                log.warn("select_layers: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.is_saved', function (data) {
        log.warn('Server called client route "is_saved":', data);

        return runEvalScript("isSaved()")
            .then(function(result){
                log.warn("is_saved: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.saveAs', function (data) {
        log.warn('Server called client route "saveAs":', data);
        var escapedPath = EscapeStringForJSX(data.image_path);
        return runEvalScript("saveAs('" + escapedPath + "', " +
                             "'" + data.ext + "', " +
                             data.as_copy + ")")
            .then(function(result){
                log.warn("saveAs: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.imprint', function (data) {
        log.warn('Server called client route "imprint":', data);
        var escaped = data.payload.replace(/\n/g, "\\n");
        return runEvalScript("imprint('" + escaped + "')")
            .then(function(result){
                log.warn("imprint: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.get_extension_version', function (data) {
        log.warn('Server called client route "get_extension_version":', data);
        return get_extension_version();
    });

    RPC.addRoute('Photoshop.close', function (data) {
        log.warn('Server called client route "close":', data);
        return runEvalScript("close()");
    });

    RPC.call('Photoshop.ping').then(function (data) {
        log.warn('Result for calling server route "ping": ', data);
        return runEvalScript("ping()")
            .then(function(result){
                log.warn("ping: " + result);
                return result;
            });

    }, function (error) {
        log.warn(error);
    });

}

log.warn("end script");
@ -1,2 +0,0 @@
/*! loglevel - v1.6.8 - https://github.com/pimterry/loglevel - (c) 2020 Tim Perry - licensed MIT */
!function(a,b){"use strict";"function"==typeof define&&define.amd?define(b):"object"==typeof module&&module.exports?module.exports=b():a.log=b()}(this,function(){"use strict";function a(a,b){var c=a[b];if("function"==typeof c.bind)return c.bind(a);try{return Function.prototype.bind.call(c,a)}catch(b){return function(){return Function.prototype.apply.apply(c,[a,arguments])}}}function b(){console.log&&(console.log.apply?console.log.apply(console,arguments):Function.prototype.apply.apply(console.log,[console,arguments])),console.trace&&console.trace()}function c(c){return"debug"===c&&(c="log"),typeof console!==i&&("trace"===c&&j?b:void 0!==console[c]?a(console,c):void 0!==console.log?a(console,"log"):h)}function d(a,b){for(var c=0;c<k.length;c++){var d=k[c];this[d]=c<a?h:this.methodFactory(d,a,b)}this.log=this.debug}function e(a,b,c){return function(){typeof console!==i&&(d.call(this,b,c),this[a].apply(this,arguments))}}function f(a,b,d){return c(a)||e.apply(this,arguments)}function g(a,b,c){function e(a){var b=(k[a]||"silent").toUpperCase();if(typeof window!==i){try{return void(window.localStorage[l]=b)}catch(a){}try{window.document.cookie=encodeURIComponent(l)+"="+b+";"}catch(a){}}}function g(){var a;if(typeof window!==i){try{a=window.localStorage[l]}catch(a){}if(typeof a===i)try{var b=window.document.cookie,c=b.indexOf(encodeURIComponent(l)+"=");-1!==c&&(a=/^([^;]+)/.exec(b.slice(c))[1])}catch(a){}return void 0===j.levels[a]&&(a=void 0),a}}var h,j=this,l="loglevel";a&&(l+=":"+a),j.name=a,j.levels={TRACE:0,DEBUG:1,INFO:2,WARN:3,ERROR:4,SILENT:5},j.methodFactory=c||f,j.getLevel=function(){return h},j.setLevel=function(b,c){if("string"==typeof b&&void 0!==j.levels[b.toUpperCase()]&&(b=j.levels[b.toUpperCase()]),!("number"==typeof b&&b>=0&&b<=j.levels.SILENT))throw"log.setLevel() called with invalid level: "+b;if(h=b,!1!==c&&e(b),d.call(j,b,a),typeof console===i&&b<j.levels.SILENT)return"No console available for logging"},j.setDefaultLevel=function(a){g()||j.setLevel(a,!1)},j.enableAll=function(a){j.setLevel(j.levels.TRACE,a)},j.disableAll=function(a){j.setLevel(j.levels.SILENT,a)};var m=g();null==m&&(m=null==b?"WARN":b),j.setLevel(m,!1)}var h=function(){},i="undefined",j=typeof window!==i&&typeof window.navigator!==i&&/Trident\/|MSIE /.test(window.navigator.userAgent),k=["trace","debug","info","warn","error"],l=new g,m={};l.getLogger=function(a){if("string"!=typeof a||""===a)throw new TypeError("You must supply a name when creating a logger.");var b=m[a];return b||(b=m[a]=new g(a,l.getLevel(),l.methodFactory)),b};var n=typeof window!==i?window.log:void 0;return l.noConflict=function(){return typeof window!==i&&window.log===l&&(window.log=n),l},l.getLoggers=function(){return m},l});
@ -1,393 +0,0 @@
(function (global, factory) {
    typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
    typeof define === 'function' && define.amd ? define(factory) :
    (global = global || self, global.WSRPC = factory());
}(this, function () { 'use strict';

    function _classCallCheck(instance, Constructor) {
        if (!(instance instanceof Constructor)) {
            throw new TypeError("Cannot call a class as a function");
        }
    }

    var Deferred = function Deferred() {
        _classCallCheck(this, Deferred);

        var self = this;
        self.resolve = null;
        self.reject = null;
        self.done = false;

        function wrapper(func) {
            return function () {
                if (self.done) throw new Error('Promise already done');
                self.done = true;
                return func.apply(this, arguments);
            };
        }

        self.promise = new Promise(function (resolve, reject) {
            self.resolve = wrapper(resolve);
            self.reject = wrapper(reject);
        });

        self.promise.isPending = function () {
            return !self.done;
        };

        return self;
    };

    function logGroup(group, level, args) {
        console.group(group);
        console[level].apply(this, args);
        console.groupEnd();
    }

    function log() {
        if (!WSRPC.DEBUG) return;
        logGroup('WSRPC.DEBUG', 'trace', arguments);
    }

    function trace(msg) {
        if (!WSRPC.TRACE) return;
        var payload = msg;
        if ('data' in msg) payload = JSON.parse(msg.data);
        logGroup("WSRPC.TRACE", 'trace', [payload]);
    }

    function getAbsoluteWsUrl(url) {
        if (/^\w+:\/\//.test(url)) return url;
        if (typeof window === 'undefined' || window.location.host.length < 1) {
            throw new Error("Can not construct absolute URL from ".concat(
                typeof window === 'undefined' ? 'undefined window' : window.location));
        }
        var scheme = window.location.protocol === "https:" ? "wss:" : "ws:";
        // window.location.host already includes the ":port" suffix when a
        // port is set, so it must not be appended a second time.
        var host = window.location.host;
        var path = url.replace(/^\/+/gm, '');
        return "".concat(scheme, "//").concat(host, "/").concat(path);
    }
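
    // For example (assuming a panel served from https://studio.example:5000),
    // getAbsoluteWsUrl('/ws/') would yield "wss://studio.example:5000/ws/",
    // while an already absolute URL such as "ws://localhost:8099/ws/" is
    // returned unchanged by the first test above.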

    var readyState = Object.freeze({
        0: 'CONNECTING',
        1: 'OPEN',
        2: 'CLOSING',
        3: 'CLOSED'
    });

    var WSRPC = function WSRPC(URL) {
        var reconnectTimeout = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 1000;

        _classCallCheck(this, WSRPC);

        var self = this;
        URL = getAbsoluteWsUrl(URL);
        self.id = 1;
        self.eventId = 0;
        self.socketStarted = false;
        self.eventStore = {
            onconnect: {},
            onerror: {},
            onclose: {},
            onchange: {}
        };
        self.connectionNumber = 0;
        self.oneTimeEventStore = {
            onconnect: [],
            onerror: [],
            onclose: [],
            onchange: []
        };
        self.callQueue = [];

        function createSocket() {
            var ws = new WebSocket(URL);

            var rejectQueue = function rejectQueue() {
                self.connectionNumber++; // rejects incoming calls

                var deferred; // reject all pending calls

                while (0 < self.callQueue.length) {
                    var callObj = self.callQueue.shift();
                    deferred = self.store[callObj.id];
                    delete self.store[callObj.id];

                    if (deferred && deferred.promise.isPending()) {
                        deferred.reject('WebSocket error occurred');
                    }
                } // reject all from the store

                for (var key in self.store) {
                    if (!self.store.hasOwnProperty(key)) continue;
                    deferred = self.store[key];

                    if (deferred && deferred.promise.isPending()) {
                        deferred.reject('WebSocket error occurred');
                    }
                }
            };

            function reconnect(callEvents) {
                setTimeout(function () {
                    try {
                        self.socket = createSocket();
                        self.id = 1;
                    } catch (exc) {
                        callEvents('onerror', exc);
                        delete self.socket;
                        console.error(exc);
                    }
                }, reconnectTimeout);
            }

            ws.onclose = function (err) {
                log('ONCLOSE CALLED', 'STATE', self.public.state());
                trace(err);

                for (var serial in self.store) {
                    if (!self.store.hasOwnProperty(serial)) continue;

                    if (self.store[serial].hasOwnProperty('reject')) {
                        self.store[serial].reject('Connection closed');
                    }
                }

                rejectQueue();
                callEvents('onclose', err);
                callEvents('onchange', err);
                reconnect(callEvents);
            };

            ws.onerror = function (err) {
                log('ONERROR CALLED', 'STATE', self.public.state());
                trace(err);
                rejectQueue();
                callEvents('onerror', err);
                callEvents('onchange', err);
                log('WebSocket has been closed by error: ', err);
            };

            function tryCallEvent(func, event) {
                try {
                    return func(event);
                } catch (e) {
                    if (e.hasOwnProperty('stack')) {
                        log(e.stack);
                    } else {
                        log('Event function', func, 'raised unknown error:', e);
                    }

                    console.error(e);
                }
            }

            function callEvents(evName, event) {
                while (0 < self.oneTimeEventStore[evName].length) {
                    var deferred = self.oneTimeEventStore[evName].shift();
                    if (deferred.hasOwnProperty('resolve') && deferred.promise.isPending()) deferred.resolve();
                }

                for (var i in self.eventStore[evName]) {
                    if (!self.eventStore[evName].hasOwnProperty(i)) continue;
                    var cur = self.eventStore[evName][i];
                    tryCallEvent(cur, event);
                }
            }

            ws.onopen = function (ev) {
                log('ONOPEN CALLED', 'STATE', self.public.state());
                trace(ev);

                while (0 < self.callQueue.length) {
                    // noinspection JSUnresolvedFunction
                    self.socket.send(JSON.stringify(self.callQueue.shift(), 0, 1));
                }

                callEvents('onconnect', ev);
                callEvents('onchange', ev);
            };

            function handleCall(self, data) {
                if (!self.routes.hasOwnProperty(data.method)) throw new Error('Route not found');
                var connectionNumber = self.connectionNumber;
                var deferred = new Deferred();
                deferred.promise.then(function (result) {
                    if (connectionNumber !== self.connectionNumber) return;
                    self.socket.send(JSON.stringify({
                        id: data.id,
                        result: result
                    }));
                }, function (error) {
                    if (connectionNumber !== self.connectionNumber) return;
                    self.socket.send(JSON.stringify({
                        id: data.id,
                        error: error
                    }));
                });
                var func = self.routes[data.method];
                if (self.asyncRoutes[data.method]) return func.apply(deferred, [data.params]);

                function badPromise() {
                    throw new Error("You should register route with async flag.");
                }

                var promiseMock = {
                    resolve: badPromise,
                    reject: badPromise
                };

                try {
                    deferred.resolve(func.apply(promiseMock, [data.params]));
                } catch (e) {
                    deferred.reject(e);
                    console.error(e);
                }
            }

            function handleError(self, data) {
                if (!self.store.hasOwnProperty(data.id)) return log('Unknown callback');
                var deferred = self.store[data.id];
                if (typeof deferred === 'undefined') return log('Confirmation without handler');
                delete self.store[data.id];
                log('REJECTING', data.error);
                deferred.reject(data.error);
            }

            function handleResult(self, data) {
                var deferred = self.store[data.id];
                if (typeof deferred === 'undefined') return log('Confirmation without handler');
                delete self.store[data.id];

                if (data.hasOwnProperty('result')) {
                    return deferred.resolve(data.result);
                }

                return deferred.reject(data.error);
            }
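
            // Wire format handled by the three functions above (inferred from
            // their bodies, not from separate documentation): an incoming call
            // is {"id": n, "method": "...", "params": ...}; the peer replies
            // with {"id": n, "result": ...} on success or {"id": n,
            // "error": ...} on failure, and the Deferred stored under that id
            // is resolved or rejected accordingly.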

            ws.onmessage = function (message) {
                log('ONMESSAGE CALLED', 'STATE', self.public.state());
                trace(message);
                if (message.type !== 'message') return;
                var data;

                try {
                    data = JSON.parse(message.data);
                    log(data);

                    if (data.hasOwnProperty('method')) {
                        return handleCall(self, data);
                    } else if (data.hasOwnProperty('error') && data.error !== null) {
                        return handleError(self, data);
                    } else {
                        return handleResult(self, data);
                    }
                } catch (exception) {
                    var err = {
                        error: exception.message,
                        result: null,
                        id: data ? data.id : null
                    };
                    self.socket.send(JSON.stringify(err));
                    console.error(exception);
                }
            };

            return ws;
        }

        function makeCall(func, args, params) {
            self.id += 2;
            var deferred = new Deferred();
            var callObj = Object.freeze({
                id: self.id,
                method: func,
                params: args
            });
            var state = self.public.state();

            if (state === 'OPEN') {
                self.store[self.id] = deferred;
                self.socket.send(JSON.stringify(callObj));
            } else if (state === 'CONNECTING') {
                log('SOCKET IS', state);
                self.store[self.id] = deferred;
                self.callQueue.push(callObj);
            } else {
                log('SOCKET IS', state);

                if (params && params['noWait']) {
                    deferred.reject("Socket is: ".concat(state));
                } else {
                    self.store[self.id] = deferred;
                    self.callQueue.push(callObj);
                }
            }

            return deferred.promise;
        }

        self.asyncRoutes = {};
        self.routes = {};
        self.store = {};
        self.public = Object.freeze({
            call: function call(func, args, params) {
                return makeCall(func, args, params);
            },
            addRoute: function addRoute(route, callback, isAsync) {
                self.asyncRoutes[route] = isAsync || false;
                self.routes[route] = callback;
            },
            deleteRoute: function deleteRoute(route) {
                delete self.asyncRoutes[route];
                return delete self.routes[route];
            },
            addEventListener: function addEventListener(event, func) {
                var eventId = self.eventId++;
                self.eventStore[event][eventId] = func;
                return eventId;
            },
            removeEventListener: function removeEventListener(event, index) {
                if (self.eventStore[event].hasOwnProperty(index)) {
                    delete self.eventStore[event][index];
                    return true;
                } else {
                    return false;
                }
            },
            onEvent: function onEvent(event) {
                var deferred = new Deferred();
                self.oneTimeEventStore[event].push(deferred);
                return deferred.promise;
            },
            destroy: function destroy() {
                return self.socket.close();
            },
            state: function state() {
                return readyState[this.stateCode()];
            },
            stateCode: function stateCode() {
                if (self.socketStarted && self.socket) return self.socket.readyState;
                return 3;
            },
            connect: function connect() {
                self.socketStarted = true;
                self.socket = createSocket();
            }
        });
        self.public.addRoute('log', function (argsObj) {
            //console.info("Websocket sent: ".concat(argsObj));
        });
        self.public.addRoute('ping', function (data) {
            return data;
        });
        return self.public;
    };

    WSRPC.DEBUG = false;
    WSRPC.TRACE = false;

    return WSRPC;

}));
//# sourceMappingURL=wsrpc.js.map
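
The frozen "public" object returned above is the entire client API. A minimal
usage sketch (the endpoint URL and route names here are illustrative, not
taken from this repository):

// Assumes the wsrpc.js bundle above has been loaded.
var rpc = new WSRPC('ws://127.0.0.1:8099/ws/', 2000);  // retry every 2s
rpc.addRoute('app.say_hello', function (params) {
    return 'hello ' + params.name;  // return value becomes the call result
});
rpc.addEventListener('onconnect', function () {
    rpc.call('server.ping', {when: Date.now()})
        .then(function (result) { console.log('pong:', result); });
});
rpc.connect();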
File diff suppressed because one or more lines are too long
@ -1,774 +0,0 @@
/*
 _ ______  __  _
| / ___\ \/ / (_)___
_ | \___ \\ / | / __|
| |_| |___) / \ _ | \__ \
\___/|____/_/\_(_)/ |___/
|__/
 _ ____
/\ /\___ _ __ ___(_) ___ _ __ |___ \
\ \ / / _ \ '__/ __| |/ _ \| '_ \ __) |
\ V / __/ | \__ \ | (_) | | | | / __/
\_/ \___|_| |___/_|\___/|_| |_| |_____|
*/


//////////////////////////////////////////////////////////////////////////////////
// JSX.js © and written by Trevor https://creative-scripts.com/jsx-js            //
// If your turnover is less than $50,000,000 then you don't have to pay anything //
// License MIT, don't complain, don't sue NO MATTER WHAT                         //
// If your turnover is more than $50,000,000 then you DO have to pay             //
// Contact me https://creative-scripts.com/contact for pricing and licensing     //
// Don't remove these commented lines                                            //
// For simple and effective calling of jsx from the js engine                    //
// Version 2 last modified April 18 2018                                         //
//////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Change log:                                                                                            //
// JSX.js V2 is now independent of NodeJS and CSInterface.js                                              //
// forceEval is now by default true                                                                       //
// It wraps the scripts in a try catch and an eval providing useful error handling                        //
// One can set in the jsx engine $.includeStack = true to return the call stack in the event of an error  //
///////////////////////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////////////////////////////////
// JSX.js for calling jsx code from the js engine                                                         //
// 2 methods included                                                                                     //
// 1) jsx.evalScript AKA jsx.eval                                                                         //
// 2) jsx.evalFile AKA jsx.file                                                                           //
// Special features                                                                                       //
// 1) Allows all changes in your jsx code to be reloaded into your extension at the click of a button     //
// 2) Can enable the $.fileName property to work and provides a $.__fileName() method as an alternative   //
// 3) Can force a callBack result from InDesign                                                           //
// 4) No more csInterface.evalScript('alert("hello "' + title + " " + name + '");')                       //
//    use jsx.evalScript('alert("hello __title__ __name__");', {title: title, name: name});               //
// 5) execute jsx files from your jsx folder like this jsx.evalFile('myFabJsxScript.jsx');                //
//    or from a relative path jsx.evalFile('../myFabScripts/myFabJsxScript.jsx');                         //
//    or from an absolute url jsx.evalFile('/Path/to/my/FabJsxScript.jsx'); (mac)                         //
//    or from an absolute url jsx.evalFile('C:Path/to/my/FabJsxScript.jsx'); (windows)                    //
// 6) Parameters can be entered in the form of a parameter list which can be in any order or as an object //
// 7) Not camelCase sensitive (very useful for the illiterate)                                            //
// Dead easy to use BUT SPEND THE 3 TO 5 MINUTES IT SHOULD TAKE TO READ THE INSTRUCTIONS                  //
///////////////////////////////////////////////////////////////////////////////////////////////////////////

/* jshint undef:true, unused:true, esversion:6 */

//////////////////////////////////////
// jsx is the interface for the API //
//////////////////////////////////////

var jsx;

// Wrap everything in an anonymous function to prevent leaks
(function() {
    /////////////////////////////////////////////////////////////////////
    // Substitute some CSInterface functions to avoid dependency on it //
    /////////////////////////////////////////////////////////////////////

    var __dirname = (function() {
        var path, isMac;
        path = decodeURI(window.__adobe_cep__.getSystemPath('extension'));
        isMac = navigator.platform[0] === 'M'; // [M]ac
        path = path.replace('file://' + (isMac ? '' : '/'), '');
        return path;
    })();

    var evalScript = function(script, callback) {
        callback = callback || function() {};
        window.__adobe_cep__.evalScript(script, callback);
    };

    ////////////////////////////////////////////
    // In place of using the node path module //
    ////////////////////////////////////////////

    // jshint undef: true, unused: true

    // A very minified version of the NodeJs Path module!!
    // For use outside of NodeJs
    // Majorly nicked by Trevor from Joyent
    var path = (function() {

        var isString = function(arg) {
            return typeof arg === 'string';
        };

        // var isObject = function(arg) {
        //     return typeof arg === 'object' && arg !== null;
        // };

        var basename = function(path) {
            if (!isString(path)) {
                throw new TypeError('Argument to path.basename must be a string');
            }
            var bits = path.split(/[\/\\]/g);
            return bits[bits.length - 1];
        };

        // jshint undef: true
        // Regex to split a windows path into three parts: [*, device, slash,
        // tail] windows-only
        var splitDeviceRe =
            /^([a-zA-Z]:|[\\\/]{2}[^\\\/]+[\\\/]+[^\\\/]+)?([\\\/])?([\s\S]*?)$/;

        // Regex to split the tail part of the above into [*, dir, basename, ext]
        // var splitTailRe =
        //     /^([\s\S]*?)((?:\.{1,2}|[^\\\/]+?|)(\.[^.\/\\]*|))(?:[\\\/]*)$/;

        var win32 = {};
        // Function to split a filename into [root, dir, basename, ext]
        // var win32SplitPath = function(filename) {
        //     // Separate device+slash from tail
        //     var result = splitDeviceRe.exec(filename),
        //         device = (result[1] || '') + (result[2] || ''),
        //         tail = result[3] || '';
        //     // Split the tail into dir, basename and extension
        //     var result2 = splitTailRe.exec(tail),
        //         dir = result2[1],
        //         basename = result2[2],
        //         ext = result2[3];
        //     return [device, dir, basename, ext];
        // };

        var win32StatPath = function(path) {
            var result = splitDeviceRe.exec(path),
                device = result[1] || '',
                isUnc = !!device && device[1] !== ':';
            return {
                device: device,
                isUnc: isUnc,
                isAbsolute: isUnc || !!result[2], // UNC paths are always absolute
                tail: result[3]
            };
        };

        var normalizeUNCRoot = function(device) {
            return '\\\\' + device.replace(/^[\\\/]+/, '').replace(/[\\\/]+/g, '\\');
        };

        var normalizeArray = function(parts, allowAboveRoot) {
            var res = [];
            for (var i = 0; i < parts.length; i++) {
                var p = parts[i];

                // ignore empty parts
                if (!p || p === '.')
                    continue;

                if (p === '..') {
                    if (res.length && res[res.length - 1] !== '..') {
                        res.pop();
                    } else if (allowAboveRoot) {
                        res.push('..');
                    }
                } else {
                    res.push(p);
                }
            }

            return res;
        };

        win32.normalize = function(path) {
            var result = win32StatPath(path),
                device = result.device,
                isUnc = result.isUnc,
                isAbsolute = result.isAbsolute,
                tail = result.tail,
                trailingSlash = /[\\\/]$/.test(tail);

            // Normalize the tail path
            tail = normalizeArray(tail.split(/[\\\/]+/), !isAbsolute).join('\\');

            if (!tail && !isAbsolute) {
                tail = '.';
            }
            if (tail && trailingSlash) {
                tail += '\\';
            }

            // Convert slashes to backslashes when `device` points to an UNC root.
            // Also squash multiple slashes into a single one where appropriate.
            if (isUnc) {
                device = normalizeUNCRoot(device);
            }

            return device + (isAbsolute ? '\\' : '') + tail;
        };

        win32.join = function() {
            var paths = [];
            for (var i = 0; i < arguments.length; i++) {
                var arg = arguments[i];
                if (!isString(arg)) {
                    throw new TypeError('Arguments to path.join must be strings');
                }
                if (arg) {
                    paths.push(arg);
                }
            }

            var joined = paths.join('\\');

            // Make sure that the joined path doesn't start with two slashes, because
            // normalize() will mistake it for an UNC path then.
            //
            // This step is skipped when it is very clear that the user actually
            // intended to point at an UNC path. This is assumed when the first
            // non-empty string arguments starts with exactly two slashes followed by
            // at least one more non-slash character.
            //
            // Note that for normalize() to treat a path as an UNC path it needs to
            // have at least 2 components, so we don't filter for that here.
            // This means that the user can use join to construct UNC paths from
            // a server name and a share name; for example:
            //   path.join('//server', 'share') -> '\\\\server\\share\\'
            if (!/^[\\\/]{2}[^\\\/]/.test(paths[0])) {
                joined = joined.replace(/^[\\\/]{2,}/, '\\');
            }
            return win32.normalize(joined);
        };

        var posix = {};

        // posix version
        posix.join = function() {
            var path = '';
            for (var i = 0; i < arguments.length; i++) {
                var segment = arguments[i];
                if (!isString(segment)) {
                    throw new TypeError('Arguments to path.join must be strings');
                }
                if (segment) {
                    if (!path) {
                        path += segment;
                    } else {
                        path += '/' + segment;
                    }
                }
            }
            return posix.normalize(path);
        };

        // path.normalize(path)
        // posix version
        posix.normalize = function(path) {
            var isAbsolute = path.charAt(0) === '/',
                trailingSlash = path && path[path.length - 1] === '/';

            // Normalize the path
            path = normalizeArray(path.split('/'), !isAbsolute).join('/');

            if (!path && !isAbsolute) {
                path = '.';
            }
            if (path && trailingSlash) {
                path += '/';
            }

            return (isAbsolute ? '/' : '') + path;
        };

        win32.basename = posix.basename = basename;

        this.win32 = win32;
        this.posix = posix;
        return (navigator.platform[0] === 'M') ? posix : win32;
    })();
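
    // For example, this minimal port would behave like:
    //   win32.join('ext', 'jsx', 'foo.jsx')  -> 'ext\\jsx\\foo.jsx'
    //   posix.join('ext', 'jsx', 'foo.jsx')  -> 'ext/jsx/foo.jsx'
    // The platform is detected once from navigator.platform, so `path`
    // resolves to whichever variant matches the host OS.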

    ////////////////////////////////////////////////////////////////////////////////////////////////////////
    // This is the "main" function which is to be prototyped                                               //
    // It runs a small snippet in the jsx engine that                                                      //
    // 1) Assigns $.__dirname with the value of the extensions __dirname base path                         //
    // 2) Sets up a method $.__fileName() for retrieving from within the jsx script its $.fileName value   //
    //    more on that method later                                                                        //
    // At the end of the script the global declaration jsx = new Jsx(); has been made.                     //
    // If you like you can remove that and include in your relevant functions                              //
    // var jsx = new Jsx(); You would never call the Jsx function without the "new" declaration            //
    ////////////////////////////////////////////////////////////////////////////////////////////////////////
    var Jsx = function() {
        var jsxScript;
        // Setup jsx function to enable the jsx scripts to easily retrieve their file location
        jsxScript = [
            '$.level = 0;',
            'if(!$.__fileNames){',
            '    $.__fileNames = {};',
            '    $.__dirname = "__dirname__";'.replace('__dirname__', __dirname),
            '    $.__fileName = function(name){',
            '        name = name || $.fileName;',
            '        return ($.__fileNames && $.__fileNames[name]) || $.fileName;',
            '    };',
            '}'
        ].join('');
        evalScript(jsxScript);
        return this;
    };

    /**
     * [evalScript] For calling jsx scripts from the js engine
     *
     * The jsx.evalScript method is used for calling jsx scripts directly from the js engine
     * Allows for easy replacement i.e. variable insertions and for forcing eval.
     * For convenience jsx.eval or jsx.script or jsx.evalscript can be used instead of calling jsx.evalScript
     *
     * @param {String} jsxScript
     *        The string that makes up the jsx script
     *        it can contain a simple template like syntax for replacements
     *        'alert("__foo__");'
     *        the __foo__ will be replaced as per the replacements parameter
     *
     * @param {Function} callback
     *        The callback function you want the jsx script to trigger on completion
     *        The result of the jsx script is passed as the argument to that function
     *        The function can exist in some other file.
     *        Note that InDesign does not automatically pass the callBack as a string.
     *        Either write your InDesign script in a way that it returns a string in the form of
     *        return 'this is my result surrounded by quotes'
     *        or use the force eval option
     *        [Optional DEFAULT no callBack]
     *
     * @param {Object} replacements
     *        The replacements to make on the jsx script
     *        given the following script (template)
     *        'alert("__message__: " + __val__);'
     *        and we want to change the script to
     *        'alert("I was born in the year: " + 1234);'
     *        we would pass the following object
     *        {"message": 'I was born in the year', "val": 1234}
     *        or if not using reserved words like "do" we can leave out the key quotes
     *        {message: 'I was born in the year', val: 1234}
     *        [Optional DEFAULT no replacements]
     *
     * @param {Boolean} forceEval
     *        If the script should be wrapped in an eval and try catch
     *        This will 1) provide useful error feedback if heaven forbid it is needed
     *                  2) The result will be a string which is required for callback results in InDesign
     *        [Optional DEFAULT true]
     *
     * Note 1) The order of the parameters is irrelevant
     * Note 2) One can pass the arguments as an object if desired
     *         jsx.evalScript(myCallBackFunction, 'alert("__myMessage__");', true);
     *         is the same as
     *         jsx.evalScript({
     *             script: 'alert("__myMessage__");',
     *             replacements: {myMessage: 'Hi there'},
     *             callBack: myCallBackFunction,
     *             eval: true
     *         });
     *         note that either lower or camelCase key names are valid
     *         i.e. both callback or callBack will work
     *
     * The following keys are the same jsx || script || jsxScript || jsxscript || file
     * The following keys are the same callBack || callback
     * The following keys are the same replacements || replace
     * The following keys are the same eval || forceEval || forceeval
     * The following keys are the same forceEvalScript || forceevalscript || evalScript || evalscript;
     *
     * @return {Boolean} if the jsxScript was executed or not
     */

    Jsx.prototype.evalScript = function() {
        var arg, i, key, replaceThis, withThis, args, callback, forceEval, replacements, jsxScript, isBin;

        //////////////////////////////////////////////////////////////////////////////////////////
        // sort out the order of the arguments into jsxScript, callback, replacements, forceEval //
        //////////////////////////////////////////////////////////////////////////////////////////

        args = arguments;

        // Detect if the parameters were passed as an object and if so allow for various keys
        if (args.length === 1 && (arg = args[0]) instanceof Object) {
            jsxScript = arg.jsxScript || arg.jsx || arg.script || arg.file || arg.jsxscript;
            callback = arg.callBack || arg.callback;
            replacements = arg.replacements || arg.replace;
            forceEval = arg.eval || arg.forceEval || arg.forceeval;
        } else {
            for (i = 0; i < 4; i++) {
                arg = args[i];
                if (arg === undefined) {
                    continue;
                }
                if (arg.constructor === String) {
                    jsxScript = arg;
                    continue;
                }
                if (arg.constructor === Object) {
                    replacements = arg;
                    continue;
                }
                if (arg.constructor === Function) {
                    callback = arg;
                    continue;
                }
                if (arg === false) {
                    forceEval = false;
                }
            }
        }

        // If no script provided then not too much to do!
        if (!jsxScript) {
            return false;
        }

        // Have changed the forceEval default to be true as I prefer the error handling
        if (forceEval !== false) {
            forceEval = true;
        }

        //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
        // On Illustrator and other apps the result of the jsx script is automatically passed as a string                                        //
        // if you have a "script" containing the single number 1 and nothing else then the callBack will register as "1"                         //
        // On InDesign that same script will provide a blank callBack                                                                            //
        // Let's say we have a callBack function var callBack = function(result){alert(result);}                                                 //
        // On Ai you'll see the 1 in the alert                                                                                                   //
        // On ID you'll just see a blank alert                                                                                                   //
        // To see the 1 in the alert you need to convert the result to a string and then it will show                                            //
        // So if we rewrite our 1 byte script to '1' i.e. surround the 1 in quotes then the callback alert will show 1                           //
        // If the script is planned one can make sure that the results are always passed as a string (including errors)                          //
        // otherwise one can wrap the script in an eval and then have the result passed as a string                                              //
        // I have not gone through all the apps but can say                                                                                      //
        // for Ai you never need to set the forceEval to true                                                                                    //
        // for ID, if you have not coded your script appropriately and you want to send a result to the callBack then set forceEval to true     //
        // I changed this so that even on Illustrator it applies the try catch, Note the try catch will fail if $.level is set to 1              //
        //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

        if (forceEval) {

            isBin = (jsxScript.substring(0, 10) === '@JSXBIN@ES') ? '' : '\n';
            jsxScript = (
                // "\n''') + '';} catch(e){(function(e){var n, a=[]; for (n in e){a.push(n + ': ' + e[n])}; return a.join('\n')})(e)}");
                // "\n''') + '';} catch(e){e + (e.line ? ('\\nLine ' + (+e.line - 1)) : '')}");
                [
                    "$.level = 0;",
                    "try{eval('''" + isBin, // need to add an extra line otherwise #targetengine doesn't work ;-]
                    jsxScript.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/"/g, '\\"') + "\n''') + '';",
                    "} catch (e) {",
                    "    (function(e) {",
                    "        var line, sourceLine, name, description, ErrorMessage, fileName, start, end, bug;",
                    "        line = +e.line" + (isBin === '' ? ';' : ' - 1;'), // To take into account the extra line added
                    "        fileName = File(e.fileName).fsName;",
                    "        sourceLine = line && e.source.split(/[\\r\\n]/)[line];",
                    "        name = e.name;",
                    "        description = e.description;",
                    "        ErrorMessage = name + ' ' + e.number + ': ' + description;",
                    "        if (fileName.length && !(/[\\/\\\\]\\d+$/.test(fileName))) {",
                    "            ErrorMessage += '\\nFile: ' + fileName;",
                    "            line++;",
                    "        }",
                    "        if (line){",
                    "            ErrorMessage += '\\nLine: ' + line +",
                    "                '-> ' + ((sourceLine.length < 300) ? sourceLine : sourceLine.substring(0,300) + '...');",
                    "        }",
                    "        if (e.start) {ErrorMessage += '\\nBug: ' + e.source.substring(e.start - 1, e.end)}",
                    "        if ($.includeStack) {ErrorMessage += '\\nStack:' + $.stack;}",
                    "        return ErrorMessage;",
                    "    })(e);",
                    "}"
                ].join('')
            );

        }

        /////////////////////////////////////////////////////////////
        // deal with the replacements                               //
        // Note it's probably better to use ${template} `literals`  //
        /////////////////////////////////////////////////////////////

        if (replacements) {
            for (key in replacements) {
                if (replacements.hasOwnProperty(key)) {
                    replaceThis = new RegExp('__' + key + '__', 'g');
                    withThis = replacements[key];
                    jsxScript = jsxScript.replace(replaceThis, withThis + '');
                }
            }
        }

        try {
            evalScript(jsxScript, callback);
            return true;
        } catch (err) {
            ////////////////////////////////////////////////
            // Do whatever error handling you want here ! //
            ////////////////////////////////////////////////
            var newErr;
            newErr = new Error(err);
            alert('Error Eek: ' + newErr.stack);
            return false;
        }

    };
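
    // Usage sketch for the method above (values illustrative): the __name__
    // token in the template is swapped in before the script reaches the jsx
    // engine, and the callback receives whatever the jsx evaluates to.
    //
    //     jsx.evalScript('alert("hello __name__");', {name: 'Fred'},
    //         function (res) { console.log('jsx returned:', res); });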

    /**
     * [evalFile] For calling jsx scripts from the js engine
     *
     * The jsx.evalFile method is used for executing saved jsx scripts
     * where the jsxScript parameter is a string of the jsx script's file location.
     * For convenience jsx.file or jsx.evalfile can be used instead of jsx.evalFile
     *
     * @param {String} file
     *        The path to the jsx script
     *        If only the base name is provided then the path will be presumed to be the
     *        jsx folder at the base of the extension's directory
     *        To execute files stored in the jsx folder located in the __dirname folder use
     *        jsx.evalFile('myFabJsxScript.jsx');
     *        To execute files stored in a folder myFabScripts located in the __dirname folder use
     *        jsx.evalFile('./myFabScripts/myFabJsxScript.jsx');
     *        To execute files stored in a folder myFabScripts located at an absolute url use
     *        jsx.evalFile('/Path/to/my/FabJsxScript.jsx'); (mac)
     *        or jsx.evalFile('C:Path/to/my/FabJsxScript.jsx'); (windows)
     *
     * @param {Function} callback
     *        The callback function you want the jsx script to trigger on completion
     *        The result of the jsx script is passed as the argument to that function
     *        The function can exist in some other file.
     *        Note that InDesign does not automatically pass the callBack as a string.
     *        Either write your InDesign script in a way that it returns a string in the form of
     *        return 'this is my result surrounded by quotes'
     *        or use the force eval option
     *        [Optional DEFAULT no callBack]
     *
     * @param {Object} replacements
     *        The replacements to make on the jsx script
     *        given the following script (template)
     *        'alert("__message__: " + __val__);'
     *        and we want to change the script to
     *        'alert("I was born in the year: " + 1234);'
     *        we would pass the following object
     *        {"message": 'I was born in the year', "val": 1234}
     *        or if not using reserved words like "do" we can leave out the key quotes
     *        {message: 'I was born in the year', val: 1234}
     *        By default when possible the forceEvalScript will be set to true
     *        The forceEvalScript option cannot be true when there are replacements
     *        To force the forceEvalScript to be false you can send a blank set of replacements
     *        jsx.evalFile('myFabScript.jsx', {}); Will NOT be executed using the $.evalScript method
     *        jsx.evalFile('myFabScript.jsx'); Will YES be executed using the $.evalScript method
     *        see the forceEvalScript parameter for details on this
     *        [Optional DEFAULT no replacements]
     *
     * @param {Boolean} forceEval
     *        If the script should be wrapped in an eval and try catch
     *        This will 1) provide useful error feedback if heaven forbid it is needed
     *                  2) The result will be a string which is required for callback results in InDesign
     *        [Optional DEFAULT true]
     *
     * If no replacements are needed then the jsx script is executed by using the $.evalFile method
     * This exposes the true value of the $.fileName property
     * In such a case it's best to avoid using the $.__fileName() with no base name as it won't work
     * BUT one can still use the $.__fileName('baseName') method which is more accurate than the standard $.fileName property
     * Let's say you have a Drive called "Graphics" AND YOU HAVE a root folder on your "main" drive called "Graphics"
     * You call a script jsx.evalFile('/Volumes/Graphics/myFabScript.jsx');
     * $.fileName will give you '/Graphics/myFabScript.jsx' which is wrong
     * $.__fileName('myFabScript.jsx') will give you '/Volumes/Graphics/myFabScript.jsx' which is correct
     * $.__fileName() will not give you a reliable result
     * Note that if you're calling multiple versions of myFabScript.jsx stored in multiple folders then you can get stuffed!
     * i.e. if the fileName is important to you then don't do that.
     * It also will force the result of the jsx file as a string which is particularly useful for InDesign callBacks
     *
     * Note 1) The order of the parameters is irrelevant
     * Note 2) One can pass the arguments as an object if desired
     *         jsx.evalScript(myCallBackFunction, 'alert("__myMessage__");', true);
     *         is the same as
     *         jsx.evalScript({
     *             script: 'alert("__myMessage__");',
     *             replacements: {myMessage: 'Hi there'},
     *             callBack: myCallBackFunction,
     *             eval: false,
     *         });
     *         note that either lower or camelCase key names are valid
     *         i.e. both callback or callBack will work
     *
     * The following keys are the same file || jsx || script || jsxScript || jsxscript
     * The following keys are the same callBack || callback
     * The following keys are the same replacements || replace
     * The following keys are the same eval || forceEval || forceeval
     *
     * @return {Boolean} if the jsxScript was executed or not
     */

    Jsx.prototype.evalFile = function() {
        var arg, args, callback, fileName, fileNameScript, forceEval, forceEvalScript,
            i, jsxFolder, jsxScript, newLine, replacements, success;

        success = true; // optimistic
        args = arguments;

        jsxFolder = path.join(__dirname, 'jsx');
        //////////////////////////////////////////////////////////////////////////////////////////////////////////
        // $.fileName does not return its correct path in the jsx engine for files called from the js engine    //
        // In Illustrator it returns an integer, in InDesign it returns an empty string                         //
        // This script injection allows for the script to know its path by calling                              //
        // $.__fileName();                                                                                      //
        // on Illustrator this works pretty well                                                                //
        // on InDesign it's best to use with a bit of care                                                      //
        // If a second script has been called then InDesign will "forget" the path to the first script         //
        // 2 work-arounds for this                                                                              //
        // 1) at the beginning of your script add var thePathToMeIs = $.fileName();                             //
        //    thePathToMeIs will not be forgotten after running the second script                               //
        // 2) $.__fileName('myBaseName.jsx');                                                                   //
        //    for example you have file with the following path                                                 //
        //    /path/to/me.jsx                                                                                   //
        //    Call $.__fileName('me.jsx') and you will get /path/to/me.jsx even after executing a second script //
        // Note When the forceEvalScript option is used then you just use the regular $.fileName property      //
        //////////////////////////////////////////////////////////////////////////////////////////////////////////
        fileNameScript = [
            // The if statement should not normally be executed
            'if(!$.__fileNames){',
            '    $.__fileNames = {};',
            '    $.__dirname = "__dirname__";'.replace('__dirname__', __dirname),
            '    $.__fileName = function(name){',
            '        name = name || $.fileName;',
            '        return ($.__fileNames && $.__fileNames[name]) || $.fileName;',
            '    };',
            '}',
            '$.__fileNames["__basename__"] = $.__fileNames["" + $.fileName] = "__fileName__";'
        ].join('');

        //////////////////////////////////////////////////////////////////////////////////////////
        // sort out the order of the arguments into jsxScript, callback, replacements, forceEval //
        //////////////////////////////////////////////////////////////////////////////////////////

        // Detect if the parameters were passed as an object and if so allow for various keys
        if (args.length === 1 && (arg = args[0]) instanceof Object) {
            jsxScript = arg.jsxScript || arg.jsx || arg.script || arg.file || arg.jsxscript;
            callback = arg.callBack || arg.callback;
            replacements = arg.replacements || arg.replace;
            forceEval = arg.eval || arg.forceEval || arg.forceeval;
        } else {
            for (i = 0; i < 5; i++) {
                arg = args[i];
                if (arg === undefined) {
                    continue;
                }
                if (arg.constructor.name === 'String') {
                    jsxScript = arg;
                    continue;
                }
                if (arg.constructor.name === 'Object') {
                    //////////////////////////////////////////////////////////////////////////////////////////////////////////////
                    // If no replacements are provided then the $.evalScript method will be used                                //
                    // This will allow directly for the $.fileName property to be used                                          //
                    // If one does not want the $.evalScript method to be used then                                             //
                    // either send a blank object as the replacements {}                                                        //
                    // or explicitly set the forceEvalScript option to false                                                    //
                    // This can only be done if the parameters are passed as an object                                          //
                    // i.e. jsx.evalFile({file:'myFabScript.jsx', forceEvalScript: false});                                     //
                    // if the file was called using                                                                             //
                    // i.e. jsx.evalFile('myFabScript.jsx');                                                                    //
                    // then the following jsx code is called $.evalFile(new File('Path/to/myFabScript.jsx', 10000000000)) + ''; //
                    // forceEval is never needed if the forceEvalScript is triggered                                            //
                    //////////////////////////////////////////////////////////////////////////////////////////////////////////////
                    replacements = arg;
                    continue;
                }
                if (arg.constructor === Function) {
                    callback = arg;
                    continue;
                }
                if (arg === false) {
                    forceEval = false;
                }
            }
        }

        // If no script provided then not too much to do!
        if (!jsxScript) {
            return false;
        }

        forceEvalScript = !replacements;

        //////////////////////////////////////////////////////
        // Get path of script                                //
        // Check if it's literal, relative or in jsx folder  //
        //////////////////////////////////////////////////////

        if (/^\/|[a-zA-Z]+:/.test(jsxScript)) { // absolute path Mac | Windows
            jsxScript = path.normalize(jsxScript);
        } else if (/^\.+\//.test(jsxScript)) {
            jsxScript = path.join(__dirname, jsxScript); // relative path
        } else {
            jsxScript = path.join(jsxFolder, jsxScript); // files in the jsxFolder
        }

        if (forceEvalScript) {
            jsxScript = jsxScript.replace(/"/g, '\\"');
            // Check that the path exists, should change this to asynchronous at some point
            if (!window.cep.fs.stat(jsxScript).err) {
                jsxScript = fileNameScript.replace(/__fileName__/, jsxScript).replace(/__basename__/, path.basename(jsxScript)) +
                    '$.evalFile(new File("' + jsxScript.replace(/\\/g, '\\\\') + '")) + "";';
                return this.evalScript(jsxScript, callback, forceEval);
            } else {
                throw new Error(`The file: ${jsxScript} could not be found / read`);
            }
        }

        ////////////////////////////////////////////////////////////////////////////////////////////////
        // Replacements made so we can't use $.evalFile and need to read the jsx script for ourselves //
        ////////////////////////////////////////////////////////////////////////////////////////////////

        fileName = jsxScript.replace(/\\/g, '\\\\').replace(/"/g, '\\"');
        try {
            jsxScript = window.cep.fs.readFile(jsxScript).data;
        } catch (er) {
            throw new Error(`The file: ${fileName} could not be read`);
        }
        // It is desirable that the injected fileNameScript is on the same line as the 1st line of the script
        // This is so that the $.line or error.line returns the same value as the actual file
        // However if the 1st line contains a # directive then we need to insert a new line and stuff the above problem
        // When possible i.e. when there's no replacements then $.evalFile will be used and then the whole issue is avoided
        newLine = /^\s*#/.test(jsxScript) ? '\n' : '';
        jsxScript = fileNameScript.replace(/__fileName__/, fileName).replace(/__basename__/, path.basename(fileName)) + newLine + jsxScript;

        try {
            // evalScript(jsxScript, callback);
            return this.evalScript(jsxScript, callback, replacements, forceEval);
        } catch (err) {
            ////////////////////////////////////////////////
            // Do whatever error handling you want here ! //
            ////////////////////////////////////////////////
            var newErr;
            newErr = new Error(err);
            alert('Error Eek: ' + newErr.stack);
            return false;
        }

        return success; // success should be an array but for now it's a Boolean
    };

    ////////////////////////////////////
    // Setup alternative method names //
    ////////////////////////////////////
    Jsx.prototype.eval = Jsx.prototype.script = Jsx.prototype.evalscript = Jsx.prototype.evalScript;
    Jsx.prototype.file = Jsx.prototype.evalfile = Jsx.prototype.evalFile;

    ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // Examples                                                                                                                       //
    // jsx.evalScript('alert("foo");');                                                                                               //
    // jsx.evalFile('foo.jsx'); // where foo.jsx is stored in the jsx folder at the base of the extensions directory                  //
    // jsx.evalFile('../myFolder/foo.jsx'); // where a relative or absolute file path is given                                        //
    //                                                                                                                                //
    // using conventional methods one would use in the case where the values to swap were supplied by variables                       //
    // csInterface.evalScript('var q = "' + name + '"; alert("' + myString + '" ' + myOp + ' q);q;', callback);                       //
    // Using all the '' + foo + '' is very error prone                                                                                //
    // jsx.evalScript('var q = "__name__"; alert(__string__ __opp__ q);q;',{'name':'Fred', 'string':'Hello ', 'opp':'+'}, callBack);  //
    // is much simpler and less error prone                                                                                           //
    //                                                                                                                                //
    // more readable to use object                                                                                                    //
    // jsx.evalFile({                                                                                                                 //
    //     file: 'yetAnotherFabScript.jsx',                                                                                           //
    //     replacements: {"this": foo, That: bar, and: "&&", the: foo2, other: bar2},                                                 //
    //     eval: true                                                                                                                 //
    // })                                                                                                                             //
    // Enjoy                                                                                                                          //
    ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    jsx = new Jsx();
})();
File diff suppressed because one or more lines are too long
@ -1,530 +0,0 @@
// json2.js
// 2017-06-12
// Public Domain.
// NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.

// USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
// NOT CONTROL.

// This file creates a global JSON object containing two methods: stringify
// and parse. This file provides the ES5 JSON capability to ES3 systems.
// If a project might run on IE8 or earlier, then this file should be included.
// This file does nothing on ES5 systems.

// JSON.stringify(value, replacer, space)
//     value       any JavaScript value, usually an object or array.
//     replacer    an optional parameter that determines how object
//                 values are stringified for objects. It can be a
//                 function or an array of strings.
//     space       an optional parameter that specifies the indentation
//                 of nested structures. If it is omitted, the text will
//                 be packed without extra whitespace. If it is a number,
//                 it will specify the number of spaces to indent at each
//                 level. If it is a string (such as "\t" or " "),
//                 it contains the characters used to indent at each level.
//     This method produces a JSON text from a JavaScript value.
//     When an object value is found, if the object contains a toJSON
//     method, its toJSON method will be called and the result will be
//     stringified. A toJSON method does not serialize: it returns the
//     value represented by the name/value pair that should be serialized,
//     or undefined if nothing should be serialized. The toJSON method
//     will be passed the key associated with the value, and this will be
//     bound to the value.

// For example, this would serialize Dates as ISO strings.

//     Date.prototype.toJSON = function (key) {
//         function f(n) {
//             // Format integers to have at least two digits.
//             return (n < 10)
//                 ? "0" + n
//                 : n;
//         }
//         return this.getUTCFullYear() + "-" +
//             f(this.getUTCMonth() + 1) + "-" +
//             f(this.getUTCDate()) + "T" +
//             f(this.getUTCHours()) + ":" +
//             f(this.getUTCMinutes()) + ":" +
//             f(this.getUTCSeconds()) + "Z";
//     };

// You can provide an optional replacer method. It will be passed the
// key and value of each member, with this bound to the containing
// object. The value that is returned from your method will be
// serialized. If your method returns undefined, then the member will
// be excluded from the serialization.

// If the replacer parameter is an array of strings, then it will be
// used to select the members to be serialized. It filters the results
// such that only members with keys listed in the replacer array are
// stringified.

// Values that do not have JSON representations, such as undefined or
// functions, will not be serialized. Such values in objects will be
// dropped; in arrays they will be replaced with null. You can use
// a replacer function to replace those with JSON values.

// JSON.stringify(undefined) returns undefined.

// The optional space parameter produces a stringification of the
// value that is filled with line breaks and indentation to make it
// easier to read.

// If the space parameter is a non-empty string, then that string will
// be used for indentation. If the space parameter is a number, then
// the indentation will be that many spaces.

// Example:

//     text = JSON.stringify(["e", {pluribus: "unum"}]);
//     // text is '["e",{"pluribus":"unum"}]'

//     text = JSON.stringify(["e", {pluribus: "unum"}], null, "\t");
//     // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'

//     text = JSON.stringify([new Date()], function (key, value) {
//         return this[key] instanceof Date
//             ? "Date(" + this[key] + ")"
//             : value;
//     });
//     // text is '["Date(---current time---)"]'

// JSON.parse(text, reviver)
//     This method parses a JSON text to produce an object or array.
//     It can throw a SyntaxError exception.

// The optional reviver parameter is a function that can filter and
// transform the results. It receives each of the keys and values,
// and its return value is used instead of the original value.
// If it returns what it received, then the structure is not modified.
// If it returns undefined then the member is deleted.

// Example:

//     // Parse the text. Values that look like ISO date strings will
//     // be converted to Date objects.

//     myData = JSON.parse(text, function (key, value) {
//         var a;
//         if (typeof value === "string") {
//             a =
// /^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
//             if (a) {
//                 return new Date(Date.UTC(
//                     +a[1], +a[2] - 1, +a[3], +a[4], +a[5], +a[6]
//                 ));
//             }
//         }
//         return value;
//     });

//     myData = JSON.parse(
//         "[\"Date(09/09/2001)\"]",
//         function (key, value) {
//             var d;
//             if (
//                 typeof value === "string"
//                 && value.slice(0, 5) === "Date("
//                 && value.slice(-1) === ")"
//             ) {
//                 d = new Date(value.slice(5, -1));
//                 if (d) {
//                     return d;
//                 }
//             }
//             return value;
//         }
//     );

// This is a reference implementation. You are free to copy, modify, or
// redistribute.

/*jslint
    eval, for, this
*/

/*property
    JSON, apply, call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
    getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
    lastIndex, length, parse, prototype, push, replace, slice, stringify,
    test, toJSON, toString, valueOf
*/


// Create a JSON object only if one does not already exist. We create the
// methods in a closure to avoid creating global variables.

if (typeof JSON !== "object") {
    JSON = {};
}

(function () {
    "use strict";

    var rx_one = /^[\],:{}\s]*$/;
    var rx_two = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g;
    var rx_three = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g;
    var rx_four = /(?:^|:|,)(?:\s*\[)+/g;
    var rx_escapable = /[\\"\u0000-\u001f\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
    var rx_dangerous = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
|
||||
|
||||
function f(n) {
|
||||
// Format integers to have at least two digits.
|
||||
return (n < 10)
|
||||
? "0" + n
|
||||
: n;
|
||||
}
|
||||
|
||||
function this_value() {
|
||||
return this.valueOf();
|
||||
}
|
||||
|
||||
if (typeof Date.prototype.toJSON !== "function") {
|
||||
|
||||
Date.prototype.toJSON = function () {
|
||||
|
||||
return isFinite(this.valueOf())
|
||||
? (
|
||||
this.getUTCFullYear()
|
||||
+ "-"
|
||||
+ f(this.getUTCMonth() + 1)
|
||||
+ "-"
|
||||
+ f(this.getUTCDate())
|
||||
+ "T"
|
||||
+ f(this.getUTCHours())
|
||||
+ ":"
|
||||
+ f(this.getUTCMinutes())
|
||||
+ ":"
|
||||
+ f(this.getUTCSeconds())
|
||||
+ "Z"
|
||||
)
|
||||
: null;
|
||||
};
|
||||
|
||||
Boolean.prototype.toJSON = this_value;
|
||||
Number.prototype.toJSON = this_value;
|
||||
String.prototype.toJSON = this_value;
|
||||
}
|
||||
|
||||
var gap;
|
||||
var indent;
|
||||
var meta;
|
||||
var rep;
|
||||
|
||||
|
||||
function quote(string) {
|
||||
|
||||
// If the string contains no control characters, no quote characters, and no
|
||||
// backslash characters, then we can safely slap some quotes around it.
|
||||
// Otherwise we must also replace the offending characters with safe escape
|
||||
// sequences.
|
||||
|
||||
rx_escapable.lastIndex = 0;
|
||||
return rx_escapable.test(string)
|
||||
? "\"" + string.replace(rx_escapable, function (a) {
|
||||
var c = meta[a];
|
||||
return typeof c === "string"
|
||||
? c
|
||||
: "\\u" + ("0000" + a.charCodeAt(0).toString(16)).slice(-4);
|
||||
}) + "\""
|
||||
: "\"" + string + "\"";
|
||||
}
|
||||
|
||||
|
||||
function str(key, holder) {
|
||||
|
||||
// Produce a string from holder[key].
|
||||
|
||||
var i; // The loop counter.
|
||||
var k; // The member key.
|
||||
var v; // The member value.
|
||||
var length;
|
||||
var mind = gap;
|
||||
var partial;
|
||||
var value = holder[key];
|
||||
|
||||
// If the value has a toJSON method, call it to obtain a replacement value.
|
||||
|
||||
if (
|
||||
value
|
||||
&& typeof value === "object"
|
||||
&& typeof value.toJSON === "function"
|
||||
) {
|
||||
value = value.toJSON(key);
|
||||
}
|
||||
|
||||
// If we were called with a replacer function, then call the replacer to
|
||||
// obtain a replacement value.
|
||||
|
||||
if (typeof rep === "function") {
|
||||
value = rep.call(holder, key, value);
|
||||
}
|
||||
|
||||
// What happens next depends on the value's type.
|
||||
|
||||
switch (typeof value) {
|
||||
case "string":
|
||||
return quote(value);
|
||||
|
||||
case "number":
|
||||
|
||||
// JSON numbers must be finite. Encode non-finite numbers as null.
|
||||
|
||||
return (isFinite(value))
|
||||
? String(value)
|
||||
: "null";
|
||||
|
||||
case "boolean":
|
||||
case "null":
|
||||
|
||||
// If the value is a boolean or null, convert it to a string. Note:
|
||||
// typeof null does not produce "null". The case is included here in
|
||||
// the remote chance that this gets fixed someday.
|
||||
|
||||
return String(value);
|
||||
|
||||
// If the type is "object", we might be dealing with an object or an array or
|
||||
// null.
|
||||
|
||||
case "object":
|
||||
|
||||
// Due to a specification blunder in ECMAScript, typeof null is "object",
|
||||
// so watch out for that case.
|
||||
|
||||
if (!value) {
|
||||
return "null";
|
||||
}
|
||||
|
||||
// Make an array to hold the partial results of stringifying this object value.
|
||||
|
||||
gap += indent;
|
||||
partial = [];
|
||||
|
||||
// Is the value an array?
|
||||
|
||||
if (Object.prototype.toString.apply(value) === "[object Array]") {
|
||||
|
||||
// The value is an array. Stringify every element. Use null as a placeholder
|
||||
// for non-JSON values.
|
||||
|
||||
length = value.length;
|
||||
for (i = 0; i < length; i += 1) {
|
||||
partial[i] = str(i, value) || "null";
|
||||
}
|
||||
|
||||
// Join all of the elements together, separated with commas, and wrap them in
|
||||
// brackets.
|
||||
|
||||
v = partial.length === 0
|
||||
? "[]"
|
||||
: gap
|
||||
? (
|
||||
"[\n"
|
||||
+ gap
|
||||
+ partial.join(",\n" + gap)
|
||||
+ "\n"
|
||||
+ mind
|
||||
+ "]"
|
||||
)
|
||||
: "[" + partial.join(",") + "]";
|
||||
gap = mind;
|
||||
return v;
|
||||
}
|
||||
|
||||
// If the replacer is an array, use it to select the members to be stringified.
|
||||
|
||||
if (rep && typeof rep === "object") {
|
||||
length = rep.length;
|
||||
for (i = 0; i < length; i += 1) {
|
||||
if (typeof rep[i] === "string") {
|
||||
k = rep[i];
|
||||
v = str(k, value);
|
||||
if (v) {
|
||||
partial.push(quote(k) + (
|
||||
(gap)
|
||||
? ": "
|
||||
: ":"
|
||||
) + v);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
// Otherwise, iterate through all of the keys in the object.
|
||||
|
||||
for (k in value) {
|
||||
if (Object.prototype.hasOwnProperty.call(value, k)) {
|
||||
v = str(k, value);
|
||||
if (v) {
|
||||
partial.push(quote(k) + (
|
||||
(gap)
|
||||
? ": "
|
||||
: ":"
|
||||
) + v);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Join all of the member texts together, separated with commas,
|
||||
// and wrap them in braces.
|
||||
|
||||
v = partial.length === 0
|
||||
? "{}"
|
||||
: gap
|
||||
? "{\n" + gap + partial.join(",\n" + gap) + "\n" + mind + "}"
|
||||
: "{" + partial.join(",") + "}";
|
||||
gap = mind;
|
||||
return v;
|
||||
}
|
||||
}
|
||||
|
||||
// If the JSON object does not yet have a stringify method, give it one.
|
||||
|
||||
if (typeof JSON.stringify !== "function") {
|
||||
meta = { // table of character substitutions
|
||||
"\b": "\\b",
|
||||
"\t": "\\t",
|
||||
"\n": "\\n",
|
||||
"\f": "\\f",
|
||||
"\r": "\\r",
|
||||
"\"": "\\\"",
|
||||
"\\": "\\\\"
|
||||
};
|
||||
JSON.stringify = function (value, replacer, space) {
|
||||
|
||||
// The stringify method takes a value and an optional replacer, and an optional
|
||||
// space parameter, and returns a JSON text. The replacer can be a function
|
||||
// that can replace values, or an array of strings that will select the keys.
|
||||
// A default replacer method can be provided. Use of the space parameter can
|
||||
// produce text that is more easily readable.
|
||||
|
||||
var i;
|
||||
gap = "";
|
||||
indent = "";
|
||||
|
||||
// If the space parameter is a number, make an indent string containing that
|
||||
// many spaces.
|
||||
|
||||
if (typeof space === "number") {
|
||||
for (i = 0; i < space; i += 1) {
|
||||
indent += " ";
|
||||
}
|
||||
|
||||
// If the space parameter is a string, it will be used as the indent string.
|
||||
|
||||
} else if (typeof space === "string") {
|
||||
indent = space;
|
||||
}
|
||||
|
||||
// If there is a replacer, it must be a function or an array.
|
||||
// Otherwise, throw an error.
|
||||
|
||||
rep = replacer;
|
||||
if (replacer && typeof replacer !== "function" && (
|
||||
typeof replacer !== "object"
|
||||
|| typeof replacer.length !== "number"
|
||||
)) {
|
||||
throw new Error("JSON.stringify");
|
||||
}
|
||||
|
||||
// Make a fake root object containing our value under the key of "".
|
||||
// Return the result of stringifying the value.
|
||||
|
||||
return str("", {"": value});
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
// If the JSON object does not yet have a parse method, give it one.
|
||||
|
||||
if (typeof JSON.parse !== "function") {
|
||||
JSON.parse = function (text, reviver) {
|
||||
|
||||
// The parse method takes a text and an optional reviver function, and returns
|
||||
// a JavaScript value if the text is a valid JSON text.
|
||||
|
||||
var j;
|
||||
|
||||
function walk(holder, key) {
|
||||
|
||||
// The walk method is used to recursively walk the resulting structure so
|
||||
// that modifications can be made.
|
||||
|
||||
var k;
|
||||
var v;
|
||||
var value = holder[key];
|
||||
if (value && typeof value === "object") {
|
||||
for (k in value) {
|
||||
if (Object.prototype.hasOwnProperty.call(value, k)) {
|
||||
v = walk(value, k);
|
||||
if (v !== undefined) {
|
||||
value[k] = v;
|
||||
} else {
|
||||
delete value[k];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return reviver.call(holder, key, value);
|
||||
}
|
||||
|
||||
|
||||
// Parsing happens in four stages. In the first stage, we replace certain
|
||||
// Unicode characters with escape sequences. JavaScript handles many characters
|
||||
// incorrectly, either silently deleting them, or treating them as line endings.
|
||||
|
||||
text = String(text);
|
||||
rx_dangerous.lastIndex = 0;
|
||||
if (rx_dangerous.test(text)) {
|
||||
text = text.replace(rx_dangerous, function (a) {
|
||||
return (
|
||||
"\\u"
|
||||
+ ("0000" + a.charCodeAt(0).toString(16)).slice(-4)
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
// In the second stage, we run the text against regular expressions that look
|
||||
// for non-JSON patterns. We are especially concerned with "()" and "new"
|
||||
// because they can cause invocation, and "=" because it can cause mutation.
|
||||
// But just to be safe, we want to reject all unexpected forms.
|
||||
|
||||
// We split the second stage into 4 regexp operations in order to work around
|
||||
// crippling inefficiencies in IE's and Safari's regexp engines. First we
|
||||
// replace the JSON backslash pairs with "@" (a non-JSON character). Second, we
|
||||
// replace all simple value tokens with "]" characters. Third, we delete all
|
||||
// open brackets that follow a colon or comma or that begin the text. Finally,
|
||||
// we look to see that the remaining characters are only whitespace or "]" or
|
||||
// "," or ":" or "{" or "}". If that is so, then the text is safe for eval.
|
||||
|
||||
if (
|
||||
rx_one.test(
|
||||
text
|
||||
.replace(rx_two, "@")
|
||||
.replace(rx_three, "]")
|
||||
.replace(rx_four, "")
|
||||
)
|
||||
) {
|
||||
|
||||
// In the third stage we use the eval function to compile the text into a
|
||||
// JavaScript structure. The "{" operator is subject to a syntactic ambiguity
|
||||
// in JavaScript: it can begin a block or an object literal. We wrap the text
|
||||
// in parens to eliminate the ambiguity.
|
||||
|
||||
j = eval("(" + text + ")");
|
||||
|
||||
// In the optional fourth stage, we recursively walk the new structure, passing
|
||||
// each name/value pair to a reviver function for possible transformation.
|
||||
|
||||
return (typeof reviver === "function")
|
||||
? walk({"": j}, "")
|
||||
: j;
|
||||
}
|
||||
|
||||
// If the text is not JSON parseable, then a SyntaxError is thrown.
|
||||
|
||||
throw new SyntaxError("JSON.parse");
|
||||
};
|
||||
}
|
||||
}());
|
||||
Binary file not shown.
@ -1,95 +0,0 @@
<!DOCTYPE html>
<html>
<head>
    <style type="text/css">
        html, body, iframe {
            width: 100%;
            height: 100%;
            border: 0px;
            margin: 0px;
            overflow: hidden;
            background-color: #424242;
        }
        button {width: 100%;}
    </style>

    <style>
        button {width: 100%;}
        body {margin:0; padding:0; height: 100%;}
        html {height: 100%;}
    </style>
    <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js">
    </script>

    <script type=text/javascript>
        $(function() {
            $("a#workfiles-button").bind("click", function() {
                RPC.call('Photoshop.workfiles_route').then(function (data) {
                }, function (error) {
                    alert(error);
                });
            });
        });
    </script>

    <script type=text/javascript>
        $(function() {
            $("a#loader-button").bind("click", function() {
                RPC.call('Photoshop.loader_route').then(function (data) {
                }, function (error) {
                    alert(error);
                });
            });
        });
    </script>

    <script type=text/javascript>
        $(function() {
            $("a#publish-button").bind("click", function() {
                RPC.call('Photoshop.publish_route').then(function (data) {
                }, function (error) {
                    alert(error);
                });
            });
        });
    </script>

    <script type=text/javascript>
        $(function() {
            $("a#sceneinventory-button").bind("click", function() {
                RPC.call('Photoshop.sceneinventory_route').then(function (data) {
                }, function (error) {
                    alert(error);
                });
            });
        });
    </script>

    <script type=text/javascript>
        $(function() {
            $("a#experimental-button").bind("click", function() {
                RPC.call('Photoshop.experimental_tools_route').then(function (data) {
                }, function (error) {
                    alert(error);
                });
            });
        });
    </script>
</head>
<body>
    <script type="text/javascript" src="./client/wsrpc.js"></script>
    <script type="text/javascript" src="./client/CSInterface.js"></script>
    <script type="text/javascript" src="./client/loglevel.min.js"></script>

    <!-- helper library for better debugging of .jsx check its license! -->
    <script type="text/javascript" src="./host/JSX.js"></script>

    <script type="text/javascript" src="./client/client.js"></script>

    <a href=# id=workfiles-button><button>Workfiles...</button></a>
    <a href=# id=loader-button><button>Load...</button></a>
    <a href=# id=publish-button><button>Publish...</button></a>
    <a href=# id=sceneinventory-button><button>Manage...</button></a>
    <a href=# id=experimental-button><button>Experimental Tools...</button></a>
</body>
</html>
@ -1,406 +0,0 @@
import os
import subprocess
import collections
import asyncio

from wsrpc_aiohttp import (
    WebSocketRoute,
    WebSocketAsync
)

import ayon_api
from qtpy import QtCore

from ayon_core.lib import Logger
from ayon_core.pipeline import (
    registered_host,
    Anatomy,
)
from ayon_core.pipeline.workfile import (
    get_workfile_template_key_from_context,
    get_last_workfile,
)
from ayon_core.pipeline.template_data import get_template_data_with_names
from ayon_core.tools.utils import host_tools
from ayon_core.pipeline.context_tools import change_current_context

from .webserver import WebServerTool
from .ws_stub import PhotoshopServerStub

log = Logger.get_logger(__name__)


class ConnectionNotEstablishedYet(Exception):
    pass


class MainThreadItem:
    """Structure to store information about a callback in the main thread.

    Item should be used to execute a callback in the main thread, which may
    be needed for execution of Qt objects.

    Item stores the callback (callable variable), arguments and keyword
    arguments for the callback. Item holds information about its process.
    """
    not_set = object()

    def __init__(self, callback, *args, **kwargs):
        self._done = False
        self._exception = self.not_set
        self._result = self.not_set
        self._callback = callback
        self._args = args
        self._kwargs = kwargs

    @property
    def done(self):
        return self._done

    @property
    def exception(self):
        return self._exception

    @property
    def result(self):
        return self._result

    def execute(self):
        """Execute callback and store its result.

        Method must be called from the main thread. Item is marked as `done`
        when callback execution finished. Stores the output of the callback,
        or the exception information when the callback raises one.
        """
        log.debug("Executing process in main thread")
        if self.done:
            log.warning("- item is already processed")
            return

        log.info("Running callback: {}".format(str(self._callback)))
        try:
            result = self._callback(*self._args, **self._kwargs)
            self._result = result

        except Exception as exc:
            self._exception = exc

        finally:
            self._done = True


def stub():
    """
    Convenience function to get the server RPC stub to call methods directed
    at the host (Photoshop).
    It expects an already created connection, started from the client.
    Currently created when the panel is opened (PS: Window>Extensions>Avalon)
    :return: <PhotoshopServerStub> where functions could be called from
    """
    ps_stub = PhotoshopServerStub()
    if not ps_stub.client:
        raise ConnectionNotEstablishedYet("Connection is not created yet")

    return ps_stub


def show_tool_by_name(tool_name):
    kwargs = {}
    if tool_name == "loader":
        kwargs["use_context"] = True

    host_tools.show_tool_by_name(tool_name, **kwargs)


class ProcessLauncher(QtCore.QObject):
    route_name = "Photoshop"
    _main_thread_callbacks = collections.deque()

    def __init__(self, subprocess_args):
        self._subprocess_args = subprocess_args
        self._log = None

        super(ProcessLauncher, self).__init__()

        # Keep track if launcher was already started
        self._started = False

        self._process = None
        self._websocket_server = None

        start_process_timer = QtCore.QTimer()
        start_process_timer.setInterval(100)

        loop_timer = QtCore.QTimer()
        loop_timer.setInterval(200)

        start_process_timer.timeout.connect(self._on_start_process_timer)
        loop_timer.timeout.connect(self._on_loop_timer)

        self._start_process_timer = start_process_timer
        self._loop_timer = loop_timer

    @property
    def log(self):
        if self._log is None:
            self._log = Logger.get_logger(
                "{}-launcher".format(self.route_name)
            )
        return self._log

    @property
    def websocket_server_is_running(self):
        if self._websocket_server is not None:
            return self._websocket_server.is_running
        return False

    @property
    def is_process_running(self):
        if self._process is not None:
            return self._process.poll() is None
        return False

    @property
    def is_host_connected(self):
        """Returns True if connected, False if app is not running at all."""
        if not self.is_process_running:
            return False

        try:
            _stub = stub()
            if _stub:
                return True
        except Exception:
            pass

        return None

    @classmethod
    def execute_in_main_thread(cls, callback, *args, **kwargs):
        item = MainThreadItem(callback, *args, **kwargs)
        cls._main_thread_callbacks.append(item)
        return item

    def start(self):
        if self._started:
            return
        self.log.info("Started launch logic of Photoshop")
        self._started = True
        self._start_process_timer.start()

    def exit(self):
        """Exit whole application."""
        if self._start_process_timer.isActive():
            self._start_process_timer.stop()
        if self._loop_timer.isActive():
            self._loop_timer.stop()

        if self._websocket_server is not None:
            self._websocket_server.stop()

        if self._process:
            self._process.kill()
            self._process.wait()

        QtCore.QCoreApplication.exit()

    def _on_loop_timer(self):
        # TODO find better way and catch errors
        # Run only callbacks that are in queue at the moment
        cls = self.__class__
        for _ in range(len(cls._main_thread_callbacks)):
            if cls._main_thread_callbacks:
                item = cls._main_thread_callbacks.popleft()
                item.execute()

        if not self.is_process_running:
            self.log.info("Host process is not running. Closing")
            self.exit()

        elif not self.websocket_server_is_running:
            self.log.info("Websocket server is not running. Closing")
            self.exit()

    def _on_start_process_timer(self):
        # TODO add try except validations for each part in this method
        # Start server as first thing
        if self._websocket_server is None:
            self._init_server()
            return

        # TODO add waiting time
        # Wait for webserver
        if not self.websocket_server_is_running:
            return

        # Start application process
        if self._process is None:
            self._start_process()
            self.log.info("Waiting for host to connect")
            return

        # TODO add waiting time
        # Wait until host is connected
        if self.is_host_connected:
            self._start_process_timer.stop()
            self._loop_timer.start()
        elif (
            not self.is_process_running
            or not self.websocket_server_is_running
        ):
            self.exit()

    def _init_server(self):
        if self._websocket_server is not None:
            return

        self.log.debug(
            "Initialization of websocket server for host communication"
        )

        self._websocket_server = websocket_server = WebServerTool()
        if websocket_server.port_occupied(
            websocket_server.host_name,
            websocket_server.port
        ):
            self.log.info(
                "Server already running, sending actual context and exit."
            )
            asyncio.run(websocket_server.send_context_change(self.route_name))
            self.exit()
            return

        # Add Websocket route
        websocket_server.add_route("*", "/ws/", WebSocketAsync)
        # Add Photoshop route to websocket handler

        print("Adding {} route".format(self.route_name))
        WebSocketAsync.add_route(
            self.route_name, PhotoshopRoute
        )
        self.log.info("Starting websocket server for host communication")
        websocket_server.start_server()

    def _start_process(self):
        if self._process is not None:
            return
        self.log.info("Starting host process")
        try:
            self._process = subprocess.Popen(
                self._subprocess_args,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL
            )
        except Exception:
            self.log.info("Failed to start host process", exc_info=True)
            self.exit()


class PhotoshopRoute(WebSocketRoute):
    """
    One route, mimicking external application (like Harmony, etc).
    All functions could be called from client.
    'do_notify' function calls function on the client - mimicking
    notification after long running job on the server or similar
    """
    instance = None

    def init(self, **kwargs):
        # Python __init__ must return "self".
        # This method might return anything.
        log.debug("someone called Photoshop route")
        self.instance = self
        return kwargs

    # server functions
    async def ping(self):
        log.debug("someone called Photoshop route ping")

    # This method calls function on the client side
    # client functions
    async def set_context(self, project, folder, task):
        """
        Sets 'project' and 'folder' to envs, eg. setting context.

        Opens last workfile from that context if it exists.

        Args:
            project (str)
            folder (str)
            task (str)
        """
        log.info("Setting context change")
        log.info(f"project {project} folder {folder} task {task}")

        folder_entity = ayon_api.get_folder_by_path(project, folder)
        task_entity = ayon_api.get_task_by_name(
            project, folder_entity["id"], task
        )
        change_current_context(folder_entity, task_entity)

        last_workfile_path = self._get_last_workfile_path(project,
                                                          folder,
                                                          task)
        if last_workfile_path and os.path.exists(last_workfile_path):
            ProcessLauncher.execute_in_main_thread(
                lambda: stub().open(last_workfile_path))

    async def read(self):
        log.debug("photoshop.read client calls server, server calls "
                  "photoshop client")
        return await self.socket.call('photoshop.read')

    # panel routes for tools
    async def workfiles_route(self):
        self._tool_route("workfiles")

    async def loader_route(self):
        self._tool_route("loader")

    async def publish_route(self):
        self._tool_route("publisher")

    async def sceneinventory_route(self):
        self._tool_route("sceneinventory")

    async def experimental_tools_route(self):
        self._tool_route("experimental_tools")

    def _tool_route(self, _tool_name):
        """The address accessed when clicking on the buttons."""

        ProcessLauncher.execute_in_main_thread(show_tool_by_name, _tool_name)

        # Required return statement.
        return "nothing"

    def _get_last_workfile_path(self, project_name, folder_path, task_name):
        """Returns last workfile path if it exists."""
        host = registered_host()
        host_name = "photoshop"
        template_key = get_workfile_template_key_from_context(
            project_name,
            folder_path,
            task_name,
            host_name,
        )
        anatomy = Anatomy(project_name)

        data = get_template_data_with_names(
            project_name, folder_path, task_name, host_name
        )
        data["root"] = anatomy.roots

        work_template = anatomy.get_template_item("work", template_key)

        # Define saving file extension
        extensions = host.get_workfile_extensions()

        work_root = work_template["directory"].format_strict(data)
        file_template = work_template["file"].template
        last_workfile_path = get_last_workfile(
            work_root, file_template, data, extensions, True
        )

        return last_workfile_path
@ -1,93 +0,0 @@
"""Script wraps launch mechanism of Photoshop implementations.

Arguments passed to the script are passed to the launch function in the host
implementation. In all cases it requires the host app executable and may
contain a workfile or others.
"""

import os
import sys

from ayon_photoshop.api.lib import main as host_main

# Get current file to locate start point of sys.argv
CURRENT_FILE = os.path.abspath(__file__)


def show_error_messagebox(title, message, detail_message=None):
    """Function will show message and process ends after closing it."""
    from qtpy import QtWidgets, QtCore
    from ayon_core import style

    app = QtWidgets.QApplication([])
    app.setStyleSheet(style.load_stylesheet())

    msgbox = QtWidgets.QMessageBox()
    msgbox.setWindowTitle(title)
    msgbox.setText(message)

    if detail_message:
        msgbox.setDetailedText(detail_message)

    msgbox.setWindowModality(QtCore.Qt.ApplicationModal)
    msgbox.show()

    sys.exit(app.exec_())


def on_invalid_args(script_not_found):
    """Show to user message box saying that something went wrong.

    Tell user that arguments to launch implementation are invalid with
    arguments details.

    Args:
        script_not_found (bool): Use different message based on this value.
    """

    title = "Invalid arguments"
    joined_args = ", ".join("\"{}\"".format(arg) for arg in sys.argv)
    if script_not_found:
        submsg = "We couldn't find script path:\n\"{}\""
    else:
        submsg = "Expected Host executable after script path:\n\"{}\""

    message = "BUG: Got invalid arguments so can't launch Host application."
    detail_message = "Process was launched with arguments:\n{}\n\n{}".format(
        joined_args,
        submsg.format(CURRENT_FILE)
    )

    show_error_messagebox(title, message, detail_message)


def main(argv):
    # Modify current file path to find a match in sys.argv, which may differ
    # on Windows (different letter cases and slashes).
    modified_current_file = CURRENT_FILE.replace("\\", "/").lower()

    # Create a copy of sys argv
    sys_args = list(argv)
    after_script_idx = None
    # Find script path in sys.argv to know index of argv where host
    # executable should be.
    for idx, item in enumerate(sys_args):
        if item.replace("\\", "/").lower() == modified_current_file:
            after_script_idx = idx + 1
            break

    # Validate that there is at least one argument after script path
    launch_args = None
    if after_script_idx is not None:
        launch_args = sys_args[after_script_idx:]

    if launch_args:
        # Launch host implementation
        host_main(*launch_args)
    else:
        # Show message box
        on_invalid_args(after_script_idx is None)


if __name__ == "__main__":
    main(sys.argv)
@ -1,84 +0,0 @@
import os
import sys
import contextlib
import traceback

from ayon_core.lib import env_value_to_bool, Logger, is_in_tests
from ayon_core.addon import AddonsManager
from ayon_core.pipeline import install_host
from ayon_core.tools.utils import host_tools
from ayon_core.tools.utils import get_ayon_qt_app

from .launch_logic import ProcessLauncher, stub

log = Logger.get_logger(__name__)


def safe_excepthook(*args):
    traceback.print_exception(*args)


def main(*subprocess_args):
    from ayon_photoshop.api import PhotoshopHost

    host = PhotoshopHost()
    install_host(host)

    sys.excepthook = safe_excepthook

    # coloring in StdOutBroker
    os.environ["AYON_LOG_NO_COLORS"] = "0"
    app = get_ayon_qt_app()
    app.setQuitOnLastWindowClosed(False)

    launcher = ProcessLauncher(subprocess_args)
    launcher.start()

    if env_value_to_bool("HEADLESS_PUBLISH"):
        manager = AddonsManager()
        webpublisher_addon = manager["webpublisher"]
        launcher.execute_in_main_thread(
            webpublisher_addon.headless_publish,
            log,
            "ClosePS",
            is_in_tests()
        )
    elif env_value_to_bool("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH",
                           default=True):

        launcher.execute_in_main_thread(
            host_tools.show_workfiles,
            save=env_value_to_bool("WORKFILES_SAVE_AS")
        )

    sys.exit(app.exec_())


@contextlib.contextmanager
def maintained_selection():
    """Maintain selection during context."""
    selection = stub().get_selected_layers()
    try:
        yield selection
    finally:
        stub().select_layers(selection)


@contextlib.contextmanager
def maintained_visibility(layers=None):
    """Maintain visibility during context.

    Args:
        layers (list of PSItem): used for caching
    """
    visibility = {}
    if not layers:
        layers = stub().get_layers()
    for layer in layers:
        visibility[layer.id] = layer.visible
    try:
        yield
    finally:
        for layer in layers:
            stub().set_visible(layer.id, visibility[layer.id])
Binary file not shown.
Some files were not shown because too many files have changed in this diff.