diff --git a/client/ayon_core/plugins/publish/integrate_hero_version.py b/client/ayon_core/plugins/publish/integrate_hero_version.py
index 8c36719b77..4fb8b886a9 100644
--- a/client/ayon_core/plugins/publish/integrate_hero_version.py
+++ b/client/ayon_core/plugins/publish/integrate_hero_version.py
@@ -87,7 +87,9 @@ class IntegrateHeroVersion(
]
# QUESTION/TODO this process should happen on server if crashed due to
# permissions error on files (files were used or user didn't have perms)
- # *but all other plugins must be sucessfully completed
+ # *but all other plugins must be successfully completed
+
+ use_hardlinks = False
def process(self, instance):
if not self.is_active(instance.data):
@@ -617,24 +619,32 @@ class IntegrateHeroVersion(
self.log.debug("Folder already exists: \"{}\"".format(dirname))
+ if self.use_hardlinks:
+            # Try hardlink first, fall back to copy on cross-drive paths
+ self.log.debug("Hardlinking file \"{}\" to \"{}\"".format(
+ src_path, dst_path
+ ))
+ try:
+ create_hard_link(src_path, dst_path)
+ # Return when successful
+ return
+
+ except OSError as exc:
+                # Re-raise exception if errno is other than
+                # EXDEV - cross-drive path
+                # EINVAL - wrong format, must be NTFS
+ self.log.debug(
+ "Hardlink failed with errno:'{}'".format(exc.errno))
+ if exc.errno not in [errno.EXDEV, errno.EINVAL]:
+ raise
+
+ self.log.debug(
+ "Hardlinking failed, falling back to regular copy...")
+
self.log.debug("Copying file \"{}\" to \"{}\"".format(
src_path, dst_path
))
- # First try hardlink and copy if paths are cross drive
- try:
- create_hard_link(src_path, dst_path)
- # Return when successful
- return
-
- except OSError as exc:
- # re-raise exception if different than
- # EXDEV - cross drive path
- # EINVAL - wrong format, must be NTFS
- self.log.debug("Hardlink failed with errno:'{}'".format(exc.errno))
- if exc.errno not in [errno.EXDEV, errno.EINVAL]:
- raise
-
shutil.copy(src_path, dst_path)
def version_from_representations(self, project_name, repres):
diff --git a/server/settings/publish_plugins.py b/server/settings/publish_plugins.py
index b37be1afe6..1b3d382f01 100644
--- a/server/settings/publish_plugins.py
+++ b/server/settings/publish_plugins.py
@@ -743,6 +743,14 @@ class IntegrateHeroVersionModel(BaseSettingsModel):
optional: bool = SettingsField(False, title="Optional")
active: bool = SettingsField(True, title="Active")
families: list[str] = SettingsField(default_factory=list, title="Families")
+ use_hardlinks: bool = SettingsField(
+ False, title="Use Hardlinks",
+        description="When enabled, first try to create a hardlink of the "
+                    "version instead of a copy. This helps reduce disk "
+                    "usage, but may cause issues.\nFor example, on Windows "
+                    "a hardlink cannot be deleted while any of its links "
+                    "is in use, which can break updating of hero versions.")
class CleanUpModel(BaseSettingsModel):
@@ -1136,7 +1144,8 @@ DEFAULT_PUBLISH_VALUES = {
"layout",
"mayaScene",
"simpleUnrealTexture"
- ]
+ ],
+ "use_hardlinks": False
},
"CleanUp": {
"paterns": [],
diff --git a/server_addon/celaction/client/ayon_celaction/__init__.py b/server_addon/celaction/client/ayon_celaction/__init__.py
deleted file mode 100644
index 0df0224125..0000000000
--- a/server_addon/celaction/client/ayon_celaction/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from .version import __version__
-from .addon import (
- CELACTION_ROOT_DIR,
- CelactionAddon,
-)
-
-
-__all__ = (
- "__version__",
-
- "CELACTION_ROOT_DIR",
- "CelactionAddon",
-)
diff --git a/server_addon/celaction/client/ayon_celaction/addon.py b/server_addon/celaction/client/ayon_celaction/addon.py
deleted file mode 100644
index ad04a54088..0000000000
--- a/server_addon/celaction/client/ayon_celaction/addon.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import os
-from ayon_core.addon import AYONAddon, IHostAddon
-
-from .version import __version__
-
-CELACTION_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
-
-
-class CelactionAddon(AYONAddon, IHostAddon):
- name = "celaction"
- version = __version__
- host_name = "celaction"
-
- def get_launch_hook_paths(self, app):
- if app.host_name != self.host_name:
- return []
- return [
- os.path.join(CELACTION_ROOT_DIR, "hooks")
- ]
-
- def add_implementation_envs(self, env, _app):
- # Set default values if are not already set via settings
- defaults = {
- "LOGLEVEL": "DEBUG"
- }
- for key, value in defaults.items():
- if not env.get(key):
- env[key] = value
-
- def get_workfile_extensions(self):
- return [".scn"]
diff --git a/server_addon/celaction/client/ayon_celaction/hooks/pre_celaction_setup.py b/server_addon/celaction/client/ayon_celaction/hooks/pre_celaction_setup.py
deleted file mode 100644
index 52622d43b8..0000000000
--- a/server_addon/celaction/client/ayon_celaction/hooks/pre_celaction_setup.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import os
-import shutil
-import winreg
-import subprocess
-from ayon_core.lib import get_ayon_launcher_args
-from ayon_applications import PreLaunchHook, LaunchTypes
-from ayon_celaction import CELACTION_ROOT_DIR
-
-
-class CelactionPrelaunchHook(PreLaunchHook):
- """Bootstrap celacion with AYON"""
- app_groups = {"celaction"}
- platforms = {"windows"}
- launch_types = {LaunchTypes.local}
-
- def execute(self):
- folder_attributes = self.data["folder_entity"]["attrib"]
- width = folder_attributes["resolutionWidth"]
- height = folder_attributes["resolutionHeight"]
-
- # Add workfile path to launch arguments
- workfile_path = self.workfile_path()
- if workfile_path:
- self.launch_context.launch_args.append(workfile_path)
-
- # setting output parameters
- path_user_settings = "\\".join([
- "Software", "CelAction", "CelAction2D", "User Settings"
- ])
- winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_user_settings)
- hKey = winreg.OpenKey(
- winreg.HKEY_CURRENT_USER, path_user_settings, 0,
- winreg.KEY_ALL_ACCESS
- )
-
- path_to_cli = os.path.join(
- CELACTION_ROOT_DIR, "scripts", "publish_cli.py"
- )
- subprocess_args = get_ayon_launcher_args("run", path_to_cli)
- executable = subprocess_args.pop(0)
- workfile_settings = self.get_workfile_settings()
-
- winreg.SetValueEx(
- hKey,
- "SubmitAppTitle",
- 0,
- winreg.REG_SZ,
- executable
- )
-
- # add required arguments for workfile path
- parameters = subprocess_args + [
- "--currentFile", "*SCENE*"
- ]
-
- # Add custom parameters from workfile settings
- if "render_chunk" in workfile_settings["submission_overrides"]:
- parameters += [
- "--chunk", "*CHUNK*"
- ]
- if "resolution" in workfile_settings["submission_overrides"]:
- parameters += [
- "--resolutionWidth", "*X*",
- "--resolutionHeight", "*Y*"
- ]
- if "frame_range" in workfile_settings["submission_overrides"]:
- parameters += [
- "--frameStart", "*START*",
- "--frameEnd", "*END*"
- ]
-
- winreg.SetValueEx(
- hKey, "SubmitParametersTitle", 0, winreg.REG_SZ,
- subprocess.list2cmdline(parameters)
- )
-
- self.log.debug(f"__ parameters: \"{parameters}\"")
-
- # setting resolution parameters
- path_submit = "\\".join([
- path_user_settings, "Dialogs", "SubmitOutput"
- ])
- winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_submit)
- hKey = winreg.OpenKey(
- winreg.HKEY_CURRENT_USER, path_submit, 0,
- winreg.KEY_ALL_ACCESS
- )
- winreg.SetValueEx(hKey, "SaveScene", 0, winreg.REG_DWORD, 1)
- winreg.SetValueEx(hKey, "CustomX", 0, winreg.REG_DWORD, width)
- winreg.SetValueEx(hKey, "CustomY", 0, winreg.REG_DWORD, height)
-
- # making sure message dialogs don't appear when overwriting
- path_overwrite_scene = "\\".join([
- path_user_settings, "Messages", "OverwriteScene"
- ])
- winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_overwrite_scene)
- hKey = winreg.OpenKey(
- winreg.HKEY_CURRENT_USER, path_overwrite_scene, 0,
- winreg.KEY_ALL_ACCESS
- )
- winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 6)
- winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)
-
- # set scane as not saved
- path_scene_saved = "\\".join([
- path_user_settings, "Messages", "SceneSaved"
- ])
- winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_scene_saved)
- hKey = winreg.OpenKey(
- winreg.HKEY_CURRENT_USER, path_scene_saved, 0,
- winreg.KEY_ALL_ACCESS
- )
- winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 1)
- winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)
-
- def workfile_path(self):
- workfile_path = self.data["last_workfile_path"]
-
- # copy workfile from template if doesn't exist any on path
- if not os.path.exists(workfile_path):
- # TODO add ability to set different template workfile path via
- # settings
- template_path = os.path.join(
- CELACTION_ROOT_DIR,
- "resources",
- "celaction_template_scene.scn"
- )
-
- if not os.path.exists(template_path):
- self.log.warning(
- "Couldn't find workfile template file in {}".format(
- template_path
- )
- )
- return
-
- self.log.info(
- f"Creating workfile from template: \"{template_path}\""
- )
-
- # Copy template workfile to new destinantion
- shutil.copy2(
- os.path.normpath(template_path),
- os.path.normpath(workfile_path)
- )
-
- self.log.info(f"Workfile to open: \"{workfile_path}\"")
-
- return workfile_path
-
- def get_workfile_settings(self):
- return self.data["project_settings"]["celaction"]["workfile"]
diff --git a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_cli_kwargs.py b/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_cli_kwargs.py
deleted file mode 100644
index 1820569918..0000000000
--- a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_cli_kwargs.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import pyblish.api
-import sys
-from pprint import pformat
-
-
-class CollectCelactionCliKwargs(pyblish.api.ContextPlugin):
- """ Collects all keyword arguments passed from the terminal """
-
- label = "Collect Celaction Cli Kwargs"
- order = pyblish.api.CollectorOrder - 0.1
-
- def process(self, context):
- args = list(sys.argv[1:])
- self.log.info(str(args))
- missing_kwargs = []
- passing_kwargs = {}
- for key in (
- "chunk",
- "frameStart",
- "frameEnd",
- "resolutionWidth",
- "resolutionHeight",
- "currentFile",
- ):
- arg_key = f"--{key}"
- if arg_key not in args:
- missing_kwargs.append(key)
- continue
- arg_idx = args.index(arg_key)
- args.pop(arg_idx)
- if key != "currentFile":
- value = args.pop(arg_idx)
- else:
- path_parts = []
- while arg_idx < len(args):
- path_parts.append(args.pop(arg_idx))
- value = " ".join(path_parts).strip('"')
-
- passing_kwargs[key] = value
-
- if missing_kwargs:
- self.log.debug("Missing arguments {}".format(
- ", ".join(
- [f'"{key}"' for key in missing_kwargs]
- )
- ))
-
- self.log.info("Storing kwargs ...")
- self.log.debug("_ passing_kwargs: {}".format(pformat(passing_kwargs)))
-
- # set kwargs to context data
- context.set_data("passingKwargs", passing_kwargs)
-
- # get kwargs onto context data as keys with values
- for k, v in passing_kwargs.items():
- self.log.info(f"Setting `{k}` to instance.data with value: `{v}`")
- if k in ["frameStart", "frameEnd"]:
- context.data[k] = passing_kwargs[k] = int(v)
- else:
- context.data[k] = v
diff --git a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_instances.py b/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_instances.py
deleted file mode 100644
index 7c22201e3e..0000000000
--- a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_instances.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import os
-import pyblish.api
-
-
-class CollectCelactionInstances(pyblish.api.ContextPlugin):
- """ Adds the celaction render instances """
-
- label = "Collect Celaction Instances"
- order = pyblish.api.CollectorOrder + 0.1
-
- def process(self, context):
- task = context.data["task"]
- current_file = context.data["currentFile"]
- staging_dir = os.path.dirname(current_file)
- scene_file = os.path.basename(current_file)
- version = context.data["version"]
-
- folder_entity = context.data["folderEntity"]
-
- folder_attributes = folder_entity["attrib"]
-
- shared_instance_data = {
- "folderPath": folder_entity["path"],
- "frameStart": folder_attributes["frameStart"],
- "frameEnd": folder_attributes["frameEnd"],
- "handleStart": folder_attributes["handleStart"],
- "handleEnd": folder_attributes["handleEnd"],
- "fps": folder_attributes["fps"],
- "resolutionWidth": folder_attributes["resolutionWidth"],
- "resolutionHeight": folder_attributes["resolutionHeight"],
- "pixelAspect": 1,
- "step": 1,
- "version": version
- }
-
- celaction_kwargs = context.data.get(
- "passingKwargs", {})
-
- if celaction_kwargs:
- shared_instance_data.update(celaction_kwargs)
-
- # workfile instance
- product_type = "workfile"
- product_name = product_type + task.capitalize()
- # Create instance
- instance = context.create_instance(product_name)
-
- # creating instance data
- instance.data.update({
- "label": scene_file,
- "productName": product_name,
- "productType": product_type,
- "family": product_type,
- "families": [product_type],
- "representations": []
- })
-
- # adding basic script data
- instance.data.update(shared_instance_data)
-
- # creating representation
- representation = {
- 'name': 'scn',
- 'ext': 'scn',
- 'files': scene_file,
- "stagingDir": staging_dir,
- }
-
- instance.data["representations"].append(representation)
-
- self.log.info('Publishing Celaction workfile')
-
- # render instance
- product_name = f"render{task}Main"
- product_type = "render.farm"
- instance = context.create_instance(name=product_name)
- # getting instance state
- instance.data["publish"] = True
-
- # add folderEntity data into instance
- instance.data.update({
- "label": "{} - farm".format(product_name),
- "productType": product_type,
- "family": product_type,
- "families": [product_type],
- "productName": product_name
- })
-
- # adding basic script data
- instance.data.update(shared_instance_data)
-
- self.log.info('Publishing Celaction render instance')
- self.log.debug(f"Instance data: `{instance.data}`")
-
- for i in context:
- self.log.debug(f"{i.data['families']}")
diff --git a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_render_path.py b/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_render_path.py
deleted file mode 100644
index 3bcd1c69b3..0000000000
--- a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_render_path.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import os
-import copy
-import pyblish.api
-
-
-class CollectRenderPath(pyblish.api.InstancePlugin):
- """Generate file and directory path where rendered images will be"""
-
- label = "Collect Render Path"
- order = pyblish.api.CollectorOrder + 0.495
- families = ["render.farm"]
-
- settings_category = "celaction"
-
- # Presets
- output_extension = "png"
- anatomy_template_key_render_files = None
- anatomy_template_key_metadata = None
-
- def process(self, instance):
- anatomy = instance.context.data["anatomy"]
- anatomy_data = copy.deepcopy(instance.data["anatomyData"])
- padding = anatomy.templates_obj.frame_padding
- product_type = "render"
- anatomy_data.update({
- "frame": f"%0{padding}d",
- "family": product_type,
- "representation": self.output_extension,
- "ext": self.output_extension
- })
- anatomy_data["product"]["type"] = product_type
-
- # get anatomy rendering keys
- r_anatomy_key = self.anatomy_template_key_render_files
- m_anatomy_key = self.anatomy_template_key_metadata
-
- # get folder and path for rendering images from celaction
- r_template_item = anatomy.get_template_item("publish", r_anatomy_key)
- render_dir = r_template_item["directory"].format_strict(anatomy_data)
- render_path = r_template_item["path"].format_strict(anatomy_data)
- self.log.debug("__ render_path: `{}`".format(render_path))
-
- # create dir if it doesn't exists
- try:
- if not os.path.isdir(render_dir):
- os.makedirs(render_dir, exist_ok=True)
- except OSError:
- # directory is not available
- self.log.warning("Path is unreachable: `{}`".format(render_dir))
-
- # add rendering path to instance data
- instance.data["path"] = render_path
-
- # get anatomy for published renders folder path
- m_template_item = anatomy.get_template_item(
- "publish", m_anatomy_key, default=None
- )
- if m_template_item is not None:
- metadata_path = m_template_item["directory"].format_strict(
- anatomy_data
- )
- instance.data["publishRenderMetadataFolder"] = metadata_path
- self.log.info("Metadata render path: `{}`".format(metadata_path))
-
- self.log.info(f"Render output path set to: `{render_path}`")
diff --git a/server_addon/celaction/client/ayon_celaction/plugins/publish/integrate_version_up.py b/server_addon/celaction/client/ayon_celaction/plugins/publish/integrate_version_up.py
deleted file mode 100644
index c165b0c871..0000000000
--- a/server_addon/celaction/client/ayon_celaction/plugins/publish/integrate_version_up.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import shutil
-
-import pyblish.api
-
-from ayon_core.lib import version_up
-
-
-class VersionUpScene(pyblish.api.ContextPlugin):
- order = pyblish.api.IntegratorOrder + 0.5
- label = 'Version Up Scene'
- families = ['workfile']
- optional = True
- active = True
-
- def process(self, context):
- current_file = context.data.get('currentFile')
- v_up = version_up(current_file)
- self.log.debug('Current file is: {}'.format(current_file))
- self.log.debug('Version up: {}'.format(v_up))
-
- shutil.copy2(current_file, v_up)
- self.log.info('Scene saved into new version: {}'.format(v_up))
diff --git a/server_addon/celaction/client/ayon_celaction/resources/celaction_template_scene.scn b/server_addon/celaction/client/ayon_celaction/resources/celaction_template_scene.scn
deleted file mode 100644
index 54e4497a31..0000000000
Binary files a/server_addon/celaction/client/ayon_celaction/resources/celaction_template_scene.scn and /dev/null differ
diff --git a/server_addon/celaction/client/ayon_celaction/scripts/__init__.py b/server_addon/celaction/client/ayon_celaction/scripts/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/server_addon/celaction/client/ayon_celaction/scripts/publish_cli.py b/server_addon/celaction/client/ayon_celaction/scripts/publish_cli.py
deleted file mode 100644
index 4e54aa253a..0000000000
--- a/server_addon/celaction/client/ayon_celaction/scripts/publish_cli.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import os
-import sys
-
-import pyblish.api
-import pyblish.util
-
-from ayon_celaction import CELACTION_ROOT_DIR
-from ayon_core.lib import Logger
-from ayon_core.tools.utils import host_tools
-from ayon_core.pipeline import install_ayon_plugins
-
-
-log = Logger.get_logger("celaction")
-
-PUBLISH_HOST = "celaction"
-PLUGINS_DIR = os.path.join(CELACTION_ROOT_DIR, "plugins")
-PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
-
-
-def main():
- # Registers global pyblish plugins
- install_ayon_plugins()
-
- if os.path.exists(PUBLISH_PATH):
- log.info(f"Registering path: {PUBLISH_PATH}")
- pyblish.api.register_plugin_path(PUBLISH_PATH)
-
- pyblish.api.register_host(PUBLISH_HOST)
- pyblish.api.register_target("local")
-
- return host_tools.show_publish()
-
-
-if __name__ == "__main__":
- result = main()
- sys.exit(not bool(result))
diff --git a/server_addon/celaction/client/ayon_celaction/version.py b/server_addon/celaction/client/ayon_celaction/version.py
deleted file mode 100644
index ceed47c3a0..0000000000
--- a/server_addon/celaction/client/ayon_celaction/version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Package declaring AYON addon 'celaction' version."""
-__version__ = "0.2.0"
diff --git a/server_addon/celaction/package.py b/server_addon/celaction/package.py
deleted file mode 100644
index 8b9069d019..0000000000
--- a/server_addon/celaction/package.py
+++ /dev/null
@@ -1,12 +0,0 @@
-name = "celaction"
-title = "CelAction"
-version = "0.2.0"
-
-client_dir = "ayon_celaction"
-
-ayon_required_addons = {
- "core": ">0.3.2",
-}
-ayon_compatible_addons = {
- "applications": ">=0.2.0",
-}
diff --git a/server_addon/celaction/server/__init__.py b/server_addon/celaction/server/__init__.py
deleted file mode 100644
index e3769a4b7f..0000000000
--- a/server_addon/celaction/server/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from typing import Type
-
-from ayon_server.addons import BaseServerAddon
-
-from .settings import CelActionSettings, DEFAULT_VALUES
-
-
-class CelActionAddon(BaseServerAddon):
- settings_model: Type[CelActionSettings] = CelActionSettings
-
- async def get_default_settings(self):
- settings_model_cls = self.get_settings_model()
- return settings_model_cls(**DEFAULT_VALUES)
diff --git a/server_addon/celaction/server/imageio.py b/server_addon/celaction/server/imageio.py
deleted file mode 100644
index e0e685a244..0000000000
--- a/server_addon/celaction/server/imageio.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from pydantic import validator
-from ayon_server.settings import BaseSettingsModel, SettingsField
-from ayon_server.settings.validators import ensure_unique_names
-
-
-class ImageIOConfigModel(BaseSettingsModel):
- """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config
- path in the Core addon profiles here
- (ayon+settings://core/imageio/ocio_config_profiles).
- """
-
- override_global_config: bool = SettingsField(
- False,
- title="Override global OCIO config",
- description=(
- "DEPRECATED functionality. Please set the OCIO config path in the "
- "Core addon profiles here (ayon+settings://core/imageio/"
- "ocio_config_profiles)."
- ),
- )
- filepath: list[str] = SettingsField(
- default_factory=list,
- title="Config path",
- description=(
- "DEPRECATED functionality. Please set the OCIO config path in the "
- "Core addon profiles here (ayon+settings://core/imageio/"
- "ocio_config_profiles)."
- ),
- )
-
-
-class ImageIOFileRuleModel(BaseSettingsModel):
- name: str = SettingsField("", title="Rule name")
- pattern: str = SettingsField("", title="Regex pattern")
- colorspace: str = SettingsField("", title="Colorspace name")
- ext: str = SettingsField("", title="File extension")
-
-
-class ImageIOFileRulesModel(BaseSettingsModel):
- activate_host_rules: bool = SettingsField(False)
- rules: list[ImageIOFileRuleModel] = SettingsField(
- default_factory=list,
- title="Rules"
- )
-
- @validator("rules")
- def validate_unique_outputs(cls, value):
- ensure_unique_names(value)
- return value
-
-
-class CelActionImageIOModel(BaseSettingsModel):
- activate_host_color_management: bool = SettingsField(
- True, title="Enable Color Management"
- )
- ocio_config: ImageIOConfigModel = SettingsField(
- default_factory=ImageIOConfigModel,
- title="OCIO config"
- )
- file_rules: ImageIOFileRulesModel = SettingsField(
- default_factory=ImageIOFileRulesModel,
- title="File Rules"
- )
diff --git a/server_addon/celaction/server/settings.py b/server_addon/celaction/server/settings.py
deleted file mode 100644
index afa9773477..0000000000
--- a/server_addon/celaction/server/settings.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from ayon_server.settings import BaseSettingsModel, SettingsField
-from .imageio import CelActionImageIOModel
-
-
-class CollectRenderPathModel(BaseSettingsModel):
- output_extension: str = SettingsField(
- "",
- title="Output render file extension"
- )
- anatomy_template_key_render_files: str = SettingsField(
- "",
- title="Anatomy template key: render files"
- )
- anatomy_template_key_metadata: str = SettingsField(
- "",
- title="Anatomy template key: metadata job file"
- )
-
-
-def _workfile_submit_overrides():
- return [
- {
- "value": "render_chunk",
- "label": "Pass chunk size"
- },
- {
- "value": "frame_range",
- "label": "Pass frame range"
- },
- {
- "value": "resolution",
- "label": "Pass resolution"
- }
- ]
-
-
-class WorkfileModel(BaseSettingsModel):
- submission_overrides: list[str] = SettingsField(
- default_factory=list,
- title="Submission workfile overrides",
- enum_resolver=_workfile_submit_overrides
- )
-
-
-class PublishPluginsModel(BaseSettingsModel):
- CollectRenderPath: CollectRenderPathModel = SettingsField(
- default_factory=CollectRenderPathModel,
- title="Collect Render Path"
- )
-
-
-class CelActionSettings(BaseSettingsModel):
- imageio: CelActionImageIOModel = SettingsField(
- default_factory=CelActionImageIOModel,
- title="Color Management (ImageIO)"
- )
- workfile: WorkfileModel = SettingsField(
- title="Workfile"
- )
- publish: PublishPluginsModel = SettingsField(
- default_factory=PublishPluginsModel,
- title="Publish plugins",
- )
-
-
-DEFAULT_VALUES = {
- "imageio": {
- "ocio_config": {
- "enabled": False,
- "filepath": []
- },
- "file_rules": {
- "enabled": False,
- "rules": []
- }
- },
- "workfile": {
- "submission_overrides": [
- "render_chunk",
- "frame_range",
- "resolution"
- ]
- },
- "publish": {
- "CollectRenderPath": {
- "output_extension": "png",
- "anatomy_template_key_render_files": "render",
- "anatomy_template_key_metadata": "render"
- }
- }
-}
diff --git a/server_addon/clockify/client/ayon_clockify/__init__.py b/server_addon/clockify/client/ayon_clockify/__init__.py
deleted file mode 100644
index 75fb87494e..0000000000
--- a/server_addon/clockify/client/ayon_clockify/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .addon import ClockifyAddon
-
-__all__ = (
- "ClockifyAddon",
-)
diff --git a/server_addon/clockify/client/ayon_clockify/addon.py b/server_addon/clockify/client/ayon_clockify/addon.py
deleted file mode 100644
index cf35e77ce4..0000000000
--- a/server_addon/clockify/client/ayon_clockify/addon.py
+++ /dev/null
@@ -1,290 +0,0 @@
-import os
-import threading
-import time
-
-from ayon_core.addon import AYONAddon, ITrayAddon, IPluginPaths
-
-from .version import __version__
-from .constants import CLOCKIFY_FTRACK_USER_PATH, CLOCKIFY_FTRACK_SERVER_PATH
-
-
-class ClockifyAddon(AYONAddon, ITrayAddon, IPluginPaths):
- name = "clockify"
- version = __version__
-
- def initialize(self, studio_settings):
- enabled = self.name in studio_settings
- workspace_name = None
- if enabled:
- clockify_settings = studio_settings[self.name]
- workspace_name = clockify_settings["workspace_name"]
-
- if enabled and workspace_name:
- self.log.warning("Clockify Workspace is not set in settings.")
- enabled = False
- self.enabled = enabled
- self.workspace_name = workspace_name
-
- self.timer_manager = None
- self.MessageWidgetClass = None
- self.message_widget = None
- self._clockify_api = None
-
- # TimersManager attributes
- # - set `timers_manager_connector` only in `tray_init`
- self.timers_manager_connector = None
- self._timer_manager_addon = None
-
- @property
- def clockify_api(self):
- if self._clockify_api is None:
- from .clockify_api import ClockifyAPI
-
- self._clockify_api = ClockifyAPI(master_parent=self)
- return self._clockify_api
-
- def get_global_environments(self):
- return {"CLOCKIFY_WORKSPACE": self.workspace_name}
-
- def tray_init(self):
- from .widgets import ClockifySettings, MessageWidget
-
- self.MessageWidgetClass = MessageWidget
-
- self.message_widget = None
- self.widget_settings = ClockifySettings(self.clockify_api)
- self.widget_settings_required = None
-
- self.thread_timer_check = None
- # Bools
- self.bool_thread_check_running = False
- self.bool_api_key_set = False
- self.bool_workspace_set = False
- self.bool_timer_run = False
- self.bool_api_key_set = self.clockify_api.set_api()
-
- # Define itself as TimersManager connector
- self.timers_manager_connector = self
-
- def tray_start(self):
- if self.bool_api_key_set is False:
- self.show_settings()
- return
-
- self.bool_workspace_set = self.clockify_api.workspace_id is not None
- if self.bool_workspace_set is False:
- return
-
- self.start_timer_check()
- self.set_menu_visibility()
-
- def tray_exit(self, *_a, **_kw):
- return
-
- def get_plugin_paths(self):
- """Implementation of IPluginPaths to get plugin paths."""
- actions_path = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), "launcher_actions"
- )
- return {"actions": [actions_path]}
-
- def get_ftrack_event_handler_paths(self):
- """Function for ftrack addon to add ftrack event handler paths."""
- return {
- "user": [CLOCKIFY_FTRACK_USER_PATH],
- "server": [CLOCKIFY_FTRACK_SERVER_PATH],
- }
-
- def clockify_timer_stopped(self):
- self.bool_timer_run = False
- self.timer_stopped()
-
- def start_timer_check(self):
- self.bool_thread_check_running = True
- if self.thread_timer_check is None:
- self.thread_timer_check = threading.Thread(
- target=self.check_running
- )
- self.thread_timer_check.daemon = True
- self.thread_timer_check.start()
-
- def stop_timer_check(self):
- self.bool_thread_check_running = True
- if self.thread_timer_check is not None:
- self.thread_timer_check.join()
- self.thread_timer_check = None
-
- def check_running(self):
- while self.bool_thread_check_running is True:
- bool_timer_run = False
- if self.clockify_api.get_in_progress() is not None:
- bool_timer_run = True
-
- if self.bool_timer_run != bool_timer_run:
- if self.bool_timer_run is True:
- self.clockify_timer_stopped()
- elif self.bool_timer_run is False:
- current_timer = self.clockify_api.get_in_progress()
- if current_timer is None:
- continue
- current_proj_id = current_timer.get("projectId")
- if not current_proj_id:
- continue
-
- project = self.clockify_api.get_project_by_id(
- current_proj_id
- )
- if project and project.get("code") == 501:
- continue
-
- project_name = project.get("name")
-
- current_timer_hierarchy = current_timer.get("description")
- if not current_timer_hierarchy:
- continue
- hierarchy_items = current_timer_hierarchy.split("/")
- # Each pype timer must have at least 2 items!
- if len(hierarchy_items) < 2:
- continue
-
- task_name = hierarchy_items[-1]
- hierarchy = hierarchy_items[:-1]
-
- data = {
- "task_name": task_name,
- "hierarchy": hierarchy,
- "project_name": project_name,
- }
- self.timer_started(data)
-
- self.bool_timer_run = bool_timer_run
- self.set_menu_visibility()
- time.sleep(5)
-
- def signed_in(self):
- if not self.timer_manager:
- return
-
- if not self.timer_manager.last_task:
- return
-
- if self.timer_manager.is_running:
- self.start_timer_manager(self.timer_manager.last_task)
-
- def on_message_widget_close(self):
- self.message_widget = None
-
- # Definition of Tray menu
- def tray_menu(self, parent_menu):
- # Menu for Tray App
- from qtpy import QtWidgets
-
- menu = QtWidgets.QMenu("Clockify", parent_menu)
- menu.setProperty("submenu", "on")
-
- # Actions
- action_show_settings = QtWidgets.QAction("Settings", menu)
- action_stop_timer = QtWidgets.QAction("Stop timer", menu)
-
- menu.addAction(action_show_settings)
- menu.addAction(action_stop_timer)
-
- action_show_settings.triggered.connect(self.show_settings)
- action_stop_timer.triggered.connect(self.stop_timer)
-
- self.action_stop_timer = action_stop_timer
-
- self.set_menu_visibility()
-
- parent_menu.addMenu(menu)
-
- def show_settings(self):
- self.widget_settings.input_api_key.setText(
- self.clockify_api.get_api_key()
- )
- self.widget_settings.show()
-
- def set_menu_visibility(self):
- self.action_stop_timer.setVisible(self.bool_timer_run)
-
- # --- TimersManager connection methods ---
- def register_timers_manager(self, timer_manager_addon):
- """Store TimersManager for future use."""
- self._timer_manager_addon = timer_manager_addon
-
- def timer_started(self, data):
- """Tell TimersManager that timer started."""
- if self._timer_manager_addon is not None:
- self._timer_manager_addon.timer_started(self.id, data)
-
- def timer_stopped(self):
- """Tell TimersManager that timer stopped."""
- if self._timer_manager_addon is not None:
- self._timer_manager_addon.timer_stopped(self.id)
-
- def stop_timer(self):
- """Called from TimersManager to stop timer."""
- self.clockify_api.finish_time_entry()
-
- def _verify_project_exists(self, project_name):
- project_id = self.clockify_api.get_project_id(project_name)
- if not project_id:
- self.log.warning(
- 'Project "{}" was not found in Clockify. Timer won\'t start.'
- ).format(project_name)
-
- if not self.MessageWidgetClass:
- return
-
- msg = (
- 'Project "{}" is not'
- ' in Clockify Workspace "{}".'
-                "<br>Please inform your Project Manager."
- ).format(project_name, str(self.clockify_api.workspace_name))
-
- self.message_widget = self.MessageWidgetClass(
- msg, "Clockify - Info Message"
- )
- self.message_widget.closed.connect(self.on_message_widget_close)
- self.message_widget.show()
- return False
- return project_id
-
- def start_timer(self, input_data):
- """Called from TimersManager to start timer."""
- # If not api key is not entered then skip
- if not self.clockify_api.get_api_key():
- return
-
- project_name = input_data.get("project_name")
- folder_path = input_data.get("folder_path")
- task_name = input_data.get("task_name")
- task_type = input_data.get("task_type")
- if not all((project_name, folder_path, task_name, task_type)):
- return
-
- # Concatenate hierarchy and task to get description
- description = "/".join([folder_path.lstrip("/"), task_name])
-
- # Check project existence
- project_id = self._verify_project_exists(project_name)
- if not project_id:
- return
-
- # Setup timer tags
- if not task_type:
- self.log.info("No tag information found for the timer")
-
- tag_ids = []
- task_tag_id = self.clockify_api.get_tag_id(task_type)
- if task_tag_id is not None:
- tag_ids.append(task_tag_id)
-
- # Start timer
- self.clockify_api.start_time_entry(
- description,
- project_id,
- tag_ids=tag_ids,
- workspace_id=self.clockify_api.workspace_id,
- user_id=self.clockify_api.user_id,
- )
diff --git a/server_addon/clockify/client/ayon_clockify/clockify_api.py b/server_addon/clockify/client/ayon_clockify/clockify_api.py
deleted file mode 100644
index 38ca6cdb66..0000000000
--- a/server_addon/clockify/client/ayon_clockify/clockify_api.py
+++ /dev/null
@@ -1,447 +0,0 @@
-import os
-import json
-import datetime
-
-import requests
-
-from ayon_core.lib.local_settings import AYONSecureRegistry
-from ayon_core.lib import Logger
-
-from .constants import (
- CLOCKIFY_ENDPOINT,
- ADMIN_PERMISSION_NAMES,
-)
-
-
-class ClockifyAPI:
- log = Logger.get_logger(__name__)
-
- def __init__(self, api_key=None, master_parent=None):
- self.workspace_name = None
- self.master_parent = master_parent
- self.api_key = api_key
- self._workspace_id = None
- self._user_id = None
- self._secure_registry = None
-
- @property
- def secure_registry(self):
- if self._secure_registry is None:
- self._secure_registry = AYONSecureRegistry("clockify")
- return self._secure_registry
-
- @property
- def headers(self):
- return {"x-api-key": self.api_key}
-
- @property
- def workspace_id(self):
- return self._workspace_id
-
- @property
- def user_id(self):
- return self._user_id
-
- def verify_api(self):
- for key, value in self.headers.items():
- if value is None or value.strip() == "":
- return False
- return True
-
- def set_api(self, api_key=None):
- if api_key is None:
- api_key = self.get_api_key()
-
- if api_key is not None and self.validate_api_key(api_key) is True:
- self.api_key = api_key
- self.set_workspace()
- self.set_user_id()
- if self.master_parent:
- self.master_parent.signed_in()
- return True
- return False
-
- def validate_api_key(self, api_key):
- test_headers = {"x-api-key": api_key}
- action_url = "user"
- response = requests.get(
- CLOCKIFY_ENDPOINT + action_url, headers=test_headers
- )
- if response.status_code != 200:
- return False
- return True
-
- def validate_workspace_permissions(self, workspace_id=None, user_id=None):
- if user_id is None:
- self.log.info("No user_id found during validation")
- return False
- if workspace_id is None:
- workspace_id = self.workspace_id
- action_url = f"workspaces/{workspace_id}/users?includeRoles=1"
- response = requests.get(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers
- )
- data = response.json()
- for user in data:
- if user.get("id") == user_id:
- roles_data = user.get("roles")
- for entities in roles_data:
- if entities.get("role") in ADMIN_PERMISSION_NAMES:
- return True
- return False
-
- def get_user_id(self):
- action_url = "user"
- response = requests.get(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers
- )
- result = response.json()
- user_id = result.get("id", None)
-
- return user_id
-
- def set_workspace(self, name=None):
- if name is None:
- name = os.environ.get("CLOCKIFY_WORKSPACE", None)
- self.workspace_name = name
- if self.workspace_name is None:
- return
- try:
- result = self.validate_workspace()
- except Exception:
- result = False
- if result is not False:
- self._workspace_id = result
- if self.master_parent is not None:
- self.master_parent.start_timer_check()
- return True
- return False
-
- def validate_workspace(self, name=None):
- if name is None:
- name = self.workspace_name
- all_workspaces = self.get_workspaces()
- if name in all_workspaces:
- return all_workspaces[name]
- return False
-
- def set_user_id(self):
- try:
- user_id = self.get_user_id()
- except Exception:
- user_id = None
- if user_id is not None:
- self._user_id = user_id
-
- def get_api_key(self):
- return self.secure_registry.get_item("api_key", None)
-
- def save_api_key(self, api_key):
- self.secure_registry.set_item("api_key", api_key)
-
- def get_workspaces(self):
- action_url = "workspaces/"
- response = requests.get(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers
- )
- return {
- workspace["name"]: workspace["id"] for workspace in response.json()
- }
-
- def get_projects(self, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- action_url = f"workspaces/{workspace_id}/projects"
- response = requests.get(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers
- )
- if response.status_code != 403:
- result = response.json()
- return {project["name"]: project["id"] for project in result}
-
- def get_project_by_id(self, project_id, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- action_url = "workspaces/{}/projects/{}".format(
- workspace_id, project_id
- )
- response = requests.get(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers
- )
-
- return response.json()
-
- def get_tags(self, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- action_url = "workspaces/{}/tags".format(workspace_id)
- response = requests.get(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers
- )
-
- return {tag["name"]: tag["id"] for tag in response.json()}
-
- def get_tasks(self, project_id, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- action_url = "workspaces/{}/projects/{}/tasks".format(
- workspace_id, project_id
- )
- response = requests.get(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers
- )
-
- return {task["name"]: task["id"] for task in response.json()}
-
- def get_workspace_id(self, workspace_name):
- all_workspaces = self.get_workspaces()
- if workspace_name not in all_workspaces:
- return None
- return all_workspaces[workspace_name]
-
- def get_project_id(self, project_name, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- all_projects = self.get_projects(workspace_id)
- if project_name not in all_projects:
- return None
- return all_projects[project_name]
-
- def get_tag_id(self, tag_name, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- all_tasks = self.get_tags(workspace_id)
- if tag_name not in all_tasks:
- return None
- return all_tasks[tag_name]
-
- def get_task_id(self, task_name, project_id, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- all_tasks = self.get_tasks(project_id, workspace_id)
- if task_name not in all_tasks:
- return None
- return all_tasks[task_name]
-
- def get_current_time(self):
- return str(datetime.datetime.utcnow().isoformat()) + "Z"
-
- def start_time_entry(
- self,
- description,
- project_id,
- task_id=None,
- tag_ids=None,
- workspace_id=None,
- user_id=None,
- billable=True,
- ):
- # Workspace
- if workspace_id is None:
- workspace_id = self.workspace_id
- # User ID
- if user_id is None:
- user_id = self._user_id
-
- # get running timer to check if we need to start it
- current_timer = self.get_in_progress()
-
- # Check if is currently run another times and has same values
- # DO not restart the timer, if it is already running for current task
- if current_timer:
- current_timer_hierarchy = current_timer.get("description")
- current_project_id = current_timer.get("projectId")
- current_task_id = current_timer.get("taskId")
- if (
- description == current_timer_hierarchy
- and project_id == current_project_id
- and task_id == current_task_id
- ):
- self.log.info(
- "Timer for the current project is already running"
- )
- self.bool_timer_run = True
- return self.bool_timer_run
- self.finish_time_entry()
-
- # Convert billable to strings
- if billable:
- billable = "true"
- else:
- billable = "false"
- # Rest API Action
- action_url = "workspaces/{}/user/{}/time-entries".format(
- workspace_id, user_id
- )
- start = self.get_current_time()
- body = {
- "start": start,
- "billable": billable,
- "description": description,
- "projectId": project_id,
- "taskId": task_id,
- "tagIds": tag_ids,
- }
- response = requests.post(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
- )
- if response.status_code < 300:
- return True
- return False
-
- def _get_current_timer_values(self, response):
- if response is None:
- return
- try:
- output = response.json()
- except json.decoder.JSONDecodeError:
- return None
- if output and isinstance(output, list):
- return output[0]
- return None
-
- def get_in_progress(self, user_id=None, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- if user_id is None:
- user_id = self.user_id
-
- action_url = (
- f"workspaces/{workspace_id}/user/"
- f"{user_id}/time-entries?in-progress=1"
- )
- response = requests.get(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers
- )
- return self._get_current_timer_values(response)
-
- def finish_time_entry(self, workspace_id=None, user_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- if user_id is None:
- user_id = self.user_id
- current_timer = self.get_in_progress()
- if not current_timer:
- return
- action_url = "workspaces/{}/user/{}/time-entries".format(
- workspace_id, user_id
- )
- body = {"end": self.get_current_time()}
- response = requests.patch(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
- )
- return response.json()
-
- def get_time_entries(self, workspace_id=None, user_id=None, quantity=10):
- if workspace_id is None:
- workspace_id = self.workspace_id
- if user_id is None:
- user_id = self.user_id
- action_url = "workspaces/{}/user/{}/time-entries".format(
- workspace_id, user_id
- )
- response = requests.get(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers
- )
- return response.json()[:quantity]
-
- def remove_time_entry(self, tid, workspace_id=None, user_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- action_url = "workspaces/{}/user/{}/time-entries/{}".format(
- workspace_id, user_id, tid
- )
- response = requests.delete(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers
- )
- return response.json()
-
- def add_project(self, name, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- action_url = "workspaces/{}/projects".format(workspace_id)
- body = {
- "name": name,
- "clientId": "",
- "isPublic": "false",
- "estimate": {"estimate": 0, "type": "AUTO"},
- "color": "#f44336",
- "billable": "true",
- }
- response = requests.post(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
- )
- return response.json()
-
- def add_workspace(self, name):
- action_url = "workspaces/"
- body = {"name": name}
- response = requests.post(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
- )
- return response.json()
-
- def add_task(self, name, project_id, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- action_url = "workspaces/{}/projects/{}/tasks".format(
- workspace_id, project_id
- )
- body = {"name": name, "projectId": project_id}
- response = requests.post(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
- )
- return response.json()
-
- def add_tag(self, name, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- action_url = "workspaces/{}/tags".format(workspace_id)
- body = {"name": name}
- response = requests.post(
- CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
- )
- return response.json()
-
- def delete_project(self, project_id, workspace_id=None):
- if workspace_id is None:
- workspace_id = self.workspace_id
- action_url = "/workspaces/{}/projects/{}".format(
- workspace_id, project_id
- )
- response = requests.delete(
- CLOCKIFY_ENDPOINT + action_url,
- headers=self.headers,
- )
- return response.json()
-
- def convert_input(
- self, entity_id, entity_name, mode="Workspace", project_id=None
- ):
- if entity_id is None:
- error = False
- error_msg = 'Missing information "{}"'
- if mode.lower() == "workspace":
- if entity_id is None and entity_name is None:
- if self.workspace_id is not None:
- entity_id = self.workspace_id
- else:
- error = True
- else:
- entity_id = self.get_workspace_id(entity_name)
- else:
- if entity_id is None and entity_name is None:
- error = True
- elif mode.lower() == "project":
- entity_id = self.get_project_id(entity_name)
- elif mode.lower() == "task":
- entity_id = self.get_task_id(
- task_name=entity_name, project_id=project_id
- )
- else:
- raise TypeError("Unknown type")
- # Raise error
- if error:
- raise ValueError(error_msg.format(mode))
-
- return entity_id
diff --git a/server_addon/clockify/client/ayon_clockify/constants.py b/server_addon/clockify/client/ayon_clockify/constants.py
deleted file mode 100644
index 4574f91be1..0000000000
--- a/server_addon/clockify/client/ayon_clockify/constants.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import os
-
-
-CLOCKIFY_FTRACK_SERVER_PATH = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), "ftrack", "server"
-)
-CLOCKIFY_FTRACK_USER_PATH = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), "ftrack", "user"
-)
-
-ADMIN_PERMISSION_NAMES = ["WORKSPACE_OWN", "WORKSPACE_ADMIN"]
-CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/v1/"
diff --git a/server_addon/clockify/client/ayon_clockify/ftrack/server/action_clockify_sync_server.py b/server_addon/clockify/client/ayon_clockify/ftrack/server/action_clockify_sync_server.py
deleted file mode 100644
index ed83fed287..0000000000
--- a/server_addon/clockify/client/ayon_clockify/ftrack/server/action_clockify_sync_server.py
+++ /dev/null
@@ -1,146 +0,0 @@
-import os
-import json
-
-from ayon_clockify.clockify_api import ClockifyAPI
-
-from ayon_ftrack.lib import ServerAction
-
-
-class SyncClockifyServer(ServerAction):
- '''Synchronise project names and task types.'''
-
- identifier = "clockify.sync.server"
- label = "Sync To Clockify (server)"
- description = "Synchronise data to Clockify workspace"
-
- role_list = ["Administrator", "project Manager"]
-
- def __init__(self, *args, **kwargs):
- super(SyncClockifyServer, self).__init__(*args, **kwargs)
-
- workspace_name = os.environ.get("CLOCKIFY_WORKSPACE")
- api_key = os.environ.get("CLOCKIFY_API_KEY")
- self.clockify_api = ClockifyAPI(api_key)
- self.clockify_api.set_workspace(workspace_name)
- if api_key is None:
- modified_key = "None"
- else:
- str_len = int(len(api_key) / 2)
- start_replace = int(len(api_key) / 4)
- modified_key = ""
- for idx in range(len(api_key)):
- if idx >= start_replace and idx < start_replace + str_len:
- replacement = "X"
- else:
- replacement = api_key[idx]
- modified_key += replacement
-
- self.log.info(
- "Clockify info. Workspace: \"{}\" API key: \"{}\"".format(
- str(workspace_name), str(modified_key)
- )
- )
-
- def discover(self, session, entities, event):
- if (
- len(entities) != 1
- or entities[0].entity_type.lower() != "project"
- ):
- return False
- return True
-
- def launch(self, session, entities, event):
- self.clockify_api.set_api()
- if self.clockify_api.workspace_id is None:
- return {
- "success": False,
- "message": "Clockify Workspace or API key are not set!"
- }
-
- if not self.clockify_api.validate_workspace_permissions(
- self.clockify_api.workspace_id, self.clockify_api.user_id
- ):
- return {
- "success": False,
- "message": "Missing permissions for this action!"
- }
-
- # JOB SETTINGS
- user_id = event["source"]["user"]["id"]
- user = session.query("User where id is " + user_id).one()
-
- job = session.create("Job", {
- "user": user,
- "status": "running",
- "data": json.dumps({"description": "Sync Ftrack to Clockify"})
- })
- session.commit()
-
- project_entity = entities[0]
- if project_entity.entity_type.lower() != "project":
- project_entity = self.get_project_from_entity(project_entity)
-
- project_name = project_entity["full_name"]
- self.log.info(
- "Synchronization of project \"{}\" to clockify begins.".format(
- project_name
- )
- )
- task_types = (
- project_entity["project_schema"]["_task_type_schema"]["types"]
- )
- task_type_names = [
- task_type["name"] for task_type in task_types
- ]
- try:
- clockify_projects = self.clockify_api.get_projects()
- if project_name not in clockify_projects:
- response = self.clockify_api.add_project(project_name)
- if "id" not in response:
- self.log.warning(
- "Project \"{}\" can't be created. Response: {}".format(
- project_name, response
- )
- )
- return {
- "success": False,
- "message": (
- "Can't create clockify project \"{}\"."
- " Unexpected error."
- ).format(project_name)
- }
-
- clockify_workspace_tags = self.clockify_api.get_tags()
- for task_type_name in task_type_names:
- if task_type_name in clockify_workspace_tags:
- self.log.debug(
- "Task \"{}\" already exist".format(task_type_name)
- )
- continue
-
- response = self.clockify_api.add_tag(task_type_name)
- if "id" not in response:
- self.log.warning(
- "Task \"{}\" can't be created. Response: {}".format(
- task_type_name, response
- )
- )
-
- job["status"] = "done"
-
- except Exception:
- self.log.warning(
- "Synchronization to clockify failed.",
- exc_info=True
- )
-
- finally:
- if job["status"] != "done":
- job["status"] = "failed"
- session.commit()
-
- return True
-
-
-def register(session, **kw):
- SyncClockifyServer(session).register()
diff --git a/server_addon/clockify/client/ayon_clockify/ftrack/user/action_clockify_sync_local.py b/server_addon/clockify/client/ayon_clockify/ftrack/user/action_clockify_sync_local.py
deleted file mode 100644
index 05a94e56fd..0000000000
--- a/server_addon/clockify/client/ayon_clockify/ftrack/user/action_clockify_sync_local.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import json
-from ayon_clockify.clockify_api import ClockifyAPI
-from ayon_ftrack.lib import BaseAction, statics_icon
-
-
-class SyncClockifyLocal(BaseAction):
- """Synchronise project names and task types."""
-
- identifier = "clockify.sync.local"
- label = "Sync To Clockify"
- description = "Synchronise data to Clockify workspace"
- role_list = ["Administrator", "project Manager"]
- icon = statics_icon("app_icons", "clockify-white.png")
-
- def __init__(self, *args, **kwargs):
- super(SyncClockifyLocal, self).__init__(*args, **kwargs)
-
- self.clockify_api = ClockifyAPI()
-
- def discover(self, session, entities, event):
- if (
- len(entities) == 1
- and entities[0].entity_type.lower() == "project"
- ):
- return True
- return False
-
- def launch(self, session, entities, event):
- self.clockify_api.set_api()
- if self.clockify_api.workspace_id is None:
- return {
- "success": False,
- "message": "Clockify Workspace or API key are not set!"
- }
-
- if (
- self.clockify_api.validate_workspace_permissions(
- self.clockify_api.workspace_id, self.clockify_api.user_id)
- is False
- ):
- return {
- "success": False,
- "message": "Missing permissions for this action!"
- }
-
- # JOB SETTINGS
- userId = event['source']['user']['id']
- user = session.query('User where id is ' + userId).one()
-
- job = session.create('Job', {
- 'user': user,
- 'status': 'running',
- 'data': json.dumps({
- 'description': 'Sync ftrack to Clockify'
- })
- })
- session.commit()
-
- project_entity = entities[0]
- if project_entity.entity_type.lower() != "project":
- project_entity = self.get_project_from_entity(project_entity)
-
- project_name = project_entity["full_name"]
- self.log.info(
- "Synchronization of project \"{}\" to clockify begins.".format(
- project_name
- )
- )
- task_types = (
- project_entity["project_schema"]["_task_type_schema"]["types"]
- )
- task_type_names = [
- task_type["name"] for task_type in task_types
- ]
- try:
- clockify_projects = self.clockify_api.get_projects()
- if project_name not in clockify_projects:
- response = self.clockify_api.add_project(project_name)
- if "id" not in response:
- self.log.warning(
- "Project \"{}\" can't be created. Response: {}".format(
- project_name, response
- )
- )
- return {
- "success": False,
- "message": (
- "Can't create clockify project \"{}\"."
- " Unexpected error."
- ).format(project_name)
- }
-
- clockify_workspace_tags = self.clockify_api.get_tags()
- for task_type_name in task_type_names:
- if task_type_name in clockify_workspace_tags:
- self.log.debug(
- "Task \"{}\" already exist".format(task_type_name)
- )
- continue
-
- response = self.clockify_api.add_tag(task_type_name)
- if "id" not in response:
- self.log.warning(
- "Task \"{}\" can't be created. Response: {}".format(
- task_type_name, response
- )
- )
-
- job["status"] = "done"
-
- except Exception:
- pass
-
- finally:
- if job["status"] != "done":
- job["status"] = "failed"
- session.commit()
-
- return True
-
-
-def register(session, **kw):
- SyncClockifyLocal(session).register()
diff --git a/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifyStart.py b/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifyStart.py
deleted file mode 100644
index d69d0371c0..0000000000
--- a/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifyStart.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import ayon_api
-
-from ayon_clockify.clockify_api import ClockifyAPI
-
-from ayon_core.pipeline import LauncherAction
-
-
-class ClockifyStart(LauncherAction):
- name = "clockify_start_timer"
- label = "Clockify - Start Timer"
- icon = "app_icons/clockify.png"
- order = 500
- clockify_api = ClockifyAPI()
-
- def is_compatible(self, selection):
- """Return whether the action is compatible with the session"""
- return selection.is_task_selected
-
- def process(self, selection, **kwargs):
- self.clockify_api.set_api()
- user_id = self.clockify_api.user_id
- workspace_id = self.clockify_api.workspace_id
- project_name = selection.project_name
- folder_path = selection.folder_path
- task_name = selection.task_name
- description = "/".join([folder_path.lstrip("/"), task_name])
-
- # fetch folder entity
- folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
- task_entity = ayon_api.get_task_by_name(
- project_name, folder_entity["id"], task_name
- )
-
- # get task type to fill the timer tag
- task_type = task_entity["taskType"]
-
- project_id = self.clockify_api.get_project_id(
- project_name, workspace_id
- )
- tag_ids = []
- tag_name = task_type
- tag_ids.append(self.clockify_api.get_tag_id(tag_name, workspace_id))
- self.clockify_api.start_time_entry(
- description,
- project_id,
- tag_ids=tag_ids,
- workspace_id=workspace_id,
- user_id=user_id,
- )
diff --git a/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifySync.py b/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifySync.py
deleted file mode 100644
index a32f2a8082..0000000000
--- a/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifySync.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import ayon_api
-
-from ayon_clockify.clockify_api import ClockifyAPI
-from ayon_core.pipeline import LauncherAction
-
-
-class ClockifyPermissionsCheckFailed(Exception):
- """Timer start failed due to user permissions check.
- Message should be self explanatory as traceback won't be shown.
- """
-
- pass
-
-
-class ClockifySync(LauncherAction):
- name = "sync_to_clockify"
- label = "Sync to Clockify"
- icon = "app_icons/clockify-white.png"
- order = 500
- clockify_api = ClockifyAPI()
-
- def is_compatible(self, selection):
- """Check if there's some projects to sync"""
- if selection.is_project_selected:
- return True
-
- try:
- next(ayon_api.get_projects())
- return True
- except StopIteration:
- return False
-
- def process(self, selection, **kwargs):
- self.clockify_api.set_api()
- workspace_id = self.clockify_api.workspace_id
- user_id = self.clockify_api.user_id
- if not self.clockify_api.validate_workspace_permissions(
- workspace_id, user_id
- ):
- raise ClockifyPermissionsCheckFailed(
- "Current CLockify user is missing permissions for this action!"
- )
-
- if selection.is_project_selected:
- projects_to_sync = [selection.project_entity]
- else:
- projects_to_sync = ayon_api.get_projects()
-
- projects_info = {
- project["name"]: {
- task_type["name"]
- for task_type in project["taskTypes"]
- }
- for project in projects_to_sync
- }
-
- clockify_projects = self.clockify_api.get_projects(workspace_id)
- for project_name, task_types in projects_info.items():
- if project_name in clockify_projects:
- continue
-
- response = self.clockify_api.add_project(
- project_name, workspace_id
- )
- if "id" not in response:
- self.log.error(
- "Project {} can't be created".format(project_name)
- )
- continue
-
- clockify_workspace_tags = self.clockify_api.get_tags(workspace_id)
- for task_type in task_types:
- if task_type not in clockify_workspace_tags:
- response = self.clockify_api.add_tag(
- task_type, workspace_id
- )
- if "id" not in response:
- self.log.error(
- "Task {} can't be created".format(task_type)
- )
- continue
diff --git a/server_addon/clockify/client/ayon_clockify/version.py b/server_addon/clockify/client/ayon_clockify/version.py
deleted file mode 100644
index 36bfd79364..0000000000
--- a/server_addon/clockify/client/ayon_clockify/version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Package declaring AYON addon 'clockify' version."""
-__version__ = "0.2.1"
diff --git a/server_addon/clockify/client/ayon_clockify/widgets.py b/server_addon/clockify/client/ayon_clockify/widgets.py
deleted file mode 100644
index e64b64601d..0000000000
--- a/server_addon/clockify/client/ayon_clockify/widgets.py
+++ /dev/null
@@ -1,207 +0,0 @@
-from qtpy import QtCore, QtGui, QtWidgets
-from ayon_core import resources, style
-
-
-class MessageWidget(QtWidgets.QWidget):
-
- SIZE_W = 300
- SIZE_H = 130
-
- closed = QtCore.Signal()
-
- def __init__(self, messages, title):
- super(MessageWidget, self).__init__()
-
- # Icon
- icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
- self.setWindowIcon(icon)
-
- self.setWindowFlags(
- QtCore.Qt.WindowCloseButtonHint |
- QtCore.Qt.WindowMinimizeButtonHint
- )
-
- # Size setting
- self.resize(self.SIZE_W, self.SIZE_H)
- self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H))
- self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100))
-
- # Style
- self.setStyleSheet(style.load_stylesheet())
-
- self.setLayout(self._ui_layout(messages))
- self.setWindowTitle(title)
-
- def _ui_layout(self, messages):
- if not messages:
- messages = ["*Missing messages (This is a bug)*", ]
-
- elif not isinstance(messages, (tuple, list)):
- messages = [messages, ]
-
- main_layout = QtWidgets.QVBoxLayout(self)
-
- labels = []
- for message in messages:
- label = QtWidgets.QLabel(message)
- label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
- label.setTextFormat(QtCore.Qt.RichText)
- label.setWordWrap(True)
-
- labels.append(label)
- main_layout.addWidget(label)
-
- btn_close = QtWidgets.QPushButton("Close")
- btn_close.setToolTip('Close this window')
- btn_close.clicked.connect(self.on_close_clicked)
-
- btn_group = QtWidgets.QHBoxLayout()
- btn_group.addStretch(1)
- btn_group.addWidget(btn_close)
-
- main_layout.addLayout(btn_group)
-
- self.labels = labels
- self.btn_group = btn_group
- self.btn_close = btn_close
- self.main_layout = main_layout
-
- return main_layout
-
- def on_close_clicked(self):
- self.close()
-
- def close(self, *args, **kwargs):
- self.closed.emit()
- super(MessageWidget, self).close(*args, **kwargs)
-
-
-class ClockifySettings(QtWidgets.QWidget):
- SIZE_W = 500
- SIZE_H = 130
-
- loginSignal = QtCore.Signal(object, object, object)
-
- def __init__(self, clockify_api, optional=True):
- super(ClockifySettings, self).__init__()
-
- self.clockify_api = clockify_api
- self.optional = optional
- self.validated = False
-
- # Icon
- icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
- self.setWindowIcon(icon)
-
- self.setWindowTitle("Clockify settings")
- self.setWindowFlags(
- QtCore.Qt.WindowCloseButtonHint |
- QtCore.Qt.WindowMinimizeButtonHint
- )
-
- # Size setting
- self.resize(self.SIZE_W, self.SIZE_H)
- self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H))
- self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100))
- self.setStyleSheet(style.load_stylesheet())
-
- self._ui_init()
-
- def _ui_init(self):
- label_api_key = QtWidgets.QLabel("Clockify API key:")
-
- input_api_key = QtWidgets.QLineEdit()
- input_api_key.setFrame(True)
- input_api_key.setPlaceholderText("e.g. XX1XxXX2x3x4xXxx")
-
- error_label = QtWidgets.QLabel("")
- error_label.setTextFormat(QtCore.Qt.RichText)
- error_label.setWordWrap(True)
- error_label.hide()
-
- form_layout = QtWidgets.QFormLayout()
- form_layout.setContentsMargins(10, 15, 10, 5)
- form_layout.addRow(label_api_key, input_api_key)
- form_layout.addRow(error_label)
-
- btn_ok = QtWidgets.QPushButton("Ok")
- btn_ok.setToolTip('Sets Clockify API Key so can Start/Stop timer')
-
- btn_cancel = QtWidgets.QPushButton("Cancel")
- cancel_tooltip = 'Application won\'t start'
- if self.optional:
- cancel_tooltip = 'Close this window'
- btn_cancel.setToolTip(cancel_tooltip)
-
- btn_group = QtWidgets.QHBoxLayout()
- btn_group.addStretch(1)
- btn_group.addWidget(btn_ok)
- btn_group.addWidget(btn_cancel)
-
- main_layout = QtWidgets.QVBoxLayout(self)
- main_layout.addLayout(form_layout)
- main_layout.addLayout(btn_group)
-
- btn_ok.clicked.connect(self.click_ok)
- btn_cancel.clicked.connect(self._close_widget)
-
- self.label_api_key = label_api_key
- self.input_api_key = input_api_key
- self.error_label = error_label
-
- self.btn_ok = btn_ok
- self.btn_cancel = btn_cancel
-
- def setError(self, msg):
- self.error_label.setText(msg)
- self.error_label.show()
-
- def invalid_input(self, entity):
- entity.setStyleSheet("border: 1px solid red;")
-
- def click_ok(self):
- api_key = self.input_api_key.text().strip()
- if self.optional is True and api_key == '':
- self.clockify_api.save_api_key(None)
- self.clockify_api.set_api(api_key)
- self.validated = False
- self._close_widget()
- return
-
- validation = self.clockify_api.validate_api_key(api_key)
-
- if validation:
- self.clockify_api.save_api_key(api_key)
- self.clockify_api.set_api(api_key)
- self.validated = True
- self._close_widget()
- else:
- self.invalid_input(self.input_api_key)
- self.validated = False
- self.setError(
- "Entered invalid API key"
- )
-
- def showEvent(self, event):
- super(ClockifySettings, self).showEvent(event)
-
- # Make btns same width
- max_width = max(
- self.btn_ok.sizeHint().width(),
- self.btn_cancel.sizeHint().width()
- )
- self.btn_ok.setMinimumWidth(max_width)
- self.btn_cancel.setMinimumWidth(max_width)
-
- def closeEvent(self, event):
- if self.optional is True:
- event.ignore()
- self._close_widget()
- else:
- self.validated = False
-
- def _close_widget(self):
- if self.optional is True:
- self.hide()
- else:
- self.close()
diff --git a/server_addon/clockify/package.py b/server_addon/clockify/package.py
deleted file mode 100644
index 3245e61ca1..0000000000
--- a/server_addon/clockify/package.py
+++ /dev/null
@@ -1,9 +0,0 @@
-name = "clockify"
-title = "Clockify"
-version = "0.2.1"
-client_dir = "ayon_clockify"
-
-ayon_required_addons = {
- "core": ">0.3.2",
-}
-ayon_compatible_addons = {}
diff --git a/server_addon/clockify/server/__init__.py b/server_addon/clockify/server/__init__.py
deleted file mode 100644
index 11bbfed261..0000000000
--- a/server_addon/clockify/server/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from typing import Type
-
-from ayon_server.addons import BaseServerAddon
-
-from .settings import ClockifySettings
-
-
-class ClockifyAddon(BaseServerAddon):
- settings_model: Type[ClockifySettings] = ClockifySettings
diff --git a/server_addon/clockify/server/settings.py b/server_addon/clockify/server/settings.py
deleted file mode 100644
index c01d4c1545..0000000000
--- a/server_addon/clockify/server/settings.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from ayon_server.settings import BaseSettingsModel, SettingsField
-
-
-class ClockifySettings(BaseSettingsModel):
- workspace_name: str = SettingsField(
- "",
- title="Workspace name",
- scope=["studio"]
- )
diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py
index c4d51c0808..e85df4ee81 100644
--- a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py
+++ b/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py
@@ -1,10 +1,13 @@
import tempfile
import pyblish.api
+
+from ayon_core.pipeline import OptionalPyblishPluginMixin
from ayon_houdini.api import lib, plugin
from ayon_houdini.api.pipeline import IS_HEADLESS
-class ExtractActiveViewThumbnail(plugin.HoudiniExtractorPlugin):
+class ExtractActiveViewThumbnail(plugin.HoudiniExtractorPlugin,
+ OptionalPyblishPluginMixin):
"""Set instance thumbnail to a screengrab of current active viewport.
This makes it so that if an instance does not have a thumbnail set yet that
@@ -17,6 +20,9 @@ class ExtractActiveViewThumbnail(plugin.HoudiniExtractorPlugin):
families = ["workfile"]
def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
if IS_HEADLESS:
self.log.debug(
"Skip extraction of active view thumbnail, due to being in"
diff --git a/server_addon/houdini/server/settings/publish.py b/server_addon/houdini/server/settings/publish.py
index 4f324f0726..2b88f96922 100644
--- a/server_addon/houdini/server/settings/publish.py
+++ b/server_addon/houdini/server/settings/publish.py
@@ -58,7 +58,7 @@ class ValidateWorkfilePathsModel(BaseSettingsModel):
)
-class BasicValidateModel(BaseSettingsModel):
+class BasicEnabledStatesModel(BaseSettingsModel):
enabled: bool = SettingsField(title="Enabled")
optional: bool = SettingsField(title="Optional")
active: bool = SettingsField(title="Active")
@@ -78,25 +78,30 @@ class PublishPluginsModel(BaseSettingsModel):
default_factory=CollectLocalRenderInstancesModel,
title="Collect Local Render Instances"
)
- ValidateInstanceInContextHoudini: BasicValidateModel = SettingsField(
- default_factory=BasicValidateModel,
+ ValidateInstanceInContextHoudini: BasicEnabledStatesModel = SettingsField(
+ default_factory=BasicEnabledStatesModel,
title="Validate Instance is in same Context",
section="Validators")
- ValidateMeshIsStatic: BasicValidateModel = SettingsField(
- default_factory=BasicValidateModel,
+ ValidateMeshIsStatic: BasicEnabledStatesModel = SettingsField(
+ default_factory=BasicEnabledStatesModel,
title="Validate Mesh is Static")
- ValidateReviewColorspace: BasicValidateModel = SettingsField(
- default_factory=BasicValidateModel,
+ ValidateReviewColorspace: BasicEnabledStatesModel = SettingsField(
+ default_factory=BasicEnabledStatesModel,
title="Validate Review Colorspace")
- ValidateSubsetName: BasicValidateModel = SettingsField(
- default_factory=BasicValidateModel,
+ ValidateSubsetName: BasicEnabledStatesModel = SettingsField(
+ default_factory=BasicEnabledStatesModel,
title="Validate Subset Name")
- ValidateUnrealStaticMeshName: BasicValidateModel = SettingsField(
- default_factory=BasicValidateModel,
+ ValidateUnrealStaticMeshName: BasicEnabledStatesModel = SettingsField(
+ default_factory=BasicEnabledStatesModel,
title="Validate Unreal Static Mesh Name")
ValidateWorkfilePaths: ValidateWorkfilePathsModel = SettingsField(
default_factory=ValidateWorkfilePathsModel,
title="Validate workfile paths settings")
+ ExtractActiveViewThumbnail: BasicEnabledStatesModel = SettingsField(
+ default_factory=BasicEnabledStatesModel,
+ title="Extract Active View Thumbnail",
+ section="Extractors"
+ )
DEFAULT_HOUDINI_PUBLISH_SETTINGS = {
@@ -153,5 +158,10 @@ DEFAULT_HOUDINI_PUBLISH_SETTINGS = {
"$HIP",
"$JOB"
]
+ },
+ "ExtractActiveViewThumbnail": {
+ "enabled": True,
+ "optional": False,
+ "active": True
}
}
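The settings hunk above renames the generic enabled/optional/active model to `BasicEnabledStatesModel` and reuses it for the new `ExtractActiveViewThumbnail` entry. A minimal sketch of that shared model together with the shipped defaults; constructing the model directly like this is for illustration only:

```python
from ayon_server.settings import BaseSettingsModel, SettingsField


class BasicEnabledStatesModel(BaseSettingsModel):
    enabled: bool = SettingsField(title="Enabled")
    optional: bool = SettingsField(title="Optional")
    active: bool = SettingsField(title="Active")


# The defaults shipped for ExtractActiveViewThumbnail in
# DEFAULT_HOUDINI_PUBLISH_SETTINGS map one-to-one onto the model fields.
thumbnail_defaults = BasicEnabledStatesModel(
    enabled=True, optional=False, active=True
)
assert thumbnail_defaults.enabled is True
assert thumbnail_defaults.optional is False
```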
diff --git a/server_addon/nuke/client/ayon_nuke/api/lib.py b/server_addon/nuke/client/ayon_nuke/api/lib.py
index 905521255f..6caaed3801 100644
--- a/server_addon/nuke/client/ayon_nuke/api/lib.py
+++ b/server_addon/nuke/client/ayon_nuke/api/lib.py
@@ -561,7 +561,7 @@ def read_avalon_data(node):
node (nuke.Node): Nuke node object
Returns:
- list: A list of nuke.Knob object
+        Dict[str, nuke.Knob]: Mapping of knob names to nuke.Knob objects
"""
def compat_prefixed(knob_name):
@@ -613,7 +613,7 @@ def get_node_path(path, padding=4):
path (str): The path to render to.
Returns:
- tuple: head, padding, tail (extension)
+ Tuple[str, int, str]: head, padding, tail (extension)
Examples:
>>> get_frame_path("test.exr")
@@ -655,8 +655,7 @@ def get_nuke_imageio_settings():
def get_imageio_node_setting(node_class, plugin_name, product_name):
- ''' Get preset data for dataflow (fileType, compression, bitDepth)
- '''
+ """Get preset data for dataflow (fileType, compression, bitDepth)"""
imageio_nodes = get_nuke_imageio_settings()["nodes"]
required_nodes = imageio_nodes["required_nodes"]
@@ -686,8 +685,8 @@ def get_imageio_node_setting(node_class, plugin_name, product_name):
def get_imageio_node_override_setting(
node_class, plugin_name, product_name, knobs_settings
):
- ''' Get imageio node overrides from settings
- '''
+    """Get imageio node overrides from settings
+ """
imageio_nodes = get_nuke_imageio_settings()["nodes"]
override_nodes = imageio_nodes["override_nodes"]
@@ -745,8 +744,7 @@ def get_imageio_node_override_setting(
def get_imageio_input_colorspace(filename):
- ''' Get input file colorspace based on regex in settings.
- '''
+ """Get input file colorspace based on regex in settings."""
imageio_regex_inputs = (
get_nuke_imageio_settings()["regex_inputs"]["inputs"])
@@ -791,8 +789,7 @@ def get_view_process_node():
def on_script_load():
- ''' Callback for ffmpeg support
- '''
+ """Callback for ffmpeg support"""
if nuke.env["LINUX"]:
nuke.tcl('load ffmpegReader')
nuke.tcl('load ffmpegWriter')
@@ -815,7 +812,7 @@ def check_inventory_versions():
# get all Loader nodes by avalon attribute metadata
node_with_repre_id = []
repre_ids = set()
- # Find all containers and collect it's node and representation ids
+ # Find all containers and collect its node and representation ids
for node in nuke.allNodes():
container = parse_container(node)
@@ -896,8 +893,7 @@ def check_inventory_versions():
def writes_version_sync():
- ''' Callback synchronizing version of publishable write nodes
- '''
+ """Callback synchronizing version of publishable write nodes"""
try:
rootVersion = get_version_from_path(nuke.root().name())
padding = len(rootVersion)
@@ -934,8 +930,7 @@ def writes_version_sync():
def version_up_script():
- ''' Raising working script's version
- '''
+ """Raising working script's version"""
import nukescripts
nukescripts.script_and_write_nodes_version_up()
@@ -957,14 +952,14 @@ def check_product_name_exists(nodes, product_name):
def format_anatomy(data):
- ''' Helping function for formatting of anatomy paths
+ """Helping function for formatting of anatomy paths
Arguments:
data (dict): dictionary with attributes used for formatting
Return:
- path (str)
- '''
+ str: Formatted path.
+ """
project_name = get_current_project_name()
anatomy = Anatomy(project_name)
@@ -996,9 +991,8 @@ def format_anatomy(data):
return anatomy.format(data)
-def script_name():
- ''' Returns nuke script path
- '''
+def script_name() -> str:
+ """Returns nuke script path"""
return nuke.root().knob("name").value()
@@ -1100,7 +1094,7 @@ def create_write_node(
linked_knobs=None,
**kwargs
):
- ''' Creating write node which is group node
+ """Creating write node which is group node
Arguments:
name (str): name of node
@@ -1134,8 +1128,8 @@ def create_write_node(
Return:
- node (obj): group node with avalon data as Knobs
- '''
+ node (nuke.Node): group node with avalon data as Knobs
+ """
# Ensure name does not contain any invalid characters.
special_chars = re.escape("!@#$%^&*()=[]{}|\\;',.<>/?~+-")
special_chars_regex = re.compile(f"[{special_chars}]")
@@ -1300,7 +1294,7 @@ def create_write_node(
def set_node_knobs_from_settings(node, knob_settings, **kwargs):
- """ Overriding knob values from settings
+ """Overriding knob values from settings
Using `schema_nuke_knob_inputs` for knob type definitions.
@@ -1393,8 +1387,7 @@ def color_gui_to_int(color_gui):
def create_backdrop(label="", color=None, layer=0,
nodes=None):
- """
- Create Backdrop node
+ """Create Backdrop node
Arguments:
color (str): nuke compatible string with color code
@@ -1402,6 +1395,9 @@ def create_backdrop(label="", color=None, layer=0,
label (str): the message
nodes (list): list of nodes to be wrapped into backdrop
+ Returns:
+ nuke.Node: The created backdrop node.
+
"""
assert isinstance(nodes, list), "`nodes` should be a list of nodes"
@@ -1491,12 +1487,12 @@ class WorkfileSettings(object):
return [n for n in self._nodes if filter in n.Class()]
def set_viewers_colorspace(self, imageio_nuke):
- ''' Adds correct colorspace to viewer
+ """Adds correct colorspace to viewer
Arguments:
imageio_nuke (dict): nuke colorspace configurations
- '''
+ """
filter_knobs = [
"viewerProcess",
"wipe_position",
@@ -1560,12 +1556,12 @@ class WorkfileSettings(object):
return StringTemplate(display_view).format_strict(self.formatting_data)
def set_root_colorspace(self, imageio_host):
- ''' Adds correct colorspace to root
+ """Adds correct colorspace to root
Arguments:
imageio_host (dict): host colorspace configurations
- '''
+ """
config_data = get_current_context_imageio_config_preset()
workfile_settings = imageio_host["workfile"]
@@ -1819,9 +1815,8 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
return new_path
def set_writes_colorspace(self):
- ''' Adds correct colorspace to write node dict
-
- '''
+        """Adds correct colorspace to write node dict
+ """
for node in nuke.allNodes(filter="Group", group=self._root_node):
log.info("Setting colorspace to `{}`".format(node.name()))
@@ -1943,8 +1938,8 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
knobs["to"]))
def set_colorspace(self):
- ''' Setting colorspace following presets
- '''
+        """Setting colorspace following presets
+ """
# get imageio
nuke_colorspace = get_nuke_imageio_settings()
@@ -2152,9 +2147,8 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
def get_write_node_template_attr(node):
- ''' Gets all defined data from presets
-
- '''
+    """Gets all defined data from presets
+ """
# TODO: add identifiers to settings and rename settings key
plugin_names_mapping = {
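Among the docstring fixes above, `get_node_path` is now documented as returning a `Tuple[str, int, str]` of head, padding and tail. A small standalone illustration of that split, assuming padding comes from a `%04d`-style token or from trailing digits; this helper is not the addon's implementation:

```python
import re
from typing import Tuple


def split_render_path(path: str, padding: int = 4) -> Tuple[str, int, str]:
    """Return (head, padding, tail) for a render file path."""
    filename, ext = path.rsplit(".", 1) if "." in path else (path, "")
    tail = ".{}".format(ext) if ext else ""

    # Frame padding expressed as a printf-style token, e.g. "%04d".
    printf_match = re.search(r"%(\d+)d$", filename)
    if printf_match:
        return filename[:printf_match.start()], int(printf_match.group(1)), tail

    # Otherwise infer padding from trailing digits, e.g. "render.0001".
    digits_match = re.search(r"(\d+)$", filename)
    if digits_match:
        return filename[:digits_match.start()], len(digits_match.group(1)), tail

    # No frame token found: fall back to the default padding.
    return filename, padding, tail


# Example: split_render_path("render.%04d.exr") -> ("render.", 4, ".exr")
```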
diff --git a/server_addon/photoshop/LICENSE b/server_addon/photoshop/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/server_addon/photoshop/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/server_addon/photoshop/README.md b/server_addon/photoshop/README.md
deleted file mode 100644
index 2d1e1c745c..0000000000
--- a/server_addon/photoshop/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Photoshp Addon
-===============
-
-Integration with Adobe Photoshop.
diff --git a/server_addon/photoshop/client/ayon_photoshop/__init__.py b/server_addon/photoshop/client/ayon_photoshop/__init__.py
deleted file mode 100644
index e72c79c812..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from .version import __version__
-from .addon import (
- PHOTOSHOP_ADDON_ROOT,
- PhotoshopAddon,
- get_launch_script_path,
-)
-
-
-__all__ = (
- "__version__",
-
- "PHOTOSHOP_ADDON_ROOT",
- "PhotoshopAddon",
- "get_launch_script_path",
-)
diff --git a/server_addon/photoshop/client/ayon_photoshop/addon.py b/server_addon/photoshop/client/ayon_photoshop/addon.py
deleted file mode 100644
index d0fe638f15..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/addon.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-from ayon_core.addon import AYONAddon, IHostAddon
-
-from .version import __version__
-
-PHOTOSHOP_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))
-
-
-class PhotoshopAddon(AYONAddon, IHostAddon):
- name = "photoshop"
- version = __version__
- host_name = "photoshop"
-
- def add_implementation_envs(self, env, _app):
- """Modify environments to contain all required for implementation."""
- defaults = {
- "AYON_LOG_NO_COLORS": "1",
- "WEBSOCKET_URL": "ws://localhost:8099/ws/"
- }
- for key, value in defaults.items():
- if not env.get(key):
- env[key] = value
-
- def get_workfile_extensions(self):
- return [".psd", ".psb"]
-
- def get_launch_hook_paths(self, app):
- if app.host_name != self.host_name:
- return []
- return [
- os.path.join(PHOTOSHOP_ADDON_ROOT, "hooks")
- ]
-
-
-def get_launch_script_path():
- return os.path.join(
- PHOTOSHOP_ADDON_ROOT, "api", "launch_script.py"
- )
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/README.md b/server_addon/photoshop/client/ayon_photoshop/api/README.md
deleted file mode 100644
index ef458dea16..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/README.md
+++ /dev/null
@@ -1,257 +0,0 @@
-# Photoshop Integration
-
-## Setup
-
-The Photoshop integration requires two components to work; `extension` and `server`.
-
-### Extension
-
-To install the extension download [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).
-
-```
-ExManCmd /install {path to addon}/api/extension.zxp
-```
-
-### Server
-
-The easiest way to get the server and Photoshop launch is with:
-
-```
-python -c ^"import ayon_photoshop;ayon_photoshop.launch(""C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe"")^"
-```
-
-`avalon.photoshop.launch` launches the application and server, and also closes the server when Photoshop exists.
-
-## Usage
-
-The Photoshop extension can be found under `Window > Extensions > Ayon`. Once launched you should be presented with a panel like this:
-
-
-
-
-## Developing
-
-### Extension
-When developing the extension you can load it [unsigned](https://github.com/Adobe-CEP/CEP-Resources/blob/master/CEP_9.x/Documentation/CEP%209.0%20HTML%20Extension%20Cookbook.md#debugging-unsigned-extensions).
-
-When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).
-
-```
-ZXPSignCmd -selfSignedCert NA NA Ayon Ayon-Photoshop Ayon extension.p12
-ZXPSignCmd -sign {path to avalon-core}\avalon\photoshop\extension {path to avalon-core}\avalon\photoshop\extension.zxp extension.p12 avalon
-```
-
-### Plugin Examples
-
-These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py).
-
-#### Creator Plugin
-```python
-from avalon import photoshop
-
-
-class CreateImage(photoshop.Creator):
- """Image folder for publish."""
-
- name = "imageDefault"
- label = "Image"
- product_type = "image"
-
- def __init__(self, *args, **kwargs):
- super(CreateImage, self).__init__(*args, **kwargs)
-```
-
-#### Collector Plugin
-```python
-import pythoncom
-
-import pyblish.api
-
-
-class CollectInstances(pyblish.api.ContextPlugin):
- """Gather instances by LayerSet and file metadata
-
- This collector takes into account assets that are associated with
- an LayerSet and marked with a unique identifier;
-
- Identifier:
- id (str): "ayon.create.instance"
- """
-
- label = "Instances"
- order = pyblish.api.CollectorOrder
- hosts = ["photoshop"]
- families_mapping = {
- "image": []
- }
-
- def process(self, context):
- # Necessary call when running in a different thread which pyblish-qml
- # can be.
- pythoncom.CoInitialize()
-
- photoshop_client = PhotoshopClientStub()
- layers = photoshop_client.get_layers()
- layers_meta = photoshop_client.get_layers_metadata()
- for layer in layers:
- layer_data = photoshop_client.read(layer, layers_meta)
-
- # Skip layers without metadata.
- if layer_data is None:
- continue
-
- # Skip containers.
- if "container" in layer_data["id"]:
- continue
-
- # child_layers = [*layer.Layers]
- # self.log.debug("child_layers {}".format(child_layers))
- # if not child_layers:
- # self.log.info("%s skipped, it was empty." % layer.Name)
- # continue
-
- instance = context.create_instance(layer.name)
- instance.append(layer)
- instance.data.update(layer_data)
- instance.data["families"] = self.families_mapping[
- layer_data["productType"]
- ]
- instance.data["publish"] = layer.visible
-
- # Produce diagnostic message for any graphical
- # user interface interested in visualising it.
- self.log.info("Found: \"%s\" " % instance.data["name"])
-```
-
-#### Extractor Plugin
-```python
-import os
-
-from ayon_core.pipeline import publish
-from ayon_photoshop import api as photoshop
-
-
-class ExtractImage(publish.Extractor):
- """Produce a flattened image file from instance
-
- This plug-in takes into account only the layers in the group.
- """
-
- label = "Extract Image"
- hosts = ["photoshop"]
- families = ["image"]
- formats = ["png", "jpg"]
-
- def process(self, instance):
-
- staging_dir = self.staging_dir(instance)
- self.log.info("Outputting image to {}".format(staging_dir))
-
- # Perform extraction
- stub = photoshop.stub()
- files = {}
- with photoshop.maintained_selection():
- self.log.info("Extracting %s" % str(list(instance)))
- with photoshop.maintained_visibility():
- # Hide all other layers.
- extract_ids = set([ll.id for ll in stub.
- get_layers_in_layers([instance[0]])])
-
- for layer in stub.get_layers():
- # limit unnecessary calls to client
- if layer.visible and layer.id not in extract_ids:
- stub.set_visible(layer.id, False)
-
- save_options = []
- if "png" in self.formats:
- save_options.append('png')
- if "jpg" in self.formats:
- save_options.append('jpg')
-
- file_basename = os.path.splitext(
- stub.get_active_document_name()
- )[0]
- for extension in save_options:
- _filename = "{}.{}".format(file_basename, extension)
- files[extension] = _filename
-
- full_filename = os.path.join(staging_dir, _filename)
- stub.saveAs(full_filename, extension, True)
-
- representations = []
- for extension, filename in files.items():
- representations.append({
- "name": extension,
- "ext": extension,
- "files": filename,
- "stagingDir": staging_dir
- })
- instance.data["representations"] = representations
- instance.data["stagingDir"] = staging_dir
-
- self.log.info(f"Extracted {instance} to {staging_dir}")
-```
-
-#### Loader Plugin
-```python
-from avalon import api, photoshop
-from ayon_core.pipeline import load, get_representation_path
-
-stub = photoshop.stub()
-
-
-class ImageLoader(load.LoaderPlugin):
- """Load images
-
- Stores the imported asset in a container named after the asset.
- """
-
- families = ["image"]
- representations = {"*"}
-
- def load(self, context, name=None, namespace=None, data=None):
- path = self.filepath_from_context(context)
- with photoshop.maintained_selection():
- layer = stub.import_smart_object(path)
-
- self[:] = [layer]
-
- return photoshop.containerise(
- name,
- namespace,
- layer,
- context,
- self.__class__.__name__
- )
-
- def update(self, container, context):
- layer = container.pop("layer")
- repre_entity = context["representation"]
- with photoshop.maintained_selection():
- stub.replace_smart_object(
- layer, get_representation_path(repre_entity)
- )
-
- stub.imprint(
- layer, {"representation": repre_entity["id"]}
- )
-
- def remove(self, container):
- container["layer"].Delete()
-
- def switch(self, container, context):
- self.update(container, context)
-```
-For easier debugging of Javascript:
-https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1
-Add --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome
-then localhost:8078 (port set in `photoshop\extension\.debug`)
-
-Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
-
-Or install CEF client from https://github.com/Adobe-CEP/CEP-Resources/tree/master/CEP_9.x
-## Resources
- - https://github.com/lohriialo/photoshop-scripting-python
- - https://www.adobe.com/devnet/photoshop/scripting.html
- - https://github.com/Adobe-CEP/Getting-Started-guides
- - https://github.com/Adobe-CEP/CEP-Resources
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/__init__.py b/server_addon/photoshop/client/ayon_photoshop/api/__init__.py
deleted file mode 100644
index c5a12cba06..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/__init__.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""Public API
-
-Anything that isn't defined here is INTERNAL and unreliable for external use.
-
-"""
-
-from .launch_logic import stub
-
-from .pipeline import (
- PhotoshopHost,
- ls,
- containerise
-)
-from .plugin import (
- PhotoshopLoader,
- get_unique_layer_name
-)
-
-
-from .lib import (
- maintained_selection,
- maintained_visibility
-)
-
-__all__ = [
- # launch_logic
- "stub",
-
- # pipeline
- "PhotoshopHost",
- "ls",
- "containerise",
-
- # Plugin
- "PhotoshopLoader",
- "get_unique_layer_name",
-
- # lib
- "maintained_selection",
- "maintained_visibility",
-]
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension.zxp b/server_addon/photoshop/client/ayon_photoshop/api/extension.zxp
deleted file mode 100644
index 26a73a37fd..0000000000
Binary files a/server_addon/photoshop/client/ayon_photoshop/api/extension.zxp and /dev/null differ
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/.debug b/server_addon/photoshop/client/ayon_photoshop/api/extension/.debug
deleted file mode 100644
index 4cea03cb41..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/extension/.debug
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-
-
-
-
-
-
-
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/CSXS/manifest.xml b/server_addon/photoshop/client/ayon_photoshop/api/extension/CSXS/manifest.xml
deleted file mode 100644
index 16d85be9b4..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/extension/CSXS/manifest.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- ./index.html
-
-
-
- true
-
-
- applicationActivate
- com.adobe.csxs.events.ApplicationInitialized
-
-
-
- Panel
-
-
-
- 300
- 140
-
-
- 400
- 200
-
-
-
- ./icons/ayon_logo.png
-
-
-
-
-
-
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/CSInterface.js b/server_addon/photoshop/client/ayon_photoshop/api/extension/client/CSInterface.js
deleted file mode 100644
index 4239391efd..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/CSInterface.js
+++ /dev/null
@@ -1,1193 +0,0 @@
-/**************************************************************************************************
-*
-* ADOBE SYSTEMS INCORPORATED
-* Copyright 2013 Adobe Systems Incorporated
-* All Rights Reserved.
-*
-* NOTICE: Adobe permits you to use, modify, and distribute this file in accordance with the
-* terms of the Adobe license agreement accompanying it. If you have received this file from a
-* source other than Adobe, then your use, modification, or distribution of it requires the prior
-* written permission of Adobe.
-*
-**************************************************************************************************/
-
-/** CSInterface - v8.0.0 */
-
-/**
- * Stores constants for the window types supported by the CSXS infrastructure.
- */
-function CSXSWindowType()
-{
-}
-
-/** Constant for the CSXS window type Panel. */
-CSXSWindowType._PANEL = "Panel";
-
-/** Constant for the CSXS window type Modeless. */
-CSXSWindowType._MODELESS = "Modeless";
-
-/** Constant for the CSXS window type ModalDialog. */
-CSXSWindowType._MODAL_DIALOG = "ModalDialog";
-
-/** EvalScript error message */
-EvalScript_ErrMessage = "EvalScript error.";
-
-/**
- * @class Version
- * Defines a version number with major, minor, micro, and special
- * components. The major, minor and micro values are numeric; the special
- * value can be any string.
- *
- * @param major The major version component, a positive integer up to nine digits long.
- * @param minor The minor version component, a positive integer up to nine digits long.
- * @param micro The micro version component, a positive integer up to nine digits long.
- * @param special The special version component, an arbitrary string.
- *
- * @return A new \c Version object.
- */
-function Version(major, minor, micro, special)
-{
- this.major = major;
- this.minor = minor;
- this.micro = micro;
- this.special = special;
-}
-
-/**
- * The maximum value allowed for a numeric version component.
- * This reflects the maximum value allowed in PlugPlug and the manifest schema.
- */
-Version.MAX_NUM = 999999999;
-
-/**
- * @class VersionBound
- * Defines a boundary for a version range, which associates a \c Version object
- * with a flag for whether it is an inclusive or exclusive boundary.
- *
- * @param version The \c #Version object.
- * @param inclusive True if this boundary is inclusive, false if it is exclusive.
- *
- * @return A new \c VersionBound object.
- */
-function VersionBound(version, inclusive)
-{
- this.version = version;
- this.inclusive = inclusive;
-}
-
-/**
- * @class VersionRange
- * Defines a range of versions using a lower boundary and optional upper boundary.
- *
- * @param lowerBound The \c #VersionBound object.
- * @param upperBound The \c #VersionBound object, or null for a range with no upper boundary.
- *
- * @return A new \c VersionRange object.
- */
-function VersionRange(lowerBound, upperBound)
-{
- this.lowerBound = lowerBound;
- this.upperBound = upperBound;
-}
-
-/**
- * @class Runtime
- * Represents a runtime related to the CEP infrastructure.
- * Extensions can declare dependencies on particular
- * CEP runtime versions in the extension manifest.
- *
- * @param name The runtime name.
- * @param version A \c #VersionRange object that defines a range of valid versions.
- *
- * @return A new \c Runtime object.
- */
-function Runtime(name, versionRange)
-{
- this.name = name;
- this.versionRange = versionRange;
-}
-
-/**
-* @class Extension
-* Encapsulates a CEP-based extension to an Adobe application.
-*
-* @param id The unique identifier of this extension.
-* @param name The localizable display name of this extension.
-* @param mainPath The path of the "index.html" file.
-* @param basePath The base path of this extension.
-* @param windowType The window type of the main window of this extension.
- Valid values are defined by \c #CSXSWindowType.
-* @param width The default width in pixels of the main window of this extension.
-* @param height The default height in pixels of the main window of this extension.
-* @param minWidth The minimum width in pixels of the main window of this extension.
-* @param minHeight The minimum height in pixels of the main window of this extension.
-* @param maxWidth The maximum width in pixels of the main window of this extension.
-* @param maxHeight The maximum height in pixels of the main window of this extension.
-* @param defaultExtensionDataXml The extension data contained in the default \c ExtensionDispatchInfo section of the extension manifest.
-* @param specialExtensionDataXml The extension data contained in the application-specific \c ExtensionDispatchInfo section of the extension manifest.
-* @param requiredRuntimeList An array of \c Runtime objects for runtimes required by this extension.
-* @param isAutoVisible True if this extension is visible on loading.
-* @param isPluginExtension True if this extension has been deployed in the Plugins folder of the host application.
-*
-* @return A new \c Extension object.
-*/
-function Extension(id, name, mainPath, basePath, windowType, width, height, minWidth, minHeight, maxWidth, maxHeight,
- defaultExtensionDataXml, specialExtensionDataXml, requiredRuntimeList, isAutoVisible, isPluginExtension)
-{
- this.id = id;
- this.name = name;
- this.mainPath = mainPath;
- this.basePath = basePath;
- this.windowType = windowType;
- this.width = width;
- this.height = height;
- this.minWidth = minWidth;
- this.minHeight = minHeight;
- this.maxWidth = maxWidth;
- this.maxHeight = maxHeight;
- this.defaultExtensionDataXml = defaultExtensionDataXml;
- this.specialExtensionDataXml = specialExtensionDataXml;
- this.requiredRuntimeList = requiredRuntimeList;
- this.isAutoVisible = isAutoVisible;
- this.isPluginExtension = isPluginExtension;
-}
-
-/**
- * @class CSEvent
- * A standard JavaScript event, the base class for CEP events.
- *
- * @param type The name of the event type.
- * @param scope The scope of event, can be "GLOBAL" or "APPLICATION".
- * @param appId The unique identifier of the application that generated the event.
- * @param extensionId The unique identifier of the extension that generated the event.
- *
- * @return A new \c CSEvent object
- */
-function CSEvent(type, scope, appId, extensionId)
-{
- this.type = type;
- this.scope = scope;
- this.appId = appId;
- this.extensionId = extensionId;
-}
-
-/** Event-specific data. */
-CSEvent.prototype.data = "";
-
-/**
- * @class SystemPath
- * Stores operating-system-specific location constants for use in the
- * \c #CSInterface.getSystemPath() method.
- * @return A new \c SystemPath object.
- */
-function SystemPath()
-{
-}
-
-/** The path to user data. */
-SystemPath.USER_DATA = "userData";
-
-/** The path to common files for Adobe applications. */
-SystemPath.COMMON_FILES = "commonFiles";
-
-/** The path to the user's default document folder. */
-SystemPath.MY_DOCUMENTS = "myDocuments";
-
-/** @deprecated. Use \c #SystemPath.Extension. */
-SystemPath.APPLICATION = "application";
-
-/** The path to current extension. */
-SystemPath.EXTENSION = "extension";
-
-/** The path to hosting application's executable. */
-SystemPath.HOST_APPLICATION = "hostApplication";
-
-/**
- * @class ColorType
- * Stores color-type constants.
- */
-function ColorType()
-{
-}
-
-/** RGB color type. */
-ColorType.RGB = "rgb";
-
-/** Gradient color type. */
-ColorType.GRADIENT = "gradient";
-
-/** Null color type. */
-ColorType.NONE = "none";
-
-/**
- * @class RGBColor
- * Stores an RGB color with red, green, blue, and alpha values.
- * All values are in the range [0.0 to 255.0]. Invalid numeric values are
- * converted to numbers within this range.
- *
- * @param red The red value, in the range [0.0 to 255.0].
- * @param green The green value, in the range [0.0 to 255.0].
- * @param blue The blue value, in the range [0.0 to 255.0].
- * @param alpha The alpha (transparency) value, in the range [0.0 to 255.0].
- * The default, 255.0, means that the color is fully opaque.
- *
- * @return A new RGBColor object.
- */
-function RGBColor(red, green, blue, alpha)
-{
- this.red = red;
- this.green = green;
- this.blue = blue;
- this.alpha = alpha;
-}
-
-/**
- * @class Direction
- * A point value in which the y component is 0 and the x component
- * is positive or negative for a right or left direction,
- * or the x component is 0 and the y component is positive or negative for
- * an up or down direction.
- *
- * @param x The horizontal component of the point.
- * @param y The vertical component of the point.
- *
- * @return A new \c Direction object.
- */
-function Direction(x, y)
-{
- this.x = x;
- this.y = y;
-}
-
-/**
- * @class GradientStop
- * Stores gradient stop information.
- *
- * @param offset The offset of the gradient stop, in the range [0.0 to 1.0].
- * @param rgbColor The color of the gradient at this point, an \c #RGBColor object.
- *
- * @return GradientStop object.
- */
-function GradientStop(offset, rgbColor)
-{
- this.offset = offset;
- this.rgbColor = rgbColor;
-}
-
-/**
- * @class GradientColor
- * Stores gradient color information.
- *
- * @param type The gradient type, must be "linear".
- * @param direction A \c #Direction object for the direction of the gradient
- (up, down, right, or left).
- * @param numStops The number of stops in the gradient.
- * @param gradientStopList An array of \c #GradientStop objects.
- *
- * @return A new \c GradientColor object.
- */
-function GradientColor(type, direction, numStops, arrGradientStop)
-{
- this.type = type;
- this.direction = direction;
- this.numStops = numStops;
- this.arrGradientStop = arrGradientStop;
-}
-
-/**
- * @class UIColor
- * Stores color information, including the type, anti-alias level, and specific color
- * values in a color object of an appropriate type.
- *
- * @param type The color type, 1 for "rgb" and 2 for "gradient".
- The supplied color object must correspond to this type.
- * @param antialiasLevel The anti-alias level constant.
- * @param color A \c #RGBColor or \c #GradientColor object containing specific color information.
- *
- * @return A new \c UIColor object.
- */
-function UIColor(type, antialiasLevel, color)
-{
- this.type = type;
- this.antialiasLevel = antialiasLevel;
- this.color = color;
-}
-
-/**
- * @class AppSkinInfo
- * Stores window-skin properties, such as color and font. All color parameter values are \c #UIColor objects except that systemHighlightColor is \c #RGBColor object.
- *
- * @param baseFontFamily The base font family of the application.
- * @param baseFontSize The base font size of the application.
- * @param appBarBackgroundColor The application bar background color.
- * @param panelBackgroundColor The background color of the extension panel.
- * @param appBarBackgroundColorSRGB The application bar background color, as sRGB.
- * @param panelBackgroundColorSRGB The background color of the extension panel, as sRGB.
- * @param systemHighlightColor The highlight color of the extension panel, if provided by the host application. Otherwise, the operating-system highlight color.
- *
- * @return AppSkinInfo object.
- */
-function AppSkinInfo(baseFontFamily, baseFontSize, appBarBackgroundColor, panelBackgroundColor, appBarBackgroundColorSRGB, panelBackgroundColorSRGB, systemHighlightColor)
-{
- this.baseFontFamily = baseFontFamily;
- this.baseFontSize = baseFontSize;
- this.appBarBackgroundColor = appBarBackgroundColor;
- this.panelBackgroundColor = panelBackgroundColor;
- this.appBarBackgroundColorSRGB = appBarBackgroundColorSRGB;
- this.panelBackgroundColorSRGB = panelBackgroundColorSRGB;
- this.systemHighlightColor = systemHighlightColor;
-}
-
-/**
- * @class HostEnvironment
- * Stores information about the environment in which the extension is loaded.
- *
- * @param appName The application's name.
- * @param appVersion The application's version.
- * @param appLocale The application's current license locale.
- * @param appUILocale The application's current UI locale.
- * @param appId The application's unique identifier.
- * @param isAppOnline True if the application is currently online.
- * @param appSkinInfo An \c #AppSkinInfo object containing the application's default color and font styles.
- *
- * @return A new \c HostEnvironment object.
- */
-function HostEnvironment(appName, appVersion, appLocale, appUILocale, appId, isAppOnline, appSkinInfo)
-{
- this.appName = appName;
- this.appVersion = appVersion;
- this.appLocale = appLocale;
- this.appUILocale = appUILocale;
- this.appId = appId;
- this.isAppOnline = isAppOnline;
- this.appSkinInfo = appSkinInfo;
-}
-
-/**
- * @class HostCapabilities
- * Stores information about the host capabilities.
- *
- * @param EXTENDED_PANEL_MENU True if the application supports panel menu.
- * @param EXTENDED_PANEL_ICONS True if the application supports panel icon.
- * @param DELEGATE_APE_ENGINE True if the application supports delegated APE engine.
- * @param SUPPORT_HTML_EXTENSIONS True if the application supports HTML extensions.
- * @param DISABLE_FLASH_EXTENSIONS True if the application disables FLASH extensions.
- *
- * @return A new \c HostCapabilities object.
- */
-function HostCapabilities(EXTENDED_PANEL_MENU, EXTENDED_PANEL_ICONS, DELEGATE_APE_ENGINE, SUPPORT_HTML_EXTENSIONS, DISABLE_FLASH_EXTENSIONS)
-{
- this.EXTENDED_PANEL_MENU = EXTENDED_PANEL_MENU;
- this.EXTENDED_PANEL_ICONS = EXTENDED_PANEL_ICONS;
- this.DELEGATE_APE_ENGINE = DELEGATE_APE_ENGINE;
- this.SUPPORT_HTML_EXTENSIONS = SUPPORT_HTML_EXTENSIONS;
- this.DISABLE_FLASH_EXTENSIONS = DISABLE_FLASH_EXTENSIONS; // Since 5.0.0
-}
-
-/**
- * @class ApiVersion
- * Stores current api version.
- *
- * Since 4.2.0
- *
- * @param major The major version
- * @param minor The minor version.
- * @param micro The micro version.
- *
- * @return ApiVersion object.
- */
-function ApiVersion(major, minor, micro)
-{
- this.major = major;
- this.minor = minor;
- this.micro = micro;
-}
-
-/**
- * @class MenuItemStatus
- * Stores flyout menu item status
- *
- * Since 5.2.0
- *
- * @param menuItemLabel The menu item label.
- * @param enabled True if user wants to enable the menu item.
- * @param checked True if user wants to check the menu item.
- *
- * @return MenuItemStatus object.
- */
-function MenuItemStatus(menuItemLabel, enabled, checked)
-{
- this.menuItemLabel = menuItemLabel;
- this.enabled = enabled;
- this.checked = checked;
-}
-
-/**
- * @class ContextMenuItemStatus
- * Stores the status of the context menu item.
- *
- * Since 5.2.0
- *
- * @param menuItemID The menu item id.
- * @param enabled True if user wants to enable the menu item.
- * @param checked True if user wants to check the menu item.
- *
- * @return MenuItemStatus object.
- */
-function ContextMenuItemStatus(menuItemID, enabled, checked)
-{
- this.menuItemID = menuItemID;
- this.enabled = enabled;
- this.checked = checked;
-}
-//------------------------------ CSInterface ----------------------------------
-
-/**
- * @class CSInterface
- * This is the entry point to the CEP extensibility infrastructure.
- * Instantiate this object and use it to:
- *
- *
- *     Access information about the host application in which an extension is running
- *     Launch an extension
- *     Register interest in event notifications, and dispatch events
- *
- * @return A new \c CSInterface object
- */
-function CSInterface()
-{
-}
-
-/**
- * User can add this event listener to handle native application theme color changes.
- * Callback function gives extensions ability to fine-tune their theme color after the
- * global theme color has been changed.
- * The callback function should be like below:
- *
- * @example
- * // event is a CSEvent object, but user can ignore it.
- * function OnAppThemeColorChanged(event)
- * {
- * // Should get a latest HostEnvironment object from application.
- * var skinInfo = JSON.parse(window.__adobe_cep__.getHostEnvironment()).appSkinInfo;
- * // Gets the style information such as color info from the skinInfo,
- * // and redraw all UI controls of your extension according to the style info.
- * }
- */
-CSInterface.THEME_COLOR_CHANGED_EVENT = "com.adobe.csxs.events.ThemeColorChanged";
-
-/** The host environment data object. */
-CSInterface.prototype.hostEnvironment = window.__adobe_cep__ ? JSON.parse(window.__adobe_cep__.getHostEnvironment()) : null;
-
-/** Retrieves information about the host environment in which the
- * extension is currently running.
- *
- * @return A \c #HostEnvironment object.
- */
-CSInterface.prototype.getHostEnvironment = function()
-{
- this.hostEnvironment = JSON.parse(window.__adobe_cep__.getHostEnvironment());
- return this.hostEnvironment;
-};
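// Usage sketch (assumes this runs in a CEP panel page where CSInterface.js is loaded):
// read the refreshed host environment and log a few of the fields described above.
var cs = new CSInterface();
var env = cs.getHostEnvironment();
console.log(env.appName + " " + env.appVersion, env.appSkinInfo);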
-
-/** Closes this extension. */
-CSInterface.prototype.closeExtension = function()
-{
- window.__adobe_cep__.closeExtension();
-};
-
-/**
- * Retrieves a path for which a constant is defined in the system.
- *
- * @param pathType The path-type constant defined in \c #SystemPath.
- *
- * @return The platform-specific system path string.
- */
-CSInterface.prototype.getSystemPath = function(pathType)
-{
- var path = decodeURI(window.__adobe_cep__.getSystemPath(pathType));
- var OSVersion = this.getOSInformation();
- if (OSVersion.indexOf("Windows") >= 0)
- {
- path = path.replace("file:///", "");
- }
- else if (OSVersion.indexOf("Mac") >= 0)
- {
- path = path.replace("file://", "");
- }
- return path;
-};
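// Usage sketch (SystemPath.EXTENSION is a constant defined elsewhere in CSInterface.js):
// resolve the extension's install folder, e.g. to read files shipped with the panel.
var cs = new CSInterface();
var extensionRoot = cs.getSystemPath(SystemPath.EXTENSION);
console.log("Extension installed at: " + extensionRoot);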
-
-/**
- * Evaluates a JavaScript script, which can use the JavaScript DOM
- * of the host application.
- *
- * @param script The JavaScript script.
- * @param callback Optional. A callback function that receives the result of execution.
- * If execution fails, the callback function receives the error message \c EvalScript_ErrMessage.
- */
-CSInterface.prototype.evalScript = function(script, callback)
-{
- if(callback === null || callback === undefined)
- {
- callback = function(result){};
- }
- window.__adobe_cep__.evalScript(script, callback);
-};
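// Usage sketch: evaluate a one-line ExtendScript expression in the host and log the
// string handed to the callback ("app.version" assumes the host DOM exposes it).
var cs = new CSInterface();
cs.evalScript('app.version', function (result) {
    console.log("Host app.version: " + result);
});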
-
-/**
- * Retrieves the unique identifier of the application
- * in which the extension is currently running.
- *
- * @return The unique ID string.
- */
-CSInterface.prototype.getApplicationID = function()
-{
- var appId = this.hostEnvironment.appId;
- return appId;
-};
-
-/**
- * Retrieves host capability information for the application
- * in which the extension is currently running.
- *
- * @return A \c #HostCapabilities object.
- */
-CSInterface.prototype.getHostCapabilities = function()
-{
- var hostCapabilities = JSON.parse(window.__adobe_cep__.getHostCapabilities() );
- return hostCapabilities;
-};
-
-/**
- * Triggers a CEP event programmatically. You can use it to dispatch
- * an event of a predefined type, or of a type you have defined.
- *
- * @param event A \c CSEvent object.
- */
-CSInterface.prototype.dispatchEvent = function(event)
-{
- if (typeof event.data == "object")
- {
- event.data = JSON.stringify(event.data);
- }
-
- window.__adobe_cep__.dispatchEvent(event);
-};
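// Usage sketch (the CSEvent constructor is defined earlier in CSInterface.js and is not
// shown in this hunk; the event type below is a hypothetical example):
var cs = new CSInterface();
var event = new CSEvent("com.example.myExtension.ping", "APPLICATION");
event.data = { sentAt: Date.now() };   // objects are stringified by dispatchEvent above
cs.dispatchEvent(event);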
-
-/**
- * Registers an interest in a CEP event of a particular type, and
- * assigns an event handler.
- * The event infrastructure notifies your extension when events of this type occur,
- * passing the event object to the registered handler function.
- *
- * @param type The name of the event type of interest.
- * @param listener The JavaScript handler function or method.
- * @param obj Optional, the object containing the handler method, if any.
- * Default is null.
- */
-CSInterface.prototype.addEventListener = function(type, listener, obj)
-{
- window.__adobe_cep__.addEventListener(type, listener, obj);
-};
-
-/**
- * Removes a registered event listener.
- *
- * @param type The name of the event type of interest.
- * @param listener The JavaScript handler function or method that was registered.
- * @param obj Optional, the object containing the handler method, if any.
- * Default is null.
- */
-CSInterface.prototype.removeEventListener = function(type, listener, obj)
-{
- window.__adobe_cep__.removeEventListener(type, listener, obj);
-};
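// Usage sketch: subscribe to the predefined theme-color event and detach the handler later.
var cs = new CSInterface();
function onThemeChanged(event) {
    // Re-read the skin info after the host theme changed, as documented above.
    var skinInfo = cs.getHostEnvironment().appSkinInfo;
    console.log("Theme changed", skinInfo);
}
cs.addEventListener(CSInterface.THEME_COLOR_CHANGED_EVENT, onThemeChanged);
// ...later, when the panel no longer needs the notification:
cs.removeEventListener(CSInterface.THEME_COLOR_CHANGED_EVENT, onThemeChanged);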
-
-/**
- * Loads and launches another extension, or activates the extension if it is already loaded.
- *
- * @param extensionId The extension's unique identifier.
- * @param params Not currently used, pass "".
- *
- * @example
- * To launch the extension "help" with ID "HLP" from this extension, call:
- * requestOpenExtension("HLP", "");
- *
- */
-CSInterface.prototype.requestOpenExtension = function(extensionId, params)
-{
- window.__adobe_cep__.requestOpenExtension(extensionId, params);
-};
-
-/**
- * Retrieves the list of extensions currently loaded in the host application.
- * The extension list is initialized once, and remains the same during the lifetime
- * of the CEP session.
- *
- * @param extensionIds Optional, an array of unique identifiers for extensions of interest.
- * If omitted, retrieves data for all extensions.
- *
- * @return Zero or more \c #Extension objects.
- */
-CSInterface.prototype.getExtensions = function(extensionIds)
-{
- var extensionIdsStr = JSON.stringify(extensionIds);
- var extensionsStr = window.__adobe_cep__.getExtensions(extensionIdsStr);
-
- var extensions = JSON.parse(extensionsStr);
- return extensions;
-};
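// Usage sketch: query extension metadata; passing IDs narrows the result
// ("HLP" reuses the example ID from requestOpenExtension above).
var cs = new CSInterface();
var helpExtensions = cs.getExtensions(["HLP"]);
console.log(helpExtensions);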
-
-/**
- * Retrieves network-related preferences.
- *
- * @return A JavaScript object containing network preferences.
- */
-CSInterface.prototype.getNetworkPreferences = function()
-{
- var result = window.__adobe_cep__.getNetworkPreferences();
- var networkPre = JSON.parse(result);
-
- return networkPre;
-};
-
-/**
- * Initializes the resource bundle for this extension with property values
- * for the current application and locale.
- * To support multiple locales, you must define a property file for each locale,
- * containing keyed display-string values for that locale.
- * See localization documentation for Extension Builder and related products.
- *
- * Keys can be in the
- * form key.value="localized string", for use in HTML text elements.
- * For example, in this input element, the localized \c key.value string is displayed
- * instead of the empty \c value string:
- *
- *     <input type="submit" value="" data-locale="key"/>
- *
- * @return An object containing the resource bundle information.
- */
-CSInterface.prototype.initResourceBundle = function()
-{
- var resourceBundle = JSON.parse(window.__adobe_cep__.initResourceBundle());
- var resElms = document.querySelectorAll('[data-locale]');
- for (var n = 0; n < resElms.length; n++)
- {
- var resEl = resElms[n];
- // Get the resource key from the element.
- var resKey = resEl.getAttribute('data-locale');
- if (resKey)
- {
- // Get all the resources that start with the key.
- for (var key in resourceBundle)
- {
- if (key.indexOf(resKey) === 0)
- {
- var resValue = resourceBundle[key];
- if (key.length == resKey.length)
- {
- resEl.innerHTML = resValue;
- }
- else if ('.' == key.charAt(resKey.length))
- {
- var attrKey = key.substring(resKey.length + 1);
- resEl[attrKey] = resValue;
- }
- }
- }
- }
- }
- return resourceBundle;
-};
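// Usage sketch of the markup this method localizes: any element carrying a data-locale
// attribute gets its innerHTML from the matching bundle key, while "key.attr" entries set
// the matching element property (the key names below are hypothetical).
// <button data-locale="ui.publish"></button>
// <input data-locale="ui.search" type="text"/>   e.g. bundle key "ui.search.placeholder"
var bundle = new CSInterface().initResourceBundle();
console.log(Object.keys(bundle).length + " localized strings loaded");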
-
-/**
- * Writes installation information to a file.
- *
- * @return The file path.
- */
-CSInterface.prototype.dumpInstallationInfo = function()
-{
- return window.__adobe_cep__.dumpInstallationInfo();
-};
-
-/**
- * Retrieves version information for the current Operating System,
- * See http://www.useragentstring.com/pages/Chrome/ for Chrome \c navigator.userAgent values.
- *
- * @return A string containing the OS version, or "Unknown Operation System".
- * If the user customizes the User Agent by setting the CEF command parameter "--user-agent",
- * only "Mac OS X" or "Windows" will be returned.
- */
-CSInterface.prototype.getOSInformation = function()
-{
- var userAgent = navigator.userAgent;
-
- if ((navigator.platform == "Win32") || (navigator.platform == "Windows"))
- {
- var winVersion = "Windows";
- var winBit = "";
- if (userAgent.indexOf("Windows") > -1)
- {
- if (userAgent.indexOf("Windows NT 5.0") > -1)
- {
- winVersion = "Windows 2000";
- }
- else if (userAgent.indexOf("Windows NT 5.1") > -1)
- {
- winVersion = "Windows XP";
- }
- else if (userAgent.indexOf("Windows NT 5.2") > -1)
- {
- winVersion = "Windows Server 2003";
- }
- else if (userAgent.indexOf("Windows NT 6.0") > -1)
- {
- winVersion = "Windows Vista";
- }
- else if (userAgent.indexOf("Windows NT 6.1") > -1)
- {
- winVersion = "Windows 7";
- }
- else if (userAgent.indexOf("Windows NT 6.2") > -1)
- {
- winVersion = "Windows 8";
- }
- else if (userAgent.indexOf("Windows NT 6.3") > -1)
- {
- winVersion = "Windows 8.1";
- }
- else if (userAgent.indexOf("Windows NT 10") > -1)
- {
- winVersion = "Windows 10";
- }
-
- if (userAgent.indexOf("WOW64") > -1 || userAgent.indexOf("Win64") > -1)
- {
- winBit = " 64-bit";
- }
- else
- {
- winBit = " 32-bit";
- }
- }
-
- return winVersion + winBit;
- }
- else if ((navigator.platform == "MacIntel") || (navigator.platform == "Macintosh"))
- {
- var result = "Mac OS X";
-
- if (userAgent.indexOf("Mac OS X") > -1)
- {
- result = userAgent.substring(userAgent.indexOf("Mac OS X"), userAgent.indexOf(")"));
- result = result.replace(/_/g, ".");
- }
-
- return result;
- }
-
- return "Unknown Operation System";
-};
-
-/**
- * Opens a page in the default system browser.
- *
- * Since 4.2.0
- *
- * @param url The URL of the page/file to open, or the email address.
- * Must use HTTP/HTTPS/file/mailto protocol. For example:
- * "http://www.adobe.com"
- * "https://github.com"
- * "file:///C:/log.txt"
- * "mailto:test@adobe.com"
- *
- * @return One of these error codes:\n
- *     - NO_ERROR - 0
- *     - ERR_UNKNOWN - 1
- *     - ERR_INVALID_PARAMS - 2
- *     - ERR_INVALID_URL - 201
- */
-CSInterface.prototype.openURLInDefaultBrowser = function(url)
-{
- return cep.util.openURLInDefaultBrowser(url);
-};
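// Usage sketch: open a page externally and check the numeric error code listed above.
var err = new CSInterface().openURLInDefaultBrowser("http://www.adobe.com");
if (err !== 0) {
    console.warn("openURLInDefaultBrowser failed with code " + err);
}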
-
-/**
- * Retrieves extension ID.
- *
- * Since 4.2.0
- *
- * @return extension ID.
- */
-CSInterface.prototype.getExtensionID = function()
-{
- return window.__adobe_cep__.getExtensionId();
-};
-
-/**
- * Retrieves the scale factor of screen.
- * On the Windows platform, the value of the scale factor might differ from the operating system's scale factor,
- * since the host application may use its own self-defined scale factor.
- *
- * Since 4.2.0
- *
- * @return One of the following float values:
- *     - -1.0 when an error occurs
- *     - 1.0 for a normal screen
- *     - >1.0 for a HiDPI screen
- */
-CSInterface.prototype.getScaleFactor = function()
-{
- return window.__adobe_cep__.getScaleFactor();
-};
-
-/**
- * Set a handler to detect any changes of scale factor. This only works on Mac.
- *
- * Since 4.2.0
- *
- * @param handler The function to be called when scale factor is changed.
- *
- */
-CSInterface.prototype.setScaleFactorChangedHandler = function(handler)
-{
- window.__adobe_cep__.setScaleFactorChangedHandler(handler);
-};
-
-/**
- * Retrieves current API version.
- *
- * Since 4.2.0
- *
- * @return ApiVersion object.
- *
- */
-CSInterface.prototype.getCurrentApiVersion = function()
-{
- var apiVersion = JSON.parse(window.__adobe_cep__.getCurrentApiVersion());
- return apiVersion;
-};
-
-/**
- * Set panel flyout menu by an XML.
- *
- * Since 5.2.0
- *
- * Register a callback function for "com.adobe.csxs.events.flyoutMenuClicked" to get notified when a
- * menu item is clicked.
- * The "data" attribute of event is an object which contains "menuId" and "menuName" attributes.
- *
- * Register callback functions for "com.adobe.csxs.events.flyoutMenuOpened" and "com.adobe.csxs.events.flyoutMenuClosed"
- * respectively to get notified when flyout menu is opened or closed.
- *
- * @param menu An XML string which describes the menu structure.
- * An example menu XML is sketched after this function.
- *
- */
-CSInterface.prototype.setPanelFlyoutMenu = function(menu)
-{
- if ("string" != typeof menu)
- {
- return;
- }
-
- window.__adobe_cep__.invokeSync("setPanelFlyoutMenu", menu);
-};
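// Hedged sketch of a flyout menu XML: the element and attribute names below mirror the
// JSON menu keys documented for setContextMenuByJSON further down and are assumptions here.
var flyoutXml =
    '<Menu>' +
    '  <MenuItem Id="reloadItem" Label="Reload" Enabled="true" Checked="false"/>' +
    '  <MenuItem Label="---"/>' +
    '  <MenuItem Id="aboutItem" Label="About"/>' +
    '</Menu>';
new CSInterface().setPanelFlyoutMenu(flyoutXml);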
-
-/**
- * Updates a menu item in the extension window's flyout menu, by setting the enabled
- * and selection status.
- *
- * Since 5.2.0
- *
- * @param menuItemLabel The menu item label.
- * @param enabled True to enable the item, false to disable it (gray it out).
- * @param checked True to select the item, false to deselect it.
- *
- * @return false when the host application does not support this functionality (HostCapabilities.EXTENDED_PANEL_MENU is false).
- * Fails silently if menu label is invalid.
- *
- * @see HostCapabilities.EXTENDED_PANEL_MENU
- */
-CSInterface.prototype.updatePanelMenuItem = function(menuItemLabel, enabled, checked)
-{
- var ret = false;
- if (this.getHostCapabilities().EXTENDED_PANEL_MENU)
- {
- var itemStatus = new MenuItemStatus(menuItemLabel, enabled, checked);
- ret = window.__adobe_cep__.invokeSync("updatePanelMenuItem", JSON.stringify(itemStatus));
- }
- return ret;
-};
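// Usage sketch: disable a previously registered flyout item by its label (the label is
// hypothetical); a false return means EXTENDED_PANEL_MENU is unsupported by the host.
var updated = new CSInterface().updatePanelMenuItem("Reload", false, false);
console.log("flyout item updated:", updated);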
-
-
-/**
- * Set context menu by XML string.
- *
- * Since 5.2.0
- *
- * There are a number of conventions used to communicate what type of menu item to create and how it should be handled.
- * - an item without menu ID or menu name is disabled and is not shown.
- * - if the item name is "---" (three hyphens) then it is treated as a separator. The menu ID in this case will always be NULL.
- * - Checkable attribute takes precedence over Checked attribute.
- * - a menu item can carry a PNG icon. For optimal display results please supply a 16 x 16px icon, as larger dimensions will increase the size of the menu item.
- *   The Chrome extension contextMenus API was taken as a reference:
- *   https://developer.chrome.com/extensions/contextMenus
- * - the items with icons and checkable items cannot coexist on the same menu level. The former takes precedence over the latter.
- *
- * @param menu An XML string which describes the menu structure.
- * @param callback The callback function which is called when a menu item is clicked. The only parameter is the returned ID of clicked menu item.
- *
- * @description An example menu XML is sketched after this function.
- *
- */
-CSInterface.prototype.setContextMenu = function(menu, callback)
-{
- if ("string" != typeof menu)
- {
- return;
- }
-
- window.__adobe_cep__.invokeAsync("setContextMenu", menu, callback);
-};
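// Hedged sketch of a context-menu XML (the element and attribute names mirror the JSON
// keys documented for setContextMenuByJSON below and are assumptions here):
var contextMenuXml =
    '<Menu>' +
    '  <MenuItem Id="menuItemId1" Label="testExample1" Enabled="true" Checkable="true" Checked="false"/>' +
    '  <MenuItem Label="---"/>' +
    '  <MenuItem Id="menuItemId3" Label="testExample3" Enabled="false" Checkable="true" Checked="false"/>' +
    '</Menu>';
new CSInterface().setContextMenu(contextMenuXml, function (clickedMenuItemId) {
    console.log("Clicked: " + clickedMenuItemId);
});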
-
-/**
- * Set context menu by JSON string.
- *
- * Since 6.0.0
- *
- * There are a number of conventions used to communicate what type of menu item to create and how it should be handled.
- * - an item without menu ID or menu name is disabled and is not shown.
- * - if the item label is "---" (three hyphens) then it is treated as a separator. The menu ID in this case will always be NULL.
- * - Checkable attribute takes precedence over Checked attribute.
- * - a menu item can carry a PNG icon (the "icon" key below). For optimal display results please supply a 16 x 16px icon, as larger dimensions will increase the size of the menu item.
- *   The Chrome extension contextMenus API was taken as a reference:
- *   https://developer.chrome.com/extensions/contextMenus
- * - the items with icons and checkable items cannot coexist on the same menu level. The former takes precedence over the latter.
- *
- * @param menu A JSON string which describes menu structure.
- * @param callback The callback function which is called when a menu item is clicked. The only parameter is the returned ID of clicked menu item.
- *
- * @description An example menu JSON:
- *
- * {
- * "menu": [
- * {
- * "id": "menuItemId1",
- * "label": "testExample1",
- * "enabled": true,
- * "checkable": true,
- * "checked": false,
- * "icon": "./image/small_16X16.png"
- * },
- * {
- * "id": "menuItemId2",
- * "label": "testExample2",
- * "menu": [
- * {
- * "id": "menuItemId2-1",
- * "label": "testExample2-1",
- * "menu": [
- * {
- * "id": "menuItemId2-1-1",
- * "label": "testExample2-1-1",
- * "enabled": false,
- * "checkable": true,
- * "checked": true
- * }
- * ]
- * },
- * {
- * "id": "menuItemId2-2",
- * "label": "testExample2-2",
- * "enabled": true,
- * "checkable": true,
- * "checked": true
- * }
- * ]
- * },
- * {
- * "label": "---"
- * },
- * {
- * "id": "menuItemId3",
- * "label": "testExample3",
- * "enabled": false,
- * "checkable": true,
- * "checked": false
- * }
- * ]
- * }
- *
- */
-CSInterface.prototype.setContextMenuByJSON = function(menu, callback)
-{
- if ("string" != typeof menu)
- {
- return;
- }
-
- window.__adobe_cep__.invokeAsync("setContextMenuByJSON", menu, callback);
-};
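// Usage sketch: the JSON documented above is passed as a string; the callback receives
// the clicked item's ID.
var menuJson = JSON.stringify({
    menu: [
        { id: "menuItemId1", label: "testExample1", enabled: true, checkable: true, checked: false }
    ]
});
new CSInterface().setContextMenuByJSON(menuJson, function (clickedId) {
    console.log("Context menu item clicked: " + clickedId);
});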
-
-/**
- * Updates a context menu item by setting the enabled and selection status.
- *
- * Since 5.2.0
- *
- * @param menuItemID The menu item ID.
- * @param enabled True to enable the item, false to disable it (gray it out).
- * @param checked True to select the item, false to deselect it.
- */
-CSInterface.prototype.updateContextMenuItem = function(menuItemID, enabled, checked)
-{
- var itemStatus = new ContextMenuItemStatus(menuItemID, enabled, checked);
- window.__adobe_cep__.invokeSync("updateContextMenuItem", JSON.stringify(itemStatus));
-};
-
-/**
- * Get the visibility status of an extension window.
- *
- * Since 6.0.0
- *
- * @return true if the extension window is visible; false if the extension window is hidden.
- */
-CSInterface.prototype.isWindowVisible = function()
-{
- return window.__adobe_cep__.invokeSync("isWindowVisible", "");
-};
-
-/**
- * Resize extension's content to the specified dimensions.
- * 1. Works with modal and modeless extensions in all Adobe products.
- * 2. Extension's manifest min/max size constraints apply and take precedence.
- * 3. For panel extensions
- * 3.1 This works in all Adobe products except:
- * * Premiere Pro
- * * Prelude
- * * After Effects
- * 3.2 When the panel is in certain states (especially when being docked),
- * it will not change to the desired dimensions even when the
- * specified size satisfies min/max constraints.
- *
- * Since 6.0.0
- *
- * @param width The new width
- * @param height The new height
- */
-CSInterface.prototype.resizeContent = function(width, height)
-{
- window.__adobe_cep__.resizeContent(width, height);
-};
-
-/**
- * Register the invalid certificate callback for an extension.
- * This callback is triggered when the extension tries to access a web site with an invalid certificate on the main frame.
- * If the extension does not register this callback and accesses such a web site, a default error page will be shown instead.
- *
- * Since 6.1.0
- *
- * @param callback the callback function
- */
-CSInterface.prototype.registerInvalidCertificateCallback = function(callback)
-{
- return window.__adobe_cep__.registerInvalidCertificateCallback(callback);
-};
-
-/**
- * Register an interest in some key events to prevent them from being sent to the host application.
- *
- * This function works with modeless extensions and panel extensions.
- * Generally all the key events will be sent to the host application for these two extensions if the current focused element
- * is not a text input or dropdown.
- * If you want to intercept some key events and want them to be handled in the extension, please call this function
- * in advance to prevent them being sent to the host application.
- *
- * Since 6.1.0
- *
- * @param keyEventsInterest A JSON string describing the key events you are interested in. A null object or
- *   an empty string will remove the registered interest.
- *
- * This JSON string should be an array; each object has the following keys:
- *
- * keyCode: [Required] represents an OS system dependent virtual key code identifying
- * the unmodified value of the pressed key.
- * ctrlKey: [optional] a Boolean that indicates if the control key was pressed (true) or not (false) when the event occurred.
- * altKey: [optional] a Boolean that indicates if the alt key was pressed (true) or not (false) when the event occurred.
- * shiftKey: [optional] a Boolean that indicates if the shift key was pressed (true) or not (false) when the event occurred.
- * metaKey: [optional] (Mac Only) a Boolean that indicates if the Meta key was pressed (true) or not (false) when the event occurred.
- * On Macintosh keyboards, this is the command key. To detect Windows key on Windows, please use keyCode instead.
- * An example JSON string:
- *
- * [
- * {
- * "keyCode": 48
- * },
- * {
- * "keyCode": 123,
- * "ctrlKey": true
- * },
- * {
- * "keyCode": 123,
- * "ctrlKey": true,
- * "metaKey": true
- * }
- * ]
- *
- */
-CSInterface.prototype.registerKeyEventsInterest = function(keyEventsInterest)
-{
- return window.__adobe_cep__.registerKeyEventsInterest(keyEventsInterest);
-};
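// Usage sketch: claim Ctrl+F12 (keyCode 123) for the panel so the host does not swallow it,
// using the JSON shape documented above.
var keyInterest = JSON.stringify([
    { keyCode: 123, ctrlKey: true }
]);
new CSInterface().registerKeyEventsInterest(keyInterest);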
-
-/**
- * Set the title of the extension window.
- * This function works with modal and modeless extensions in all Adobe products, and panel extensions in Photoshop, InDesign, InCopy, Illustrator, Flash Pro and Dreamweaver.
- *
- * Since 6.1.0
- *
- * @param title The window title.
- */
-CSInterface.prototype.setWindowTitle = function(title)
-{
- window.__adobe_cep__.invokeSync("setWindowTitle", title);
-};
-
-/**
- * Get the title of the extension window.
- * This function works with modal and modeless extensions in all Adobe products, and panel extensions in Photoshop, InDesign, InCopy, Illustrator, Flash Pro and Dreamweaver.
- *
- * Since 6.1.0
- *
- * @return The window title.
- */
-CSInterface.prototype.getWindowTitle = function()
-{
- return window.__adobe_cep__.invokeSync("getWindowTitle", "");
-};
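// Usage sketch: set the extension window title (the title text is hypothetical) and read it back.
var cs = new CSInterface();
cs.setWindowTitle("AYON");
console.log(cs.getWindowTitle());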
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/client.js b/server_addon/photoshop/client/ayon_photoshop/api/extension/client/client.js
deleted file mode 100644
index f4ba4cfe47..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/client.js
+++ /dev/null
@@ -1,300 +0,0 @@
- // Client-facing part of the extension; creates the WSRPC client (jsx cannot
- // do that).
- // Consumes RPC calls from the server (OpenPype), calls ./host/index.jsx and
- // returns values back (in JSON format).
-
- var logReturn = function(result){ log.warn('Result: ' + result);};
-
- var csInterface = new CSInterface();
-
- log.warn("script start");
-
- WSRPC.DEBUG = false;
- WSRPC.TRACE = false;
-
- function myCallBack(){
- log.warn("Triggered index.jsx");
- }
- // importing through manifest.xml isn't working because of relative paths;
- // possibly TODO
- jsx.evalFile('./host/index.jsx', myCallBack);
-
- function runEvalScript(script) {
- // because of asynchronous nature of functions in jsx
- // this waits for response
- return new Promise(function(resolve, reject){
- csInterface.evalScript(script, resolve);
- });
- }
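// Usage sketch: awaiting the wrapper yields the JSX result directly; getActiveDocumentName()
// is one of the host/index.jsx helpers invoked by the routes below.
async function logActiveDocumentName() {
    var name = await runEvalScript("getActiveDocumentName()");
    log.warn("active document: " + name);
}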
-
- /** main entry point **/
- startUp("WEBSOCKET_URL");
-
- // get websocket server url from environment value
- async function startUp(url){
- log.warn("url", url);
- var promise = runEvalScript("getEnv('" + url + "')");
-
- var res = await promise;
- // run rest only after resolved promise
- main(res);
- }
-
- function get_extension_version(){
- /** Returns version number from extension manifest.xml **/
- log.debug("get_extension_version")
- var path = csInterface.getSystemPath(SystemPath.EXTENSION);
- log.debug("extension path " + path);
-
- var result = window.cep.fs.readFile(path + "/CSXS/manifest.xml");
- var version = undefined;
- if(result.err === 0){
- if (window.DOMParser) {
- const parser = new DOMParser();
- const xmlDoc = parser.parseFromString(result.data.toString(), 'text/xml');
- const children = xmlDoc.children;
-
- for (let i = 0; i < children.length; i++) {
- if (children[i] && children[i].getAttribute('ExtensionBundleVersion')) {
- version = children[i].getAttribute('ExtensionBundleVersion');
- }
- }
- }
- }
- return version
- }
-
- function main(websocket_url){
- // creates connection to 'websocket_url', registers routes
- log.warn("websocket_url", websocket_url);
- var default_url = 'ws://localhost:8099/ws/';
-
- if (websocket_url == ''){
- websocket_url = default_url;
- }
- log.warn("connecting to:", websocket_url);
- var RPC = new WSRPC(websocket_url, 5000); // spin connection
-
- RPC.connect();
-
- log.warn("connected");
-
- function EscapeStringForJSX(str){
- // Replaces:
- // \ with \\
- // ' with \'
- // " with \"
- // See: https://stackoverflow.com/a/3967927/5285364
- return str.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/"/g, '\\"');
- }
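// Usage sketch: backslashes and quotes are doubled/escaped so a (hypothetical) Windows
// path survives being embedded inside the JSX call strings built below.
var escapedExample = EscapeStringForJSX('C:\\renders\\shot_01.psd');
log.warn('escaped path: ' + escapedExample);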
-
- RPC.addRoute('Photoshop.open', function (data) {
- log.warn('Server called client route "open":', data);
- var escapedPath = EscapeStringForJSX(data.path);
- return runEvalScript("fileOpen('" + escapedPath +"')")
- .then(function(result){
- log.warn("open: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.read', function (data) {
- log.warn('Server called client route "read":', data);
- return runEvalScript("getHeadline()")
- .then(function(result){
- log.warn("getHeadline: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.get_layers', function (data) {
- log.warn('Server called client route "get_layers":', data);
- return runEvalScript("getLayers()")
- .then(function(result){
- log.warn("getLayers: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.set_visible', function (data) {
- log.warn('Server called client route "set_visible":', data);
- return runEvalScript("setVisible(" + data.layer_id + ", " +
- data.visibility + ")")
- .then(function(result){
- log.warn("setVisible: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.get_active_document_name', function (data) {
- log.warn('Server called client route "get_active_document_name":',
- data);
- return runEvalScript("getActiveDocumentName()")
- .then(function(result){
- log.warn("save: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.get_active_document_full_name', function (data) {
- log.warn('Server called client route ' +
- '"get_active_document_full_name":', data);
- return runEvalScript("getActiveDocumentFullName()")
- .then(function(result){
- log.warn("save: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.save', function (data) {
- log.warn('Server called client route "save":', data);
-
- return runEvalScript("save()")
- .then(function(result){
- log.warn("save: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.get_selected_layers', function (data) {
- log.warn('Server called client route "get_selected_layers":', data);
-
- return runEvalScript("getSelectedLayers()")
- .then(function(result){
- log.warn("get_selected_layers: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.create_group', function (data) {
- log.warn('Server called client route "create_group":', data);
-
- return runEvalScript("createGroup('" + data.name + "')")
- .then(function(result){
- log.warn("createGroup: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.group_selected_layers', function (data) {
- log.warn('Server called client route "group_selected_layers":',
- data);
-
- return runEvalScript("groupSelectedLayers(null, "+
- "'" + data.name +"')")
- .then(function(result){
- log.warn("group_selected_layers: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.import_smart_object', function (data) {
- log.warn('Server called client "import_smart_object":', data);
- var escapedPath = EscapeStringForJSX(data.path);
- return runEvalScript("importSmartObject('" + escapedPath +"', " +
- "'"+ data.name +"', " +
- data.as_reference +")")
- .then(function(result){
- log.warn("import_smart_object: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.replace_smart_object', function (data) {
- log.warn('Server called route "replace_smart_object":', data);
- var escapedPath = EscapeStringForJSX(data.path);
- return runEvalScript("replaceSmartObjects("+data.layer_id+"," +
- "'" + escapedPath +"',"+
- "'"+ data.name +"')")
- .then(function(result){
- log.warn("replaceSmartObjects: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.delete_layer', function (data) {
- log.warn('Server called route "delete_layer":', data);
- return runEvalScript("deleteLayer("+data.layer_id+")")
- .then(function(result){
- log.warn("delete_layer: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.rename_layer', function (data) {
- log.warn('Server called route "rename_layer":', data);
- return runEvalScript("renameLayer("+data.layer_id+", " +
- "'"+ data.name +"')")
- .then(function(result){
- log.warn("rename_layer: " + result);
- return result;
- });
-});
-
- RPC.addRoute('Photoshop.select_layers', function (data) {
- log.warn('Server called client route "select_layers":', data);
-
- return runEvalScript("selectLayers('" + data.layers +"')")
- .then(function(result){
- log.warn("select_layers: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.is_saved', function (data) {
- log.warn('Server called client route "is_saved":', data);
-
- return runEvalScript("isSaved()")
- .then(function(result){
- log.warn("is_saved: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.saveAs', function (data) {
- log.warn('Server called client route "saveAs":', data);
- var escapedPath = EscapeStringForJSX(data.image_path);
- return runEvalScript("saveAs('" + escapedPath + "', " +
- "'" + data.ext + "', " +
- data.as_copy + ")")
- .then(function(result){
- log.warn("save: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.imprint', function (data) {
- log.warn('Server called client route "imprint":', data);
- var escaped = data.payload.replace(/\n/g, "\\n");
- return runEvalScript("imprint('" + escaped + "')")
- .then(function(result){
- log.warn("imprint: " + result);
- return result;
- });
- });
-
- RPC.addRoute('Photoshop.get_extension_version', function (data) {
- log.warn('Server called client route "get_extension_version":', data);
- return get_extension_version();
- });
-
- RPC.addRoute('Photoshop.close', function (data) {
- log.warn('Server called client route "close":', data);
- return runEvalScript("close()");
- });
-
- RPC.call('Photoshop.ping').then(function (data) {
- log.warn('Result for calling server route "ping": ', data);
- return runEvalScript("ping()")
- .then(function(result){
- log.warn("ping: " + result);
- return result;
- });
-
- }, function (error) {
- log.warn(error);
- });
-
- }
-
- log.warn("end script");
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/loglevel.min.js b/server_addon/photoshop/client/ayon_photoshop/api/extension/client/loglevel.min.js
deleted file mode 100644
index 648d7e9ff6..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/loglevel.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! loglevel - v1.6.8 - https://github.com/pimterry/loglevel - (c) 2020 Tim Perry - licensed MIT */
-!function(a,b){"use strict";"function"==typeof define&&define.amd?define(b):"object"==typeof module&&module.exports?module.exports=b():a.log=b()}(this,function(){"use strict";function a(a,b){var c=a[b];if("function"==typeof c.bind)return c.bind(a);try{return Function.prototype.bind.call(c,a)}catch(b){return function(){return Function.prototype.apply.apply(c,[a,arguments])}}}function b(){console.log&&(console.log.apply?console.log.apply(console,arguments):Function.prototype.apply.apply(console.log,[console,arguments])),console.trace&&console.trace()}function c(c){return"debug"===c&&(c="log"),typeof console!==i&&("trace"===c&&j?b:void 0!==console[c]?a(console,c):void 0!==console.log?a(console,"log"):h)}function d(a,b){for(var c=0;c=0&&b<=j.levels.SILENT))throw"log.setLevel() called with invalid level: "+b;if(h=b,!1!==c&&e(b),d.call(j,b,a),typeof console===i&&b 1 && arguments[1] !== undefined ? arguments[1] : 1000;
-
- _classCallCheck(this, WSRPC);
-
- var self = this;
- URL = getAbsoluteWsUrl(URL);
- self.id = 1;
- self.eventId = 0;
- self.socketStarted = false;
- self.eventStore = {
- onconnect: {},
- onerror: {},
- onclose: {},
- onchange: {}
- };
- self.connectionNumber = 0;
- self.oneTimeEventStore = {
- onconnect: [],
- onerror: [],
- onclose: [],
- onchange: []
- };
- self.callQueue = [];
-
- function createSocket() {
- var ws = new WebSocket(URL);
-
- var rejectQueue = function rejectQueue() {
- self.connectionNumber++; // rejects incoming calls
-
- var deferred; //reject all pending calls
-
- while (0 < self.callQueue.length) {
- var callObj = self.callQueue.shift();
- deferred = self.store[callObj.id];
- delete self.store[callObj.id];
-
- if (deferred && deferred.promise.isPending()) {
- deferred.reject('WebSocket error occurred');
- }
- } // reject all from the store
-
-
- for (var key in self.store) {
- if (!self.store.hasOwnProperty(key)) continue;
- deferred = self.store[key];
-
- if (deferred && deferred.promise.isPending()) {
- deferred.reject('WebSocket error occurred');
- }
- }
- };
-
- function reconnect(callEvents) {
- setTimeout(function () {
- try {
- self.socket = createSocket();
- self.id = 1;
- } catch (exc) {
- callEvents('onerror', exc);
- delete self.socket;
- console.error(exc);
- }
- }, reconnectTimeout);
- }
-
- ws.onclose = function (err) {
- log('ONCLOSE CALLED', 'STATE', self.public.state());
- trace(err);
-
- for (var serial in self.store) {
- if (!self.store.hasOwnProperty(serial)) continue;
-
- if (self.store[serial].hasOwnProperty('reject')) {
- self.store[serial].reject('Connection closed');
- }
- }
-
- rejectQueue();
- callEvents('onclose', err);
- callEvents('onchange', err);
- reconnect(callEvents);
- };
-
- ws.onerror = function (err) {
- log('ONERROR CALLED', 'STATE', self.public.state());
- trace(err);
- rejectQueue();
- callEvents('onerror', err);
- callEvents('onchange', err);
- log('WebSocket has been closed by error: ', err);
- };
-
- function tryCallEvent(func, event) {
- try {
- return func(event);
- } catch (e) {
- if (e.hasOwnProperty('stack')) {
- log(e.stack);
- } else {
- log('Event function', func, 'raised unknown error:', e);
- }
-
- console.error(e);
- }
- }
-
- function callEvents(evName, event) {
- while (0 < self.oneTimeEventStore[evName].length) {
- var deferred = self.oneTimeEventStore[evName].shift();
- if (deferred.hasOwnProperty('resolve') && deferred.promise.isPending()) deferred.resolve();
- }
-
- for (var i in self.eventStore[evName]) {
- if (!self.eventStore[evName].hasOwnProperty(i)) continue;
- var cur = self.eventStore[evName][i];
- tryCallEvent(cur, event);
- }
- }
-
- ws.onopen = function (ev) {
- log('ONOPEN CALLED', 'STATE', self.public.state());
- trace(ev);
-
- while (0 < self.callQueue.length) {
- // noinspection JSUnresolvedFunction
- self.socket.send(JSON.stringify(self.callQueue.shift(), 0, 1));
- }
-
- callEvents('onconnect', ev);
- callEvents('onchange', ev);
- };
-
- function handleCall(self, data) {
- if (!self.routes.hasOwnProperty(data.method)) throw new Error('Route not found');
- var connectionNumber = self.connectionNumber;
- var deferred = new Deferred();
- deferred.promise.then(function (result) {
- if (connectionNumber !== self.connectionNumber) return;
- self.socket.send(JSON.stringify({
- id: data.id,
- result: result
- }));
- }, function (error) {
- if (connectionNumber !== self.connectionNumber) return;
- self.socket.send(JSON.stringify({
- id: data.id,
- error: error
- }));
- });
- var func = self.routes[data.method];
- if (self.asyncRoutes[data.method]) return func.apply(deferred, [data.params]);
-
- function badPromise() {
- throw new Error("You should register route with async flag.");
- }
-
- var promiseMock = {
- resolve: badPromise,
- reject: badPromise
- };
-
- try {
- deferred.resolve(func.apply(promiseMock, [data.params]));
- } catch (e) {
- deferred.reject(e);
- console.error(e);
- }
- }
-
- function handleError(self, data) {
- if (!self.store.hasOwnProperty(data.id)) return log('Unknown callback');
- var deferred = self.store[data.id];
- if (typeof deferred === 'undefined') return log('Confirmation without handler');
- delete self.store[data.id];
- log('REJECTING', data.error);
- deferred.reject(data.error);
- }
-
- function handleResult(self, data) {
- var deferred = self.store[data.id];
- if (typeof deferred === 'undefined') return log('Confirmation without handler');
- delete self.store[data.id];
-
- if (data.hasOwnProperty('result')) {
- return deferred.resolve(data.result);
- }
-
- return deferred.reject(data.error);
- }
-
- ws.onmessage = function (message) {
- log('ONMESSAGE CALLED', 'STATE', self.public.state());
- trace(message);
- if (message.type !== 'message') return;
- var data;
-
- try {
- data = JSON.parse(message.data);
- log(data);
-
- if (data.hasOwnProperty('method')) {
- return handleCall(self, data);
- } else if (data.hasOwnProperty('error') && data.error === null) {
- return handleError(self, data);
- } else {
- return handleResult(self, data);
- }
- } catch (exception) {
- var err = {
- error: exception.message,
- result: null,
- id: data ? data.id : null
- };
- self.socket.send(JSON.stringify(err));
- console.error(exception);
- }
- };
-
- return ws;
- }
-
- function makeCall(func, args, params) {
- self.id += 2;
- var deferred = new Deferred();
- var callObj = Object.freeze({
- id: self.id,
- method: func,
- params: args
- });
- var state = self.public.state();
-
- if (state === 'OPEN') {
- self.store[self.id] = deferred;
- self.socket.send(JSON.stringify(callObj));
- } else if (state === 'CONNECTING') {
- log('SOCKET IS', state);
- self.store[self.id] = deferred;
- self.callQueue.push(callObj);
- } else {
- log('SOCKET IS', state);
-
- if (params && params['noWait']) {
- deferred.reject("Socket is: ".concat(state));
- } else {
- self.store[self.id] = deferred;
- self.callQueue.push(callObj);
- }
- }
-
- return deferred.promise;
- }
-
- self.asyncRoutes = {};
- self.routes = {};
- self.store = {};
- self.public = Object.freeze({
- call: function call(func, args, params) {
- return makeCall(func, args, params);
- },
- addRoute: function addRoute(route, callback, isAsync) {
- self.asyncRoutes[route] = isAsync || false;
- self.routes[route] = callback;
- },
- deleteRoute: function deleteRoute(route) {
- delete self.asyncRoutes[route];
- return delete self.routes[route];
- },
- addEventListener: function addEventListener(event, func) {
- var eventId = self.eventId++;
- self.eventStore[event][eventId] = func;
- return eventId;
- },
- removeEventListener: function removeEventListener(event, index) {
- if (self.eventStore[event].hasOwnProperty(index)) {
- delete self.eventStore[event][index];
- return true;
- } else {
- return false;
- }
- },
- onEvent: function onEvent(event) {
- var deferred = new Deferred();
- self.oneTimeEventStore[event].push(deferred);
- return deferred.promise;
- },
- destroy: function destroy() {
- return self.socket.close();
- },
- state: function state() {
- return readyState[this.stateCode()];
- },
- stateCode: function stateCode() {
- if (self.socketStarted && self.socket) return self.socket.readyState;
- return 3;
- },
- connect: function connect() {
- self.socketStarted = true;
- self.socket = createSocket();
- }
- });
- self.public.addRoute('log', function (argsObj) {
- //console.info("Websocket sent: ".concat(argsObj));
- });
- self.public.addRoute('ping', function (data) {
- return data;
- });
- return self.public;
- };
-
- WSRPC.DEBUG = false;
- WSRPC.TRACE = false;
-
- return WSRPC;
-
-}));
-//# sourceMappingURL=wsrpc.js.map
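// Usage sketch of how client.js above drives this class: construct with the websocket URL,
// register routes the server may call, connect, then call server-side methods.
var rpc = new WSRPC('ws://localhost:8099/ws/', 5000);
rpc.addRoute('Photoshop.read', function (data) {
    return 'headline';   // value returned to the server
});
rpc.connect();
rpc.call('Photoshop.ping').then(function (result) {
    console.log('ping result:', result);
});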
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/wsrpc.min.js b/server_addon/photoshop/client/ayon_photoshop/api/extension/client/wsrpc.min.js
deleted file mode 100644
index f1264b91c4..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/wsrpc.min.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(global,factory){"object"==typeof exports&&"undefined"!=typeof module?module.exports=factory():"function"==typeof define&&define.amd?define(factory):(global=global||self).WSRPC=factory()}(this,function(){"use strict";function _classCallCheck(instance,Constructor){if(!(instance instanceof Constructor))throw new TypeError("Cannot call a class as a function")}function Deferred(){_classCallCheck(this,Deferred);var self=this;function wrapper(func){return function(){if(!self.done)return self.done=!0,func.apply(this,arguments);console.error(new Error("Promise already done"))}}return self.resolve=null,self.reject=null,self.done=!1,self.promise=new Promise(function(resolve,reject){self.resolve=wrapper(resolve),self.reject=wrapper(reject)}),self.promise.isPending=function(){return!self.done},self}function logGroup(group,level,args){console.group(group),console[level].apply(this,args),console.groupEnd()}function log(){WSRPC.DEBUG&&logGroup("WSRPC.DEBUG","trace",arguments)}function trace(msg){if(WSRPC.TRACE){var payload=msg;"data"in msg&&(payload=JSON.parse(msg.data)),logGroup("WSRPC.TRACE","trace",[payload])}}var readyState=Object.freeze({0:"CONNECTING",1:"OPEN",2:"CLOSING",3:"CLOSED"}),WSRPC=function WSRPC(URL){var reconnectTimeout=1 //
-// forceEval is now by default true //
-// It wraps the scripts in a try catch and an eval providing useful error handling //
-// One can set in the jsx engine $.includeStack = true to return the call stack in the event of an error //
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-// JSX.js for calling jsx code from the js engine //
-// 2 methods included //
-// 1) jsx.evalScript AKA jsx.eval //
-// 2) jsx.evalFile AKA jsx.file //
-// Special features //
-// 1) Allows all changes in your jsx code to be reloaded into your extension at the click of a button //
-// 2) Can enable the $.fileName property to work and provides a $.__fileName() method as an alternative //
-// 3) Can force a callBack result from InDesign //
-// 4) No more csInterface.evalScript('alert("hello "' + title + " " + name + '");') //
-// use jsx.evalScript('alert("hello __title__ __name__");', {title: title, name: name}); //
-// 5) execute jsx files from your jsx folder like this jsx.evalFile('myFabJsxScript.jsx'); //
-// or from a relative path jsx.evalFile('../myFabScripts/myFabJsxScript.jsx'); //
-// or from an absolute url jsx.evalFile('/Path/to/my/FabJsxScript.jsx'); (mac) //
-// or from an absolute url jsx.evalFile('C:Path/to/my/FabJsxScript.jsx'); (windows) //
-// 6) Parameter can be entered in the from of a parameter list which can be in any order or as an object //
-// 7) Not camelCase sensitive (very useful for the illiterate) //
-// Dead easy to use BUT SPEND THE 3 TO 5 MINUTES IT SHOULD TAKE TO READ THE INSTRUCTIONS //
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-/* jshint undef:true, unused:true, esversion:6 */
-
-//////////////////////////////////////
-// jsx is the interface for the API //
-//////////////////////////////////////
-
-var jsx;
-
-// Wrap everything in an anonymous function to prevent leaks
-(function() {
- /////////////////////////////////////////////////////////////////////
- // Substitute some CSInterface functions to avoid dependency on it //
- /////////////////////////////////////////////////////////////////////
-
- var __dirname = (function() {
- var path, isMac;
- path = decodeURI(window.__adobe_cep__.getSystemPath('extension'));
- isMac = navigator.platform[0] === 'M'; // [M]ac
- path = path.replace('file://' + (isMac ? '' : '/'), '');
- return path;
- })();
-
- var evalScript = function(script, callback) {
- callback = callback || function() {};
- window.__adobe_cep__.evalScript(script, callback);
- };
-
-
- ////////////////////////////////////////////
- // In place of using the node path module //
- ////////////////////////////////////////////
-
- // jshint undef: true, unused: true
-
- // A very minified version of the NodeJs Path module!!
- // For use outside of NodeJs
- // Majorly nicked by Trevor from Joyent
- var path = (function() {
-
- var isString = function(arg) {
- return typeof arg === 'string';
- };
-
- // var isObject = function(arg) {
- // return typeof arg === 'object' && arg !== null;
- // };
-
- var basename = function(path) {
- if (!isString(path)) {
- throw new TypeError('Argument to path.basename must be a string');
- }
- var bits = path.split(/[\/\\]/g);
- return bits[bits.length - 1];
- };
-
- // jshint undef: true
- // Regex to split a windows path into three parts: [*, device, slash,
- // tail] windows-only
- var splitDeviceRe =
- /^([a-zA-Z]:|[\\\/]{2}[^\\\/]+[\\\/]+[^\\\/]+)?([\\\/])?([\s\S]*?)$/;
-
- // Regex to split the tail part of the above into [*, dir, basename, ext]
- // var splitTailRe =
- // /^([\s\S]*?)((?:\.{1,2}|[^\\\/]+?|)(\.[^.\/\\]*|))(?:[\\\/]*)$/;
-
- var win32 = {};
- // Function to split a filename into [root, dir, basename, ext]
- // var win32SplitPath = function(filename) {
- // // Separate device+slash from tail
- // var result = splitDeviceRe.exec(filename),
- // device = (result[1] || '') + (result[2] || ''),
- // tail = result[3] || '';
- // // Split the tail into dir, basename and extension
- // var result2 = splitTailRe.exec(tail),
- // dir = result2[1],
- // basename = result2[2],
- // ext = result2[3];
- // return [device, dir, basename, ext];
- // };
-
- var win32StatPath = function(path) {
- var result = splitDeviceRe.exec(path),
- device = result[1] || '',
- isUnc = !!device && device[1] !== ':';
- return {
- device: device,
- isUnc: isUnc,
- isAbsolute: isUnc || !!result[2], // UNC paths are always absolute
- tail: result[3]
- };
- };
-
- var normalizeUNCRoot = function(device) {
- return '\\\\' + device.replace(/^[\\\/]+/, '').replace(/[\\\/]+/g, '\\');
- };
-
- var normalizeArray = function(parts, allowAboveRoot) {
- var res = [];
- for (var i = 0; i < parts.length; i++) {
- var p = parts[i];
-
- // ignore empty parts
- if (!p || p === '.')
- continue;
-
- if (p === '..') {
- if (res.length && res[res.length - 1] !== '..') {
- res.pop();
- } else if (allowAboveRoot) {
- res.push('..');
- }
- } else {
- res.push(p);
- }
- }
-
- return res;
- };
-
- win32.normalize = function(path) {
- var result = win32StatPath(path),
- device = result.device,
- isUnc = result.isUnc,
- isAbsolute = result.isAbsolute,
- tail = result.tail,
- trailingSlash = /[\\\/]$/.test(tail);
-
- // Normalize the tail path
- tail = normalizeArray(tail.split(/[\\\/]+/), !isAbsolute).join('\\');
-
- if (!tail && !isAbsolute) {
- tail = '.';
- }
- if (tail && trailingSlash) {
- tail += '\\';
- }
-
- // Convert slashes to backslashes when `device` points to an UNC root.
- // Also squash multiple slashes into a single one where appropriate.
- if (isUnc) {
- device = normalizeUNCRoot(device);
- }
-
- return device + (isAbsolute ? '\\' : '') + tail;
- };
- win32.join = function() {
- var paths = [];
- for (var i = 0; i < arguments.length; i++) {
- var arg = arguments[i];
- if (!isString(arg)) {
- throw new TypeError('Arguments to path.join must be strings');
- }
- if (arg) {
- paths.push(arg);
- }
- }
-
- var joined = paths.join('\\');
-
- // Make sure that the joined path doesn't start with two slashes, because
- // normalize() will mistake it for an UNC path then.
- //
- // This step is skipped when it is very clear that the user actually
- // intended to point at an UNC path. This is assumed when the first
- // non-empty string arguments starts with exactly two slashes followed by
- // at least one more non-slash character.
- //
- // Note that for normalize() to treat a path as an UNC path it needs to
- // have at least 2 components, so we don't filter for that here.
- // This means that the user can use join to construct UNC paths from
- // a server name and a share name; for example:
- // path.join('//server', 'share') -> '\\\\server\\share\')
- if (!/^[\\\/]{2}[^\\\/]/.test(paths[0])) {
- joined = joined.replace(/^[\\\/]{2,}/, '\\');
- }
- return win32.normalize(joined);
- };
-
- var posix = {};
-
- // posix version
- posix.join = function() {
- var path = '';
- for (var i = 0; i < arguments.length; i++) {
- var segment = arguments[i];
- if (!isString(segment)) {
- throw new TypeError('Arguments to path.join must be strings');
- }
- if (segment) {
- if (!path) {
- path += segment;
- } else {
- path += '/' + segment;
- }
- }
- }
- return posix.normalize(path);
- };
-
- // path.normalize(path)
- // posix version
- posix.normalize = function(path) {
- var isAbsolute = path.charAt(0) === '/',
- trailingSlash = path && path[path.length - 1] === '/';
-
- // Normalize the path
- path = normalizeArray(path.split('/'), !isAbsolute).join('/');
-
- if (!path && !isAbsolute) {
- path = '.';
- }
- if (path && trailingSlash) {
- path += '/';
- }
-
- return (isAbsolute ? '/' : '') + path;
- };
-
- win32.basename = posix.basename = basename;
-
- this.win32 = win32;
- this.posix = posix;
- return (navigator.platform[0] === 'M') ? posix : win32;
- })();
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////
- // The is the "main" function which is to be prototyped //
- // It run a small snippet in the jsx engine that //
- // 1) Assigns $.__dirname with the value of the extensions __dirname base path //
- // 2) Sets up a method $.__fileName() for retrieving from within the jsx script it's $.fileName value //
- // more on that method later //
- // At the end of the script the global declaration jsx = new Jsx(); has been made. //
- // If you like you can remove that and include in your relevant functions //
- // var jsx = new Jsx(); You would never call the Jsx function without the "new" declaration //
- ////////////////////////////////////////////////////////////////////////////////////////////////////////
- var Jsx = function() {
- var jsxScript;
- // Setup jsx function to enable the jsx scripts to easily retrieve their file location
- jsxScript = [
- '$.level = 0;',
- 'if(!$.__fileNames){',
- ' $.__fileNames = {};',
- ' $.__dirname = "__dirname__";'.replace('__dirname__', __dirname),
- ' $.__fileName = function(name){',
- ' name = name || $.fileName;',
- ' return ($.__fileNames && $.__fileNames[name]) || $.fileName;',
- ' };',
- '}'
- ].join('');
- evalScript(jsxScript);
- return this;
- };
-
- /**
- * [evalScript] For calling jsx scripts from the js engine
- *
- * The jsx.evalScript method is used for calling jsx scripts directly from the js engine
- * Allows for easy replacement i.e. variable insertions and for forcing eval.
- * For convenience jsx.eval or jsx.script or jsx.evalscript can be used instead of calling jsx.evalScript
- *
- * @param {String} jsxScript
- * The string that makes up the jsx script
- * it can contain a simple template like syntax for replacements
- * 'alert("__foo__");'
- * the __foo__ will be replaced as per the replacements parameter
- *
- * @param {Function} callback
- * The callback function you want the jsx script to trigger on completion
- * The result of the jsx script is passed as the argument to that function
- * The function can exist in some other file.
- * Note that InDesign does not automatically pass the callBack result as a string.
- * Either write your InDesign script so that it returns a string in the form of
- * return 'this is my result surrounded by quotes'
- * or use the force eval option
- * [Optional DEFAULT no callBack]
- *
- * @param {Object} replacements
- * The replacements to make on the jsx script
- * given the following script (template)
- * 'alert("__message__: " + __val__);'
- * and we want to change the script to
- * 'alert("I was born in the year: " + 1234);'
- * we would pass the following object
- * {"message": 'I was born in the year', "val": 1234}
- * or, if we are not using reserved words like do, we can leave out the key quotes
- * {message: 'I was born in the year', val: 1234}
- * [Optional DEFAULT no replacements]
- *
- * @param {Boolean} forceEval
- * If the script should be wrapped in an eval and try catch
- * This will 1) provide useful error feedback if heaven forbid it is needed
- * 2) The result will be a string which is required for callback results in InDesign
- * [Optional DEFAULT true]
- *
- * Note 1) The order of the parameters is irrelevant
- * Note 2) One can pass the arguments as an object if desired
- * jsx.evalScript(myCallBackFunction, 'alert("__myMessage__");', true);
- * is the same as
- * jsx.evalScript({
- * script: 'alert("__myMessage__");',
- * replacements: {myMessage: 'Hi there'},
- * callBack: myCallBackFunction,
- * eval: true
- * });
- * note that either lower or camelCase key names are valid
- * i.e. both callback or callBack will work
- *
- * The following keys are the same jsx || script || jsxScript || jsxscript || file
- * The following keys are the same callBack || callback
- * The following keys are the same replacements || replace
- * The following keys are the same eval || forceEval || forceeval
- * The following keys are the same forceEvalScript || forceevalscript || evalScript || evalscript;
- *
- * @return {Boolean} if the jsxScript was executed or not
- */
-
- Jsx.prototype.evalScript = function() {
- var arg, i, key, replaceThis, withThis, args, callback, forceEval, replacements, jsxScript, isBin;
-
- //////////////////////////////////////////////////////////////////////////////////////
- // sort out order which arguments into jsxScript, callback, replacements, forceEval //
- //////////////////////////////////////////////////////////////////////////////////////
-
- args = arguments;
-
- // Detect if the parameters were passed as an object and if so allow for various keys
- if (args.length === 1 && (arg = args[0]) instanceof Object) {
- jsxScript = arg.jsxScript || arg.jsx || arg.script || arg.file || arg.jsxscript;
- callback = arg.callBack || arg.callback;
- replacements = arg.replacements || arg.replace;
- forceEval = arg.eval || arg.forceEval || arg.forceeval;
- } else {
- for (i = 0; i < 4; i++) {
- arg = args[i];
- if (arg === undefined) {
- continue;
- }
- if (arg.constructor === String) {
- jsxScript = arg;
- continue;
- }
- if (arg.constructor === Object) {
- replacements = arg;
- continue;
- }
- if (arg.constructor === Function) {
- callback = arg;
- continue;
- }
- if (arg === false) {
- forceEval = false;
- }
- }
- }
-
- // If no script provide then not too much to do!
- if (!jsxScript) {
- return false;
- }
-
- // Have changed the forceEval default to be true as I prefer the error handling
- if (forceEval !== false) {
- forceEval = true;
- }
-
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // On Illustrator and other apps the result of the jsx script is automatically passed as a string //
- // if you have a "script" containing the single number 1 and nothing else then the callBack will register as "1" //
- // On InDesign that same script will provide a blank callBack //
- // Let's say we have a callBack function var callBack = function(result){alert(result);} //
- // On Ai your see the 1 in the alert //
- // On ID your just see a blank alert //
- // To see the 1 in the alert you need to convert the result to a string and then it will show //
- // So if we rewrite out 1 byte script to '1' i.e. surround the 1 in quotes then the call back alert will show 1 //
- // If the scripts planed one can make sure that the results always passed as a string (including errors) //
- // otherwise one can wrap the script in an eval and then have the result passed as a string //
- // I have not gone through all the apps but can say //
- // for Ai you never need to set the forceEval to true //
- // for ID you if you have not coded your script appropriately and your want to send a result to the callBack then set forceEval to true //
- // I changed this that even on Illustrator it applies the try catch, Note the try catch will fail if $.level is set to 1 //
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- if (forceEval) {
-
- isBin = (jsxScript.substring(0, 10) === '@JSXBIN@ES') ? '' : '\n';
- jsxScript = (
- // "\n''') + '';} catch(e){(function(e){var n, a=[]; for (n in e){a.push(n + ': ' + e[n])}; return a.join('\n')})(e)}");
- // "\n''') + '';} catch(e){e + (e.line ? ('\\nLine ' + (+e.line - 1)) : '')}");
- [
- "$.level = 0;",
- "try{eval('''" + isBin, // need to add an extra line otherwise #targetengine doesn't work ;-]
- jsxScript.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/"/g, '\\"') + "\n''') + '';",
- "} catch (e) {",
- " (function(e) {",
- " var line, sourceLine, name, description, ErrorMessage, fileName, start, end, bug;",
- " line = +e.line" + (isBin === '' ? ';' : ' - 1;'), // To take into account the extra line added
- " fileName = File(e.fileName).fsName;",
- " sourceLine = line && e.source.split(/[\\r\\n]/)[line];",
- " name = e.name;",
- " description = e.description;",
- " ErrorMessage = name + ' ' + e.number + ': ' + description;",
- " if (fileName.length && !(/[\\/\\\\]\\d+$/.test(fileName))) {",
- " ErrorMessage += '\\nFile: ' + fileName;",
- " line++;",
- " }",
- " if (line){",
- " ErrorMessage += '\\nLine: ' + line +",
- " '-> ' + ((sourceLine.length < 300) ? sourceLine : sourceLine.substring(0,300) + '...');",
- " }",
- " if (e.start) {ErrorMessage += '\\nBug: ' + e.source.substring(e.start - 1, e.end)}",
- " if ($.includeStack) {ErrorMessage += '\\nStack:' + $.stack;}",
- " return ErrorMessage;",
- " })(e);",
- "}"
- ].join('')
- );
-
- }
-
- /////////////////////////////////////////////////////////////
- // deal with the replacements //
- // Note it's probably better to use ${template} `literals` //
- /////////////////////////////////////////////////////////////
-
- if (replacements) {
- for (key in replacements) {
- if (replacements.hasOwnProperty(key)) {
- replaceThis = new RegExp('__' + key + '__', 'g');
- withThis = replacements[key];
- jsxScript = jsxScript.replace(replaceThis, withThis + '');
- }
- }
- }
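- // Illustrative sketch, not from the original file: given the script
- //     'alert("__greeting__ " + __year__);'
- // and the replacements object {greeting: 'Made in', year: 1234},
- // the loop above rewrites the script to
- //     'alert("Made in " + 1234);'
- // before it is evaluated.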
-
-
- try {
- evalScript(jsxScript, callback);
- return true;
- } catch (err) {
- ////////////////////////////////////////////////
- // Do whatever error handling you want here ! //
- ////////////////////////////////////////////////
- var newErr;
- newErr = new Error(err);
- alert('Error Eek: ' + newErr.stack);
- return false;
- }
-
- };
-
-
- /**
- * [evalFile] For calling jsx scripts from the js engine
- *
- * The jsx.evalFile method is used for executing saved jsx scripts
- * where the file parameter is a string of the jsx script's file location.
- * For convenience jsx.file or jsx.evalfile can be used instead of jsx.evalFile
- *
- * @param {String} file
- * The path to the jsx script
- * If only the base name is provided then the path is presumed to be the jsx folder located in the __dirname folder
- * To execute files stored in the jsx folder located in the __dirname folder use
- * jsx.evalFile('myFabJsxScript.jsx');
- * To execute files stored in a folder myFabScripts located in the __dirname folder use
- * jsx.evalFile('./myFabScripts/myFabJsxScript.jsx');
- * To execute files stored in a folder myFabScripts located at an absolute url use
- * jsx.evalFile('/Path/to/my/FabJsxScript.jsx'); (mac)
- * or jsx.evalFile('C:/Path/to/my/FabJsxScript.jsx'); (windows)
- *
- * @param {Function} callback
- * The callback function you want the jsx script to trigger on completion
- * The result of the jsx script is passed as the argument to that function
- * The function can exist in some other file.
- * Note that InDesign does not automatically pass the callBack result as a string.
- * Either write your InDesign script in a way that it returns a string, in the form of
- * return 'this is my result surrounded by quotes'
- * or use the forceEval option
- * [Optional DEFAULT no callBack]
- *
- * @param {Object} replacements
- * The replacements to make on the jsx script
- * given the following script (template)
- * 'alert("__message__: " + __val__);'
- * and we want to change the script to
- * 'alert("I was born in the year: " + 1234);'
- * we would pass the following object
- * {"message": 'I was born in the year', "val": 1234}
- * or, if not using reserved words like 'do', we can leave out the key quotes
- * {message: 'I was born in the year', val: 1234}
- * By default when possible the forceEvalScript will be set to true
- * The forceEvalScript option cannot be true when there are replacements
- * To force the forceEvalScript to be false you can send a blank set of replacements
- * jsx.evalFile('myFabScript.jsx', {}); will NOT be executed using the $.evalFile method
- * jsx.evalFile('myFabScript.jsx'); WILL be executed using the $.evalFile method
- * see the forceEvalScript notes below for details on this
- * [Optional DEFAULT no replacements]
- *
- * @param {Boolean} forceEval
- * If the script should be wrapped in an eval and try catch
- * This will 1) provide useful error feedback if heaven forbid it is needed
- * 2) The result will be a string which is required for callback results in InDesign
- * [Optional DEFAULT true]
- *
- * If no replacements are needed then the jsx script is executed by using the $.evalFile method
- * This exposes the true value of the $.fileName property
- * In such a case it's best to avoid using the $.__fileName() with no base name as it won't work
- * BUT one can still use the $.__fileName('baseName') method which is more accurate than the standard $.fileName property
- * Let's say you have a Drive called "Graphics" AND YOU HAVE a root folder on your "main" drive called "Graphics"
- * You call a script jsx.evalFile('/Volumes/Graphics/myFabScript.jsx');
- * $.fileName will give you '/Graphics/myFabScript.jsx' which is wrong
- * $.__fileName('myFabScript.jsx') will give you '/Volumes/Graphics/myFabScript.jsx' which is correct
- * $.__fileName() will not give you a reliable result
- * Note that if your calling multiple versions of myFabScript.jsx stored in multiple folders then you can get stuffed!
- * i.e. if the fileName is important to you then don't do that.
- * It will also force the result of the jsx file to be a string, which is particularly useful for InDesign callBacks
- *
- * Note 1) The order of the parameters is irrelevant
- * Note 2) One can pass the arguments as an object if desired
- * jsx.evalScript(myCallBackFunction, 'alert("__myMessage__");', {myMessage: 'Hi there'}, true);
- * is the same as
- * jsx.evalScript({
- *     script: 'alert("__myMessage__");',
- *     replacements: {myMessage: 'Hi there'},
- *     callBack: myCallBackFunction,
- *     eval: true,
- * });
- * note that either lowercase or camelCase key names are valid
- * i.e. both callback or callBack will work
- *
- * The following keys are the same file || jsx || script || jsxScript || jsxscript
- * The following keys are the same callBack || callback
- * The following keys are the same replacements || replace
- * The following keys are the same eval || forceEval || forceeval
- *
- * @return {Boolean} if the jsxScript was executed or not
- */
-
- Jsx.prototype.evalFile = function() {
- var arg, args, callback, fileName, fileNameScript, forceEval, forceEvalScript,
- i, jsxFolder, jsxScript, newLine, replacements, success;
-
- success = true; // optimistic
- args = arguments;
-
- jsxFolder = path.join(__dirname, 'jsx');
- //////////////////////////////////////////////////////////////////////////////////////////////////////////
- // $.fileName does not return its correct path in the jsx engine for files called from the js engine //
- // In Illustrator it returns an integer, in InDesign it returns an empty string //
- // This script injection allows the script to know its path by calling //
- // $.__fileName(); //
- // on Illustrator this works pretty well //
- // on InDesign it's best to use with a bit of care //
- // If a second script has been called then InDesign will "forget" the path to the first script //
- // 2 work-arounds for this //
- // 1) at the beginning of your script add var thePathToMeIs = $.__fileName(); //
- // thePathToMeIs will not be forgotten after running the second script //
- // 2) $.__fileName('myBaseName.jsx'); //
- // for example you have a file with the following path //
- // /path/to/me.jsx //
- // Call $.__fileName('me.jsx') and you will get /path/to/me.jsx even after executing a second script //
- // Note: when the forceEvalScript option is used then you just use the regular $.fileName property //
- //////////////////////////////////////////////////////////////////////////////////////////////////////////
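- // Illustrative sketch, not from the original file: inside a jsx script executed via
- // jsx.evalFile, the injected helpers above could be used roughly like this:
- //     var myPath = $.__fileName('myFabScript.jsx'); // full path of this script
- //     var myDir = $.__dirname;                      // the extension's __dirname
- //     alert('I live at: ' + myPath);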
- fileNameScript = [
- // The if statement should not normally be executed
- 'if(!$.__fileNames){',
- ' $.__fileNames = {};',
- ' $.__dirname = "__dirname__";'.replace('__dirname__', __dirname),
- ' $.__fileName = function(name){',
- ' name = name || $.fileName;',
- ' return ($.__fileNames && $.__fileNames[name]) || $.fileName;',
- ' };',
- '}',
- '$.__fileNames["__basename__"] = $.__fileNames["" + $.fileName] = "__fileName__";'
- ].join('');
-
- //////////////////////////////////////////////////////////////////////////////////////
- // sort out order which arguments into jsxScript, callback, replacements, forceEval //
- //////////////////////////////////////////////////////////////////////////////////////
-
-
- // Detect if the parameters were passed as an object and if so allow for various keys
- if (args.length === 1 && (arg = args[0]) instanceof Object) {
- jsxScript = arg.jsxScript || arg.jsx || arg.script || arg.file || arg.jsxscript;
- callback = arg.callBack || arg.callback;
- replacements = arg.replacements || arg.replace;
- forceEval = arg.eval || arg.forceEval || arg.forceeval;
- } else {
- for (i = 0; i < 5; i++) {
- arg = args[i];
- if (arg === undefined) {
- continue;
- }
- if (arg.constructor.name === 'String') {
- jsxScript = arg;
- continue;
- }
- if (arg.constructor.name === 'Object') {
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // If no replacements are provided then the $.evalFile method will be used //
- // This directly allows the $.fileName property to be used //
- // If one does not want the $.evalFile method to be used then //
- // either send a blank object as the replacements {} //
- // or explicitly set the forceEvalScript option to false //
- // This can only be done if the parameters are passed as an object //
- // i.e. jsx.evalFile({file:'myFabScript.jsx', forceEvalScript: false}); //
- // if the file was called using //
- // i.e. jsx.evalFile('myFabScript.jsx'); //
- // then the following jsx code is called: $.evalFile(new File('Path/to/myFabScript.jsx')) + ''; //
- // forceEval is never needed if the forceEvalScript is triggered //
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////
- replacements = arg;
- continue;
- }
- if (arg.constructor === Function) {
- callback = arg;
- continue;
- }
- if (arg === false) {
- forceEval = false;
- }
- }
- }
-
- // If no script is provided then there's not much to do!
- if (!jsxScript) {
- return false;
- }
-
- forceEvalScript = !replacements;
-
-
- //////////////////////////////////////////////////////
- // Get path of script //
- // Check if it's literal, relative or in jsx folder //
- //////////////////////////////////////////////////////
-
- if (/^\/|[a-zA-Z]+:/.test(jsxScript)) { // absolute path Mac | Windows
- jsxScript = path.normalize(jsxScript);
- } else if (/^\.+\//.test(jsxScript)) {
- jsxScript = path.join(__dirname, jsxScript); // relative path
- } else {
- jsxScript = path.join(jsxFolder, jsxScript); // files in the jsxFolder
- }
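- // Illustrative examples, not from the original file, of how the resolution above behaves
- // (assuming __dirname is the extension root):
- //     jsx.evalFile('foo.jsx');         // -> <__dirname>/jsx/foo.jsx
- //     jsx.evalFile('./lib/foo.jsx');   // -> <__dirname>/lib/foo.jsx
- //     jsx.evalFile('/abs/foo.jsx');    // -> /abs/foo.jsx ('C:/abs/foo.jsx' on Windows)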
-
- if (forceEvalScript) {
- jsxScript = jsxScript.replace(/"/g, '\\"');
- // Check that the path exists, should change this to asynchronous at some point
- if (!window.cep.fs.stat(jsxScript).err) {
- jsxScript = fileNameScript.replace(/__fileName__/, jsxScript).replace(/__basename__/, path.basename(jsxScript)) +
- '$.evalFile(new File("' + jsxScript.replace(/\\/g, '\\\\') + '")) + "";';
- return this.evalScript(jsxScript, callback, forceEval);
- } else {
- throw new Error(`The file: ${jsxScript} could not be found / read`);
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////
- // Replacements made so we can't use $.evalFile and need to read the jsx script for ourselves //
- ////////////////////////////////////////////////////////////////////////////////////////////////
-
- fileName = jsxScript.replace(/\\/g, '\\\\').replace(/"/g, '\\"');
- try {
- jsxScript = window.cep.fs.readFile(jsxScript).data;
- } catch (er) {
- throw new Error(`The file: ${fileName} could not be read`);
- }
- // It is desirable that the injected fileNameScript is on the same line as the 1st line of the script
- // This is so that the $.line or error.line returns the same value as the actual file
- // However if the 1st line contains a # directive then we need to insert a new line, which breaks the line-number alignment described above
- // When possible i.e. when there's no replacements then $.evalFile will be used and then the whole issue is avoided
- newLine = /^\s*#/.test(jsxScript) ? '\n' : '';
- jsxScript = fileNameScript.replace(/__fileName__/, fileName).replace(/__basename__/, path.basename(fileName)) + newLine + jsxScript;
-
- try {
- // evalScript(jsxScript, callback);
- return this.evalScript(jsxScript, callback, replacements, forceEval);
- } catch (err) {
- ////////////////////////////////////////////////
- // Do whatever error handling you want here ! //
- ////////////////////////////////////////////////
- var newErr;
- newErr = new Error(err);
- alert('Error Eek: ' + newErr.stack);
- return false;
- }
-
- return success; // success should be an array but for now it's a Boolean
- };
-
-
- ////////////////////////////////////
- // Setup alternative method names //
- ////////////////////////////////////
- Jsx.prototype.eval = Jsx.prototype.script = Jsx.prototype.evalscript = Jsx.prototype.evalScript;
- Jsx.prototype.file = Jsx.prototype.evalfile = Jsx.prototype.evalFile;
-
- ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // Examples //
- // jsx.evalScript('alert("foo");'); //
- // jsx.evalFile('foo.jsx'); // where foo.jsx is stored in the jsx folder at the base of the extensions directory //
- // jsx.evalFile('../myFolder/foo.jsx'); // where a relative or absolute file path is given //
- // //
- // using conventional methods one would use in the case where the values to swap were supplied by variables //
- // csInterface.evalScript('var q = "' + name + '"; alert("' + myString + '" ' + myOp + ' q);q;', callback); //
- // Using all the '' + foo + '' is very error prone //
- // jsx.evalScript('var q = "__name__"; alert(__string__ __opp__ q);q;',{'name':'Fred', 'string':'Hello ', 'opp':'+'}, callBack); //
- // is much simpler and less error prone //
- // //
- // it is more readable to use an object //
- // jsx.evalFile({ //
- // file: 'yetAnotherFabScript.jsx', //
- // replacements: {"this": foo, That: bar, and: "&&", the: foo2, other: bar2}, //
- // eval: true //
- // }) //
- // Enjoy //
- ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-
- jsx = new Jsx();
-})();
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/host/index.jsx b/server_addon/photoshop/client/ayon_photoshop/api/extension/host/index.jsx
deleted file mode 100644
index b697ee65ab..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/extension/host/index.jsx
+++ /dev/null
@@ -1,484 +0,0 @@
-#include "json.js";
-#target photoshop
-
-var LogFactory=function(file,write,store,level,defaultStatus,continuing){if(file&&(file.constructor===String||file.constructor===File)){file={file:file};}else if(!file)file={file:{}};write=(file.write!==undefined)?file.write:write;if(write===undefined){write=true;}store=(file.store!==undefined)?file.store||false:store||false;level=(file.level!==undefined)?file.level:level;defaultStatus=(file.defaultStatus!==undefined)?file.defaultStatus:defaultStatus;if(defaultStatus===undefined){defaultStatus='LOG';}continuing=(file.continuing!==undefined)?file.continuing:continuing||false;file=file.file||{};var stack,times,logTime,logPoint,icons,statuses,LOG_LEVEL,LOG_STATUS;stack=[];times=[];logTime=new Date();logPoint='Log Factory Start';icons={"1":"\ud83d\udd50","130":"\ud83d\udd5c","2":"\ud83d\udd51","230":"\ud83d\udd5d","3":"\ud83d\udd52","330":"\ud83d\udd5e","4":"\ud83d\udd53","430":"\ud83d\udd5f","5":"\ud83d\udd54","530":"\ud83d\udd60","6":"\ud83d\udd55","630":"\ud83d\udd61","7":"\ud83d\udd56","730":"\ud83d\udd62","8":"\ud83d\udd57","830":"\ud83d\udd63","9":"\ud83d\udd58","930":"\ud83d\udd64","10":"\ud83d\udd59","1030":"\ud83d\udd65","11":"\ud83d\udd5a","1130":"\ud83d\udd66","12":"\ud83d\udd5b","1230":"\ud83d\udd67","AIRPLANE":"\ud83d\udee9","ALARM":"\u23f0","AMBULANCE":"\ud83d\ude91","ANCHOR":"\u2693","ANGRY":"\ud83d\ude20","ANGUISHED":"\ud83d\ude27","ANT":"\ud83d\udc1c","ANTENNA":"\ud83d\udce1","APPLE":"\ud83c\udf4f","APPLE2":"\ud83c\udf4e","ATM":"\ud83c\udfe7","ATOM":"\u269b","BABYBOTTLE":"\ud83c\udf7c","BAD:":"\ud83d\udc4e","BANANA":"\ud83c\udf4c","BANDAGE":"\ud83e\udd15","BANK":"\ud83c\udfe6","BATTERY":"\ud83d\udd0b","BED":"\ud83d\udecf","BEE":"\ud83d\udc1d","BEER":"\ud83c\udf7a","BELL":"\ud83d\udd14","BELLOFF":"\ud83d\udd15","BIRD":"\ud83d\udc26","BLACKFLAG":"\ud83c\udff4","BLUSH":"\ud83d\ude0a","BOMB":"\ud83d\udca3","BOOK":"\ud83d\udcd5","BOOKMARK":"\ud83d\udd16","BOOKS":"\ud83d\udcda","BOW":"\ud83c\udff9","BOWLING":"\ud83c\udfb3","BRIEFCASE":"\ud83d\udcbc","BROKEN":"\ud83d\udc94","BUG":"\ud83d\udc1b","BUILDING":"\ud83c\udfdb","BUILDINGS":"\ud83c\udfd8","BULB":"\ud83d\udca1","BUS":"\ud83d\ude8c","CACTUS":"\ud83c\udf35","CALENDAR":"\ud83d\udcc5","CAMEL":"\ud83d\udc2a","CAMERA":"\ud83d\udcf7","CANDLE":"\ud83d\udd6f","CAR":"\ud83d\ude98","CAROUSEL":"\ud83c\udfa0","CASTLE":"\ud83c\udff0","CATEYES":"\ud83d\ude3b","CATJOY":"\ud83d\ude39","CATMOUTH":"\ud83d\ude3a","CATSMILE":"\ud83d\ude3c","CD":"\ud83d\udcbf","CHECK":"\u2714","CHEQFLAG":"\ud83c\udfc1","CHICK":"\ud83d\udc25","CHICKEN":"\ud83d\udc14","CHICKHEAD":"\ud83d\udc24","CIRCLEBLACK":"\u26ab","CIRCLEBLUE":"\ud83d\udd35","CIRCLERED":"\ud83d\udd34","CIRCLEWHITE":"\u26aa","CIRCUS":"\ud83c\udfaa","CLAPPER":"\ud83c\udfac","CLAPPING":"\ud83d\udc4f","CLIP":"\ud83d\udcce","CLIPBOARD":"\ud83d\udccb","CLOUD":"\ud83c\udf28","CLOVER":"\ud83c\udf40","CLOWN":"\ud83e\udd21","COLDSWEAT":"\ud83d\ude13","COLDSWEAT2":"\ud83d\ude30","COMPRESS":"\ud83d\udddc","CONFOUNDED":"\ud83d\ude16","CONFUSED":"\ud83d\ude15","CONSTRUCTION":"\ud83d\udea7","CONTROL":"\ud83c\udf9b","COOKIE":"\ud83c\udf6a","COOKING":"\ud83c\udf73","COOL":"\ud83d\ude0e","COOLBOX":"\ud83c\udd92","COPYRIGHT":"\u00a9","CRANE":"\ud83c\udfd7","CRAYON":"\ud83d\udd8d","CREDITCARD":"\ud83d\udcb3","CROSS":"\u2716","CROSSBOX:":"\u274e","CRY":"\ud83d\ude22","CRYCAT":"\ud83d\ude3f","CRYSTALBALL":"\ud83d\udd2e","CUSTOMS":"\ud83d\udec3","DELICIOUS":"\ud83d\ude0b","DERELICT":"\ud83c\udfda","DESKTOP":"\ud83d\udda5","DIAMONDLB":"\ud83d\udd37","DIAMONDLO":"\ud83d\udd36","DIAMONDSB":"\ud83d\udd39","DIAMONDSO":"\ud83
d\udd38","DICE":"\ud83c\udfb2","DISAPPOINTED":"\ud83d\ude1e","CRY2":"\ud83d\ude25","DIVISION":"\u2797","DIZZY":"\ud83d\ude35","DOLLAR":"\ud83d\udcb5","DOLLAR2":"\ud83d\udcb2","DOWNARROW":"\u2b07","DVD":"\ud83d\udcc0","EJECT":"\u23cf","ELEPHANT":"\ud83d\udc18","EMAIL":"\ud83d\udce7","ENVELOPE":"\ud83d\udce8","ENVELOPE2":"\u2709","ENVELOPE_DOWN":"\ud83d\udce9","EURO":"\ud83d\udcb6","EVIL":"\ud83d\ude08","EXPRESSIONLESS":"\ud83d\ude11","EYES":"\ud83d\udc40","FACTORY":"\ud83c\udfed","FAX":"\ud83d\udce0","FEARFUL":"\ud83d\ude28","FILEBOX":"\ud83d\uddc3","FILECABINET":"\ud83d\uddc4","FIRE":"\ud83d\udd25","FIREENGINE":"\ud83d\ude92","FIST":"\ud83d\udc4a","FLOWER":"\ud83c\udf37","FLOWER2":"\ud83c\udf38","FLUSHED":"\ud83d\ude33","FOLDER":"\ud83d\udcc1","FOLDER2":"\ud83d\udcc2","FREE":"\ud83c\udd93","FROG":"\ud83d\udc38","FROWN":"\ud83d\ude41","GEAR":"\u2699","GLOBE":"\ud83c\udf0d","GLOWINGSTAR":"\ud83c\udf1f","GOOD:":"\ud83d\udc4d","GRIMACING":"\ud83d\ude2c","GRIN":"\ud83d\ude00","GRINNINGCAT":"\ud83d\ude38","HALO":"\ud83d\ude07","HAMMER":"\ud83d\udd28","HAMSTER":"\ud83d\udc39","HAND":"\u270b","HANDDOWN":"\ud83d\udc47","HANDLEFT":"\ud83d\udc48","HANDRIGHT":"\ud83d\udc49","HANDUP":"\ud83d\udc46","HATCHING":"\ud83d\udc23","HAZARD":"\u2623","HEADPHONE":"\ud83c\udfa7","HEARNOEVIL":"\ud83d\ude49","HEARTBLUE":"\ud83d\udc99","HEARTEYES":"\ud83d\ude0d","HEARTGREEN":"\ud83d\udc9a","HEARTYELLOW":"\ud83d\udc9b","HELICOPTER":"\ud83d\ude81","HERB":"\ud83c\udf3f","HIGH_BRIGHTNESS":"\ud83d\udd06","HIGHVOLTAGE":"\u26a1","HIT":"\ud83c\udfaf","HONEY":"\ud83c\udf6f","HOT":"\ud83c\udf36","HOURGLASS":"\u23f3","HOUSE":"\ud83c\udfe0","HUGGINGFACE":"\ud83e\udd17","HUNDRED":"\ud83d\udcaf","HUSHED":"\ud83d\ude2f","ID":"\ud83c\udd94","INBOX":"\ud83d\udce5","INDEX":"\ud83d\uddc2","JOY":"\ud83d\ude02","KEY":"\ud83d\udd11","KISS":"\ud83d\ude18","KISS2":"\ud83d\ude17","KISS3":"\ud83d\ude19","KISS4":"\ud83d\ude1a","KISSINGCAT":"\ud83d\ude3d","KNIFE":"\ud83d\udd2a","LABEL":"\ud83c\udff7","LADYBIRD":"\ud83d\udc1e","LANDING":"\ud83d\udeec","LAPTOP":"\ud83d\udcbb","LEFTARROW":"\u2b05","LEMON":"\ud83c\udf4b","LIGHTNINGCLOUD":"\ud83c\udf29","LINK":"\ud83d\udd17","LITTER":"\ud83d\udeae","LOCK":"\ud83d\udd12","LOLLIPOP":"\ud83c\udf6d","LOUDSPEAKER":"\ud83d\udce2","LOW_BRIGHTNESS":"\ud83d\udd05","MAD":"\ud83d\ude1c","MAGNIFYING_GLASS":"\ud83d\udd0d","MASK":"\ud83d\ude37","MEDAL":"\ud83c\udf96","MEMO":"\ud83d\udcdd","MIC":"\ud83c\udfa4","MICROSCOPE":"\ud83d\udd2c","MINUS":"\u2796","MOBILE":"\ud83d\udcf1","MONEY":"\ud83d\udcb0","MONEYMOUTH":"\ud83e\udd11","MONKEY":"\ud83d\udc35","MOUSE":"\ud83d\udc2d","MOUSE2":"\ud83d\udc01","MOUTHLESS":"\ud83d\ude36","MOVIE":"\ud83c\udfa5","MUGS":"\ud83c\udf7b","NERD":"\ud83e\udd13","NEUTRAL":"\ud83d\ude10","NEW":"\ud83c\udd95","NOENTRY":"\ud83d\udeab","NOTEBOOK":"\ud83d\udcd4","NOTEPAD":"\ud83d\uddd2","NUTANDBOLT":"\ud83d\udd29","O":"\u2b55","OFFICE":"\ud83c\udfe2","OK":"\ud83c\udd97","OKHAND":"\ud83d\udc4c","OLDKEY":"\ud83d\udddd","OPENLOCK":"\ud83d\udd13","OPENMOUTH":"\ud83d\ude2e","OUTBOX":"\ud83d\udce4","PACKAGE":"\ud83d\udce6","PAGE":"\ud83d\udcc4","PAINTBRUSH":"\ud83d\udd8c","PALETTE":"\ud83c\udfa8","PANDA":"\ud83d\udc3c","PASSPORT":"\ud83d\udec2","PAWS":"\ud83d\udc3e","PEN":"\ud83d\udd8a","PEN2":"\ud83d\udd8b","PENSIVE":"\ud83d\ude14","PERFORMING":"\ud83c\udfad","PHONE":"\ud83d\udcde","PILL":"\ud83d\udc8a","PING":"\u2757","PLATE":"\ud83c\udf7d","PLUG":"\ud83d\udd0c","PLUS":"\u2795","POLICE":"\ud83d\ude93","POLICELIGHT":"\ud83d\udea8","POSTOFFICE":"\ud83c\udfe4","POUND":"\ud83d\udcb7","POUTING":"\ud8
3d\ude21","POUTINGCAT":"\ud83d\ude3e","PRESENT":"\ud83c\udf81","PRINTER":"\ud83d\udda8","PROJECTOR":"\ud83d\udcfd","PUSHPIN":"\ud83d\udccc","QUESTION":"\u2753","RABBIT":"\ud83d\udc30","RADIOACTIVE":"\u2622","RADIOBUTTON":"\ud83d\udd18","RAINCLOUD":"\ud83c\udf27","RAT":"\ud83d\udc00","RECYCLE":"\u267b","REGISTERED":"\u00ae","RELIEVED":"\ud83d\ude0c","ROBOT":"\ud83e\udd16","ROCKET":"\ud83d\ude80","ROLLING":"\ud83d\ude44","ROOSTER":"\ud83d\udc13","RULER":"\ud83d\udccf","SATELLITE":"\ud83d\udef0","SAVE":"\ud83d\udcbe","SCHOOL":"\ud83c\udfeb","SCISSORS":"\u2702","SCREAMING":"\ud83d\ude31","SCROLL":"\ud83d\udcdc","SEAT":"\ud83d\udcba","SEEDLING":"\ud83c\udf31","SEENOEVIL":"\ud83d\ude48","SHIELD":"\ud83d\udee1","SHIP":"\ud83d\udea2","SHOCKED":"\ud83d\ude32","SHOWER":"\ud83d\udebf","SLEEPING":"\ud83d\ude34","SLEEPY":"\ud83d\ude2a","SLIDER":"\ud83c\udf9a","SLOT":"\ud83c\udfb0","SMILE":"\ud83d\ude42","SMILING":"\ud83d\ude03","SMILINGCLOSEDEYES":"\ud83d\ude06","SMILINGEYES":"\ud83d\ude04","SMILINGSWEAT":"\ud83d\ude05","SMIRK":"\ud83d\ude0f","SNAIL":"\ud83d\udc0c","SNAKE":"\ud83d\udc0d","SOCCER":"\u26bd","SOS":"\ud83c\udd98","SPEAKER":"\ud83d\udd08","SPEAKEROFF":"\ud83d\udd07","SPEAKNOEVIL":"\ud83d\ude4a","SPIDER":"\ud83d\udd77","SPIDERWEB":"\ud83d\udd78","STAR":"\u2b50","STOP":"\u26d4","STOPWATCH":"\u23f1","SULK":"\ud83d\ude26","SUNFLOWER":"\ud83c\udf3b","SUNGLASSES":"\ud83d\udd76","SYRINGE":"\ud83d\udc89","TAKEOFF":"\ud83d\udeeb","TAXI":"\ud83d\ude95","TELESCOPE":"\ud83d\udd2d","TEMPORATURE":"\ud83e\udd12","TENNIS":"\ud83c\udfbe","THERMOMETER":"\ud83c\udf21","THINKING":"\ud83e\udd14","THUNDERCLOUD":"\u26c8","TICKBOX":"\u2705","TICKET":"\ud83c\udf9f","TIRED":"\ud83d\ude2b","TOILET":"\ud83d\udebd","TOMATO":"\ud83c\udf45","TONGUE":"\ud83d\ude1b","TOOLS":"\ud83d\udee0","TORCH":"\ud83d\udd26","TORNADO":"\ud83c\udf2a","TOUNG2":"\ud83d\ude1d","TRADEMARK":"\u2122","TRAFFICLIGHT":"\ud83d\udea6","TRASH":"\ud83d\uddd1","TREE":"\ud83c\udf32","TRIANGLE_LEFT":"\u25c0","TRIANGLE_RIGHT":"\u25b6","TRIANGLEDOWN":"\ud83d\udd3b","TRIANGLEUP":"\ud83d\udd3a","TRIANGULARFLAG":"\ud83d\udea9","TROPHY":"\ud83c\udfc6","TRUCK":"\ud83d\ude9a","TRUMPET":"\ud83c\udfba","TURKEY":"\ud83e\udd83","TURTLE":"\ud83d\udc22","UMBRELLA":"\u26f1","UNAMUSED":"\ud83d\ude12","UPARROW":"\u2b06","UPSIDEDOWN":"\ud83d\ude43","WARNING":"\u26a0","WATCH":"\u231a","WAVING":"\ud83d\udc4b","WEARY":"\ud83d\ude29","WEARYCAT":"\ud83d\ude40","WHITEFLAG":"\ud83c\udff3","WINEGLASS":"\ud83c\udf77","WINK":"\ud83d\ude09","WORRIED":"\ud83d\ude1f","WRENCH":"\ud83d\udd27","X":"\u274c","YEN":"\ud83d\udcb4","ZIPPERFACE":"\ud83e\udd10","UNDEFINED":"","":""};statuses={F:'FATAL',B:'BUG',C:'CRITICAL',E:'ERROR',W:'WARNING',I:'INFO',IM:'IMPORTANT',D:'DEBUG',L:'LOG',CO:'CONSTANT',FU:'FUNCTION',R:'RETURN',V:'VARIABLE',S:'STACK',RE:'RESULT',ST:'STOPPER',TI:'TIMER',T:'TRACE'};LOG_LEVEL={NONE:7,OFF:7,FATAL:6,ERROR:5,WARN:4,INFO:3,UNDEFINED:2,'':2,DEFAULT:2,DEBUG:2,TRACE:1,ON:0,ALL:0,};LOG_STATUS={OFF:LOG_LEVEL.OFF,NONE:LOG_LEVEL.OFF,NO:LOG_LEVEL.OFF,NOPE:LOG_LEVEL.OFF,FALSE:LOG_LEVEL.OFF,FATAL:LOG_LEVEL.FATAL,BUG:LOG_LEVEL.ERROR,CRITICAL:LOG_LEVEL.ERROR,ERROR:LOG_LEVEL.ERROR,WARNING:LOG_LEVEL.WARN,INFO:LOG_LEVEL.INFO,IMPORTANT:LOG_LEVEL.INFO,DEBUG:LOG_LEVEL.DEBUG,LOG:LOG_LEVEL.DEBUG,STACK:LOG_LEVEL.DEBUG,CONSTANT:LOG_LEVEL.DEBUG,FUNCTION:LOG_LEVEL.DEBUG,VARIABLE:LOG_LEVEL.DEBUG,RETURN:LOG_LEVEL.DEBUG,RESULT:LOG_LEVEL.TRACE,STOPPER:LOG_LEVEL.TRACE,TIMER:LOG_LEVEL.TRACE,TRACE:LOG_LEVEL.TRACE,ALL:LOG_LEVEL.ALL,YES:LOG_LEVEL.ALL,YEP:LOG_LEVEL.ALL,TRUE:LOG_LEVEL.ALL};var 
logFile,logFolder;var LOG=function(message,status,icon){if(LOG.level!==LOG_LEVEL.OFF&&(LOG.write||LOG.store)&&LOG.arguments.length)return LOG.addMessage(message,status,icon);};LOG.logDecodeLevel=function(level){if(level==~~level)return Math.abs(level);var lev;level+='';level=level.toUpperCase();if(level in statuses){level=statuses[level];}lev=LOG_LEVEL[level];if(lev!==undefined)return lev;lev=LOG_STATUS[level];if(lev!==undefined)return lev;return LOG_LEVEL.DEFAULT;};LOG.write=write;LOG.store=store;LOG.level=LOG.logDecodeLevel(level);LOG.status=defaultStatus;LOG.addMessage=function(message,status,icon){var date=new Date(),count,bool,logStatus;if(status&&status.constructor.name==='String'){status=status.toUpperCase();status=statuses[status]||status;}else status=LOG.status;logStatus=LOG_STATUS[status]||LOG_STATUS.ALL;if(logStatus999)?'['+LOG.count+'] ':(' ['+LOG.count+'] ').slice(-7);message=count+status+icon+(message instanceof Object?message.toSource():message)+date;if(LOG.store){stack.push(message);}if(LOG.write){bool=file&&file.writable&&logFile.writeln(message);if(!bool){file.writable=true;LOG.setFile(logFile);logFile.writeln(message);}}LOG.count++;return true;};var logNewFile=function(file,isCookie,overwrite){file.encoding='UTF-8';file.lineFeed=($.os[0]=='M')?'Macintosh':' Windows';if(isCookie)return file.open(overwrite?'w':'e')&&file;file.writable=LOG.write;logFile=file;logFolder=file.parent;if(continuing){LOG.count=LOG.setCount(file);}return(!LOG.write&&file||(file.open('a')&&file));};LOG.setFile=function(file,isCookie,overwrite){var bool,folder,fileName,suffix,newFileName,f,d,safeFileName;d=new Date();f=$.stack.split("\n")[0].replace(/^\[\(?/,'').replace(/\)?\]$/,'');if(f==~~f){f=$.fileName.replace(/[^\/]+\//g,'');}safeFileName=File.encode((isCookie?'/COOKIE_':'/LOG_')+f.replace(/^\//,'')+'_'+(1900+d.getYear())+(''+d).replace(/...(...)(..).+/,'_$1_$2')+(isCookie?'.txt':'.log'));if(file&&file.constructor.name=='String'){file=(file.match('/'))?new File(file):new File((logFolder||Folder.temp)+'/'+file);}if(file instanceof File){folder=file.parent;bool=folder.exists||folder.create();if(!bool)folder=Folder.temp;fileName=File.decode(file.name);suffix=fileName.match(/\.[^.]+$/);suffix=suffix?suffix[0]:'';fileName='/'+fileName;newFileName=fileName.replace(/\.[^.]+$/,'')+'_'+(+(new Date())+suffix);f=logNewFile(file,isCookie,overwrite);if(f)return f;f=logNewFile(new File(folder+newFileName),isCookie,overwrite);if(f)return f;f=logNewFile(new File(folder+safeFileName),isCookie,overwrite);if(f)return f;if(folder!=Folder.temp){f=logNewFile(new File(Folder.temp+fileName),isCookie,overwrite);if(f)return f;f=logNewFile(new File(Folder.temp+safeFileName),isCookie,overwrite);return f||new File(Folder.temp+safeFileName);}}return LOG.setFile(((logFile&&!isCookie)?new File(logFile):new File(Folder.temp+safeFileName)),isCookie,overwrite );};LOG.setCount=function(file){if(~~file===file){LOG.count=file;return LOG.count;}if(file===undefined){file=logFile;}if(file&&file.constructor===String){file=new File(file);}var logNumbers,contents;if(!file.length||!file.exists){LOG.count=1;return 1;}file.open('r');file.encoding='utf-8';file.seek(10000,2);contents='\n'+file.read();logNumbers=contents.match(/\n{0,3}\[\d+\] \[\w+\]+/g);if(logNumbers){logNumbers=+logNumbers[logNumbers.length-1].match(/\d+/)+1;file.close();LOG.count=logNumbers;return logNumbers;}if(file.length<10001){file.close();LOG.count=1;return 1;}file.seek(10000000,2);contents='\n'+file.read();logNumbers=contents.match(/\n{0,3}\[\d+\] 
\[\w+\]+/g);if(logNumbers){logNumbers=+logNumbers[logNumbers.length-1].match(/\d+/)+1;file.close();LOG.count=logNumbers;return logNumbers;}file.close();LOG.count=1;return 1;};LOG.setLevel=function(level){LOG.level=LOG.logDecodeLevel(level);return LOG.level;};LOG.setStatus=function(status){status=(''+status).toUpperCase();LOG.status=statuses[status]||status;return LOG.status;};LOG.cookie=function(file,level,overwrite,setLevel){var log,cookie;if(!file){file={file:file};}if(file&&(file.constructor===String||file.constructor===File)){file={file:file};}log=file;if(log.level===undefined){log.level=(level!==undefined)?level:'NONE';}if(log.overwrite===undefined){log.overwrite=(overwrite!==undefined)?overwrite:false;}if(log.setLevel===undefined){log.setLevel=(setLevel!==undefined)?setLevel:true;}setLevel=log.setLevel;overwrite=log.overwrite;level=log.level;file=log.file;file=LOG.setFile(file,true,overwrite);if(overwrite){file.write(level);}else{cookie=file.read();if(cookie.length){level=cookie;}else{file.write(level);}}file.close();if(setLevel){LOG.setLevel(level);}return{path:file,level:level};};LOG.args=function(args,funct,line){if(LOG.level>LOG_STATUS.FUNCTION)return;if(!(args&&(''+args.constructor).replace(/\s+/g,'')==='functionObject(){[nativecode]}'))return;if(!LOG.args.STRIP_COMMENTS){LOG.args.STRIP_COMMENTS=/((\/.*$)|(\/\*[\s\S]*?\*\/))/mg;}if(!LOG.args.ARGUMENT_NAMES){LOG.args.ARGUMENT_NAMES=/([^\s,]+)/g;}if(!LOG.args.OUTER_BRACKETS){LOG.args.OUTER_BRACKETS=/^\((.+)?\)$/;}if(!LOG.args.NEW_SOMETHING){LOG.args.NEW_SOMETHING=/^new \w+\((.+)?\)$/;}var functionString,argumentNames,stackInfo,report,functionName,arg,argsL,n,argName,argValue,argsTotal;if(funct===~~funct){line=funct;}if(!(funct instanceof Function)){funct=args.callee;}if(!(funct instanceof Function))return;functionName=funct.name;functionString=(''+funct).replace(LOG.args.STRIP_COMMENTS,'');argumentNames=functionString.slice(functionString.indexOf('(')+1,functionString.indexOf(')')).match(LOG.args.ARGUMENT_NAMES);argumentNames=argumentNames||[];report=[];report.push('--------------');report.push('Function Data:');report.push('--------------');report.push('Function Name:'+functionName);argsL=args.length;stackInfo=$.stack.split(/[\n\r]/);stackInfo.pop();stackInfo=stackInfo.join('\n ');report.push('Call stack:'+stackInfo);if(line){report.push('Function Line around:'+line);}report.push('Arguments Provided:'+argsL);report.push('Named Arguments:'+argumentNames.length);if(argumentNames.length){report.push('Arguments Names:'+argumentNames.join(','));}if(argsL){report.push('----------------');report.push('Argument Values:');report.push('----------------');}argsTotal=Math.max(argsL,argumentNames.length);for(n=0;n=argsL){argValue='NO VALUE PROVIDED';}else if(arg===undefined){argValue='undefined';}else if(arg===null){argValue='null';}else{argValue=arg.toSource().replace(LOG.args.OUTER_BRACKETS,'$1').replace(LOG.args.NEW_SOMETHING,'$1');}report.push((argName?argName:'arguments['+n+']')+':'+argValue);}report.push('');report=report.join('\n ');LOG(report,'f');return report;};LOG.stack=function(reverse){var st=$.stack.split('\n');st.pop();st.pop();if(reverse){st.reverse();}return LOG(st.join('\n '),'s');};LOG.values=function(values){var n,value,map=[];if(!(values instanceof Object||values instanceof Array)){return;}if(!LOG.values.OUTER_BRACKETS){LOG.values.OUTER_BRACKETS=/^\((.+)?\)$/;}if(!LOG.values.NEW_SOMETHING){LOG.values.NEW_SOMETHING=/^new \w+\((.+)?\)$/;}for(n in values){try{value=values[n];if(value===undefined){value='undefined';}else 
if(value===null){value='null';}else{value=value.toSource().replace(LOG.values.OUTER_BRACKETS,'$1').replace(LOG.values.NEW_SOMETHING,'$1');}}catch(e){value='\uD83D\uDEAB '+e;}map.push(n+':'+value);}if(map.length){map=map.join('\n ')+'\n ';return LOG(map,'v');}};LOG.reset=function(all){stack.length=0;LOG.count=1;if(all!==false){if(logFile instanceof File){logFile.close();}logFile=LOG.store=LOG.writeToFile=undefined;LOG.write=true;logFolder=Folder.temp;logTime=new Date();logPoint='After Log Reset';}};LOG.stopper=function(message){var newLogTime,t,m,newLogPoint;newLogTime=new Date();newLogPoint=(LOG.count!==undefined)?'LOG#'+LOG.count:'BEFORE LOG#1';LOG.time=t=newLogTime-logTime;if(message===false){return;}message=message||'Stopper start point';t=LOG.prettyTime(t);m=message+'\n '+'From '+logPoint+' to '+newLogPoint+' took '+t+' Starting '+logTime+' '+logTime.getMilliseconds()+'ms'+' Ending '+newLogTime+' '+newLogTime.getMilliseconds()+'ms';LOG(m,'st');logPoint=newLogPoint;logTime=newLogTime;return m;};LOG.start=function(message){var t=new Date();times.push([t,(message!==undefined)?message+'':'']);};LOG.stop=function(message){if(!times.length)return;message=(message)?message+' ':'';var nt,startLog,ot,om,td,m;nt=new Date();startLog=times.pop();ot=startLog[0];om=startLog[1];td=nt-ot;if(om.length){om+=' ';}m=om+'STARTED ['+ot+' '+ot.getMilliseconds()+'ms]\n '+message+'FINISHED ['+nt+' '+nt.getMilliseconds()+'ms]\n TOTAL TIME ['+LOG.prettyTime(td)+']';LOG(m,'ti');return m;};LOG.prettyTime=function(t){var h,m,s,ms;h=Math.floor(t / 3600000);m=Math.floor((t % 3600000)/ 60000);s=Math.floor((t % 60000)/ 1000);ms=t % 1000;t=(!t)?'<1ms':((h)?h+' hours ':'')+((m)?m+' minutes ':'')+((s)?s+' seconds ':'')+((ms&&(h||m||s))?'&':'')+((ms)?ms+'ms':'');return t;};LOG.get=function(){if(!stack.length)return 'THE LOG IS NOT SET TO STORE';var a=fetchLogLines(arguments);return a?'\n'+a.join('\n'):'NO LOGS AVAILABLE';};var fetchLogLines=function(){var args=arguments[0];if(!args.length)return stack;var c,n,l,a=[],ln,start,end,j,sl;l=args.length;sl=stack.length-1;n=0;for(c=0;cln)?sl+ln+1:ln-1;if(ln>=0&&ln<=sl)a[n++]=stack[ln];}else if(ln instanceof Array&&ln.length===2){start=ln[0];end=ln[1];if(!(~~start===start&&~~end===end))continue;start=(0>start)?sl+start+1:start-1;end=(0>end)?sl+end+1:end-1;start=Math.max(Math.min(sl,start),0);end=Math.min(Math.max(end,0),sl);if(start<=end)for(j=start;j<=end;j++)a[n++]=stack[j];else for(j=start;j>=end;j--)a[n++]=stack[j];}}return(n)?a:false;};LOG.file=function(){return logFile;};LOG.openFolder=function(){if(logFolder)return logFolder.execute();};LOG.show=LOG.execute=function(){if(logFile)return logFile.execute();};LOG.close=function(){if(logFile)return logFile.close();};LOG.setFile(file);if(!$.summary.difference){$.summary.difference=function(){return $.summary().replace(/ *([0-9]+)([^ ]+)(\n?)/g,$.summary.updateSnapshot );};}if(!$.summary.updateSnapshot){$.summary.updateSnapshot=function(full,count,name,lf){var snapshot=$.summary.snapshot;count=Number(count);var prev=snapshot[name]?snapshot[name]:0;snapshot[name]=count;var diff=count-prev;if(diff===0)return "";return " ".substring(String(diff).length)+diff+" "+name+lf;};}if(!$.summary.snapshot){$.summary.snapshot=[];$.summary.difference();}$.gc();$.gc();$.summary.difference();LOG.sumDiff=function(message){$.gc();$.gc();var diff=$.summary.difference();if(diff.length<8){diff=' - NONE -';}if(message===undefined){message='';}message+=diff;return LOG('$.summary.difference():'+message,'v');};return LOG;};
-
-var log = new LogFactory('myLog.log'); // => creates the new log factory - put the full path where you want the log file
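-// Illustrative usage, not from the original file, based on the factory above:
-//     log('Panel opened');          // plain LOG entry
-//     log('Something broke', 'E');  // logged with ERROR status
-//     log.show();                   // open the log file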
-
-function getEnv(variable){
- return $.getenv(variable);
-}
-
-function fileOpen(path){
- return app.open(new File(path));
-}
-
-function getLayerTypeWithName(layerName) {
- var type = 'NA';
- var nameParts = layerName.split('_');
- var namePrefix = nameParts[0];
- namePrefix = namePrefix.toLowerCase();
- switch (namePrefix) {
- case 'guide':
- case 'tl':
- case 'tr':
- case 'bl':
- case 'br':
- type = 'GUIDE';
- break;
- case 'fg':
- type = 'FG';
- break;
- case 'bg':
- type = 'BG';
- break;
- case 'obj':
- default:
- type = 'OBJ';
- break;
- }
-
- return type;
-}
-
-function getLayers() {
- /**
- * Get json representation of list of layers.
- * Much faster this way than DOM traversal (2s vs 45s on the same file)
- *
- * Format of single layer info:
- * id : number
- * name: string
- * group: boolean - true if layer is a group
- * parents: array - list of ids of parent groups, useful for selecting
- * all child layers of a parent layerSet (eg. a group)
- * type: string - type of layer guessed from its name
- * visible:boolean - true if visible
- **/
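- // Illustrative example, not from the original file; the values are made up:
- //     {"id": 12, "name": "FG_character", "color_code": "none", "group": false,
- //      "parents": [8], "type": "FG", "visible": true}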
- if (documents.length == 0){
- return '[]';
- }
- var ref1 = new ActionReference();
- ref1.putEnumerated(charIDToTypeID('Dcmn'), charIDToTypeID('Ordn'),
- charIDToTypeID('Trgt'));
- var count = executeActionGet(ref1).getInteger(charIDToTypeID('NmbL'));
-
- // get all layer names
- var layers = [];
- var layer = {};
-
- var parents = [];
- for (var i = count; i >= 1; i--) {
- var layer = {};
- var ref2 = new ActionReference();
- ref2.putIndex(charIDToTypeID('Lyr '), i);
-
- var desc = executeActionGet(ref2); // Access layer index #i
- var layerSection = typeIDToStringID(desc.getEnumerationValue(
- stringIDToTypeID('layerSection')));
-
- layer.id = desc.getInteger(stringIDToTypeID("layerID"));
- layer.name = desc.getString(stringIDToTypeID("name"));
- layer.color_code = typeIDToStringID(desc.getEnumerationValue(stringIDToTypeID('color')));
- layer.group = false;
- layer.parents = parents.slice();
- layer.type = getLayerTypeWithName(layer.name);
- layer.visible = desc.getBoolean(stringIDToTypeID("visible"));
- //log(" name: " + layer.name + " groupId " + layer.groupId +
- //" group " + layer.group);
- if (layerSection == 'layerSectionStart') { // Group start and end
- parents.push(layer.id);
- layer.group = true;
- }
- if (layerSection == 'layerSectionEnd') {
- parents.pop();
- continue;
- }
- layers.push(JSON.stringify(layer));
- }
- try{
- var bck = activeDocument.backgroundLayer;
- layer.id = bck.id;
- layer.name = bck.name;
- layer.group = false;
- layer.parents = [];
- layer.type = 'background';
- layer.visible = bck.visible;
- layers.push(JSON.stringify(layer));
- }catch(e){
- // do nothing, no background layer
- };
- //log("layers " + layers);
- return '[' + layers + ']';
-}
-
-function setVisible(layer_id, visibility){
- /**
- * Sets particular 'layer_id' to 'visibility' if true > show
- **/
- var desc = new ActionDescriptor();
- var ref = new ActionReference();
- ref.putIdentifier(stringIDToTypeID("layer"), layer_id);
- desc.putReference(stringIDToTypeID("null"), ref);
-
- executeAction(visibility?stringIDToTypeID("show"):stringIDToTypeID("hide"),
- desc, DialogModes.NO);
-
-}
-
-function getHeadline(){
- /**
- * Returns headline of current document with metadata
- *
- **/
- if (documents.length == 0){
- return '';
- }
- var headline = app.activeDocument.info.headline;
-
- return headline;
-}
-
-function isSaved(){
- return app.activeDocument.saved;
-}
-
-function save(){
- /** Saves active document **/
- return app.activeDocument.save();
-}
-
-function saveAs(output_path, ext, as_copy){
- /** Exports scene to various formats
- *
- * Currently implemented: 'jpg', 'png', 'psd'
- *
- * output_path - escaped file path on local system
- * ext - extension for export
- * as_copy - create copy, do not overwrite
- *
- * */
- var saveName = output_path;
- var saveOptions;
- if (ext == 'jpg'){
- saveOptions = new JPEGSaveOptions();
- saveOptions.quality = 12;
- saveOptions.embedColorProfile = true;
- saveOptions.formatOptions = FormatOptions.PROGRESSIVE;
- if(saveOptions.formatOptions == FormatOptions.PROGRESSIVE){
- saveOptions.scans = 5};
- saveOptions.matte = MatteType.NONE;
- }
- if (ext == 'png'){
- saveOptions = new PNGSaveOptions();
- saveOptions.interlaced = true;
- saveOptions.transparency = true;
- }
- if (ext == 'psd'){
- saveOptions = null;
- return app.activeDocument.saveAs(new File(saveName));
- }
- if (ext == 'psb'){
- return savePSB(output_path);
- }
-
- return app.activeDocument.saveAs(new File(saveName), saveOptions, as_copy);
-
-}
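-// Illustrative usage, not from the original file; the paths are made up:
-//     saveAs('C:/renders/review.jpg', 'jpg', true);    // jpg copy for review
-//     saveAs('C:/renders/layers.png', 'png', true);    // png with transparency
-//     saveAs('C:/work/scene_v002.psd', 'psd', false);  // regular Save As to psd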
-
-function getActiveDocumentName(){
- /**
- * Returns file name of active document
- * */
- if (documents.length == 0){
- return null;
- }
- return app.activeDocument.name;
-}
-
-function getActiveDocumentFullName(){
- /**
- * Returns file name of active document with file path.
- * activeDocument.fullName returns path in URI (eg /c/.. instead of c:/)
- * */
- if (documents.length == 0){
- return null;
- }
- var f = new File(app.activeDocument.fullName);
- var path = f.fsName;
- f.close();
- return path;
-}
-
-function imprint(payload){
- /**
- * Sets headline content of current document with metadata. Stores
- * information about assets created through AYON.
- * Content accessible in PS through File > File Info
- *
- **/
- app.activeDocument.info.headline = payload;
-}
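-// Illustrative usage, not from the original file; the payload shape is an assumption:
-//     imprint(JSON.stringify({"containers": [], "created_by": "AYON"}));
-//     var stored = JSON.parse(getHeadline() || '{}');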
-
-function getSelectedLayers(doc) {
- /**
- * Returns json representation of currently selected layers.
- * Works in three steps - 1) creates new group with selected layers
- * 2) traverses this group
- * 3) deletes newly created group, not needed
- * Bit weird, but Adobe..
- **/
- if (doc == null){
- doc = app.activeDocument;
- }
-
- var selLayers = [];
- var _grp = groupSelectedLayers(doc);
-
- var group = doc.activeLayer;
- var layers = group.layers;
-
- // // group is fake at this point
- // var itself_name = '';
- // if (layers){
- // itself_name = layers[0].name;
- // }
-
-
- for (var i = 0; i < layers.length; i++) {
- var layer = {};
- layer.id = layers[i].id;
- layer.name = layers[i].name;
- var long_names = _get_parents_names(group.parent, layers[i].name);
- var t = layers[i].kind;
- if ((typeof t !== 'undefined') &&
- (layers[i].kind.toString() == 'LayerKind.NORMAL')){
- layer.group = false;
- }else{
- layer.group = true;
- }
- layer.long_name = long_names;
-
- selLayers.push(layer);
- }
-
- _undo();
-
- return JSON.stringify(selLayers);
-};
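-// Illustrative example of the returned value, not from the original file; values are made up:
-//     '[{"id": 31, "name": "fg_hero", "group": false, "long_name": ["fg_hero", "CHARACTERS"]}]'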
-
-function selectLayers(selectedLayers){
- /**
- * Selects layers from list of ids
- **/
- selectedLayers = JSON.parse(selectedLayers);
- var layers = new Array();
- var id54 = charIDToTypeID( "slct" );
- var desc12 = new ActionDescriptor();
- var id55 = charIDToTypeID( "null" );
- var ref9 = new ActionReference();
-
- var existing_layers = JSON.parse(getLayers());
- var existing_ids = [];
- for (var y = 0; y < existing_layers.length; y++){
- existing_ids.push(existing_layers[y]["id"]);
- }
- for (var i = 0; i < selectedLayers.length; i++) {
- // a check to see if the id still exists
- var id = selectedLayers[i];
- if(existing_ids.toString().indexOf(id)>=0){
- layers[i] = charIDToTypeID( "Lyr " );
- ref9.putIdentifier(layers[i], id);
- }
- }
- desc12.putReference( id55, ref9 );
- var id58 = charIDToTypeID( "MkVs" );
- desc12.putBoolean( id58, false );
- executeAction( id54, desc12, DialogModes.NO );
-}
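-// Illustrative usage, not from the original file: ids are passed as a JSON array string,
-//     selectLayers('[12, 53]');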
-
-function groupSelectedLayers(doc, name) {
- /**
- * Groups selected layers into new group.
- * Returns json representation of Layer for server to consume
- *
- * Args:
- * doc(activeDocument)
- * name (str): new name of created group
- **/
- if (doc == null){
- doc = app.activeDocument;
- }
-
- var desc = new ActionDescriptor();
- var ref = new ActionReference();
- ref.putClass( stringIDToTypeID('layerSection') );
- desc.putReference( charIDToTypeID('null'), ref );
- var lref = new ActionReference();
- lref.putEnumerated( charIDToTypeID('Lyr '), charIDToTypeID('Ordn'),
- charIDToTypeID('Trgt') );
- desc.putReference( charIDToTypeID('From'), lref);
- executeAction( charIDToTypeID('Mk '), desc, DialogModes.NO );
-
- var group = doc.activeLayer;
- if (name){
- // Add special character to highlight group that will be published
- group.name = name;
- }
- var layer = {};
- layer.id = group.id;
- layer.name = name; // keep name clean
- layer.group = true;
-
- layer.long_name = _get_parents_names(group, name);
-
- return JSON.stringify(layer);
-};
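-// Illustrative usage, not from the original file:
-//     var grp = JSON.parse(groupSelectedLayers(app.activeDocument, 'myPublishGroup'));
-//     // grp.id can then be used with selectLayers or deleteLayer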
-
-function importSmartObject(path, name, link){
- /**
- * Creates new layer with an image from 'path'
- *
- * path: absolute path to loaded file
- * name: sets name of the newly created layer
- * link: when true the file is placed as a linked smart object instead of being embedded
- *
- **/
- var desc1 = new ActionDescriptor();
- desc1.putPath( app.charIDToTypeID("null"), new File(path) );
- link = link || false;
- if (link) {
- desc1.putBoolean( app.charIDToTypeID('Lnkd'), true );
- }
-
- desc1.putEnumerated(app.charIDToTypeID("FTcs"), app.charIDToTypeID("QCSt"),
- app.charIDToTypeID("Qcsa"));
- var desc2 = new ActionDescriptor();
- desc2.putUnitDouble(app.charIDToTypeID("Hrzn"),
- app.charIDToTypeID("#Pxl"), 0.0);
- desc2.putUnitDouble(app.charIDToTypeID("Vrtc"),
- app.charIDToTypeID("#Pxl"), 0.0);
-
- desc1.putObject(charIDToTypeID("Ofst"), charIDToTypeID("Ofst"), desc2);
- executeAction(charIDToTypeID("Plc " ), desc1, DialogModes.NO);
-
- var docRef = app.activeDocument;
- var currentActivelayer = app.activeDocument.activeLayer;
- if (name){
- currentActivelayer.name = name;
- }
- var layer = {};
- layer.id = currentActivelayer.id;
- layer.name = currentActivelayer.name;
- return JSON.stringify(layer);
-}
-
-function replaceSmartObjects(layer_id, path, name){
- /**
- * Updates content of 'layer' with an image from 'path'
- *
- **/
-
- var desc = new ActionDescriptor();
- var ref = new ActionReference();
- ref.putIdentifier(stringIDToTypeID("layer"), layer_id);
- desc.putReference(stringIDToTypeID("null"), ref);
-
- desc.putPath(charIDToTypeID('null'), new File(path) );
- desc.putInteger(charIDToTypeID("PgNm"), 1);
-
- executeAction(stringIDToTypeID('placedLayerReplaceContents'),
- desc, DialogModes.NO );
- var currentActivelayer = app.activeDocument.activeLayer;
- if (name){
- currentActivelayer.name = name;
- }
-}
-
-function createGroup(name){
- /**
- * Creates new group with a 'name'
- * Because of asynchronous nature, only group.id is available
- **/
- group = app.activeDocument.layerSets.add();
- // Add special character to highlight group that will be published
- group.name = name;
-
- return group.id; // only id available at this time :|
-}
-
-function deleteLayer(layer_id){
- /***
- * Deletes layer by its layer_id
- *
- * layer_id (int)
- **/
- var d = new ActionDescriptor();
- var r = new ActionReference();
-
- r.putIdentifier(stringIDToTypeID("layer"), layer_id);
- d.putReference(stringIDToTypeID("null"), r);
- executeAction(stringIDToTypeID("delete"), d, DialogModes.NO);
-}
-
-function _undo() {
- executeAction(charIDToTypeID("undo"), undefined, DialogModes.NO);
-};
-
-function savePSB(output_path){
- /***
- * Saves file as .psb to 'output_path'
- *
- * output_path (str)
- **/
- var desc1 = new ActionDescriptor();
- var desc2 = new ActionDescriptor();
- desc2.putBoolean( stringIDToTypeID('maximizeCompatibility'), true );
- desc1.putObject( charIDToTypeID('As '), charIDToTypeID('Pht8'), desc2 );
- desc1.putPath( charIDToTypeID('In '), new File(output_path) );
- desc1.putBoolean( charIDToTypeID('LwCs'), true );
- executeAction( charIDToTypeID('save'), desc1, DialogModes.NO );
-}
-
-function close(){
- executeAction(stringIDToTypeID("quit"), undefined, DialogModes.NO );
-}
-
-function renameLayer(layer_id, new_name){
- /***
- * Renames 'layer_id' to 'new_name'
- *
- * Via Action (fast)
- *
- * Args:
- * layer_id(int)
- * new_name(str)
- *
- **/
- var doc = app.activeDocument;
- selectLayers('['+layer_id+']');
-
- doc.activeLayer.name = new_name;
-}
-
-function _get_parents_names(layer, itself_name){
- var long_names = [itself_name];
- while (layer.parent){
- if (layer.typename != "LayerSet"){
- break;
- }
- long_names.push(layer.name);
- layer = layer.parent;
- }
- return long_names;
-}
-
-// triggers when panel is opened, good for debugging
-//log(getActiveDocumentName());
-// log.show();
-// var a = app.activeDocument.activeLayer;
-// log(a);
-//getSelectedLayers();
-// importSmartObject("c:/projects/test.jpg", "a aaNewLayer", true);
-// log("dpc");
- // replaceSmartObjects(153, "▼Jungle_imageTest_001", "c:/projects/test_project_test_asset_TestTask_v001.png");
\ No newline at end of file
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/host/json.js b/server_addon/photoshop/client/ayon_photoshop/api/extension/host/json.js
deleted file mode 100644
index 397349bbfd..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/extension/host/json.js
+++ /dev/null
@@ -1,530 +0,0 @@
-// json2.js
-// 2017-06-12
-// Public Domain.
-// NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
-
-// USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
-// NOT CONTROL.
-
-// This file creates a global JSON object containing two methods: stringify
-// and parse. This file provides the ES5 JSON capability to ES3 systems.
-// If a project might run on IE8 or earlier, then this file should be included.
-// This file does nothing on ES5 systems.
-
-// JSON.stringify(value, replacer, space)
-// value any JavaScript value, usually an object or array.
-// replacer an optional parameter that determines how object
-// values are stringified for objects. It can be a
-// function or an array of strings.
-// space an optional parameter that specifies the indentation
-// of nested structures. If it is omitted, the text will
-// be packed without extra whitespace. If it is a number,
-// it will specify the number of spaces to indent at each
-// level. If it is a string (such as "\t" or " "),
-// it contains the characters used to indent at each level.
-// This method produces a JSON text from a JavaScript value.
-// When an object value is found, if the object contains a toJSON
-// method, its toJSON method will be called and the result will be
-// stringified. A toJSON method does not serialize: it returns the
-// value represented by the name/value pair that should be serialized,
-// or undefined if nothing should be serialized. The toJSON method
-// will be passed the key associated with the value, and this will be
-// bound to the value.
-
-// For example, this would serialize Dates as ISO strings.
-
-// Date.prototype.toJSON = function (key) {
-// function f(n) {
-// // Format integers to have at least two digits.
-// return (n < 10)
-// ? "0" + n
-// : n;
-// }
-// return this.getUTCFullYear() + "-" +
-// f(this.getUTCMonth() + 1) + "-" +
-// f(this.getUTCDate()) + "T" +
-// f(this.getUTCHours()) + ":" +
-// f(this.getUTCMinutes()) + ":" +
-// f(this.getUTCSeconds()) + "Z";
-// };
-
-// You can provide an optional replacer method. It will be passed the
-// key and value of each member, with this bound to the containing
-// object. The value that is returned from your method will be
-// serialized. If your method returns undefined, then the member will
-// be excluded from the serialization.
-
-// If the replacer parameter is an array of strings, then it will be
-// used to select the members to be serialized. It filters the results
-// such that only members with keys listed in the replacer array are
-// stringified.
-
-// Values that do not have JSON representations, such as undefined or
-// functions, will not be serialized. Such values in objects will be
-// dropped; in arrays they will be replaced with null. You can use
-// a replacer function to replace those with JSON values.
-
-// JSON.stringify(undefined) returns undefined.
-
-// The optional space parameter produces a stringification of the
-// value that is filled with line breaks and indentation to make it
-// easier to read.
-
-// If the space parameter is a non-empty string, then that string will
-// be used for indentation. If the space parameter is a number, then
-// the indentation will be that many spaces.
-
-// Example:
-
-// text = JSON.stringify(["e", {pluribus: "unum"}]);
-// // text is '["e",{"pluribus":"unum"}]'
-
-// text = JSON.stringify(["e", {pluribus: "unum"}], null, "\t");
-// // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
-
-// text = JSON.stringify([new Date()], function (key, value) {
-// return this[key] instanceof Date
-// ? "Date(" + this[key] + ")"
-// : value;
-// });
-// // text is '["Date(---current time---)"]'
-
-// JSON.parse(text, reviver)
-// This method parses a JSON text to produce an object or array.
-// It can throw a SyntaxError exception.
-
-// The optional reviver parameter is a function that can filter and
-// transform the results. It receives each of the keys and values,
-// and its return value is used instead of the original value.
-// If it returns what it received, then the structure is not modified.
-// If it returns undefined then the member is deleted.
-
-// Example:
-
-// // Parse the text. Values that look like ISO date strings will
-// // be converted to Date objects.
-
-// myData = JSON.parse(text, function (key, value) {
-// var a;
-// if (typeof value === "string") {
-// a =
-// /^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
-// if (a) {
-// return new Date(Date.UTC(
-// +a[1], +a[2] - 1, +a[3], +a[4], +a[5], +a[6]
-// ));
-// }
-// return value;
-// }
-// });
-
-// myData = JSON.parse(
-// "[\"Date(09/09/2001)\"]",
-// function (key, value) {
-// var d;
-// if (
-// typeof value === "string"
-// && value.slice(0, 5) === "Date("
-// && value.slice(-1) === ")"
-// ) {
-// d = new Date(value.slice(5, -1));
-// if (d) {
-// return d;
-// }
-// }
-// return value;
-// }
-// );
-
-// This is a reference implementation. You are free to copy, modify, or
-// redistribute.
-
-/*jslint
- eval, for, this
-*/
-
-/*property
- JSON, apply, call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
- getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
- lastIndex, length, parse, prototype, push, replace, slice, stringify,
- test, toJSON, toString, valueOf
-*/
-
-
-// Create a JSON object only if one does not already exist. We create the
-// methods in a closure to avoid creating global variables.
-
-if (typeof JSON !== "object") {
- JSON = {};
-}
-
-(function () {
- "use strict";
-
- var rx_one = /^[\],:{}\s]*$/;
- var rx_two = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g;
- var rx_three = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g;
- var rx_four = /(?:^|:|,)(?:\s*\[)+/g;
- var rx_escapable = /[\\"\u0000-\u001f\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
- var rx_dangerous = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
-
- function f(n) {
- // Format integers to have at least two digits.
- return (n < 10)
- ? "0" + n
- : n;
- }
-
- function this_value() {
- return this.valueOf();
- }
-
- if (typeof Date.prototype.toJSON !== "function") {
-
- Date.prototype.toJSON = function () {
-
- return isFinite(this.valueOf())
- ? (
- this.getUTCFullYear()
- + "-"
- + f(this.getUTCMonth() + 1)
- + "-"
- + f(this.getUTCDate())
- + "T"
- + f(this.getUTCHours())
- + ":"
- + f(this.getUTCMinutes())
- + ":"
- + f(this.getUTCSeconds())
- + "Z"
- )
- : null;
- };
-
- Boolean.prototype.toJSON = this_value;
- Number.prototype.toJSON = this_value;
- String.prototype.toJSON = this_value;
- }
-
- var gap;
- var indent;
- var meta;
- var rep;
-
-
- function quote(string) {
-
-// If the string contains no control characters, no quote characters, and no
-// backslash characters, then we can safely slap some quotes around it.
-// Otherwise we must also replace the offending characters with safe escape
-// sequences.
-
- rx_escapable.lastIndex = 0;
- return rx_escapable.test(string)
- ? "\"" + string.replace(rx_escapable, function (a) {
- var c = meta[a];
- return typeof c === "string"
- ? c
- : "\\u" + ("0000" + a.charCodeAt(0).toString(16)).slice(-4);
- }) + "\""
- : "\"" + string + "\"";
- }
-
-
- function str(key, holder) {
-
-// Produce a string from holder[key].
-
- var i; // The loop counter.
- var k; // The member key.
- var v; // The member value.
- var length;
- var mind = gap;
- var partial;
- var value = holder[key];
-
-// If the value has a toJSON method, call it to obtain a replacement value.
-
- if (
- value
- && typeof value === "object"
- && typeof value.toJSON === "function"
- ) {
- value = value.toJSON(key);
- }
-
-// If we were called with a replacer function, then call the replacer to
-// obtain a replacement value.
-
- if (typeof rep === "function") {
- value = rep.call(holder, key, value);
- }
-
-// What happens next depends on the value's type.
-
- switch (typeof value) {
- case "string":
- return quote(value);
-
- case "number":
-
-// JSON numbers must be finite. Encode non-finite numbers as null.
-
- return (isFinite(value))
- ? String(value)
- : "null";
-
- case "boolean":
- case "null":
-
-// If the value is a boolean or null, convert it to a string. Note:
-// typeof null does not produce "null". The case is included here in
-// the remote chance that this gets fixed someday.
-
- return String(value);
-
-// If the type is "object", we might be dealing with an object or an array or
-// null.
-
- case "object":
-
-// Due to a specification blunder in ECMAScript, typeof null is "object",
-// so watch out for that case.
-
- if (!value) {
- return "null";
- }
-
-// Make an array to hold the partial results of stringifying this object value.
-
- gap += indent;
- partial = [];
-
-// Is the value an array?
-
- if (Object.prototype.toString.apply(value) === "[object Array]") {
-
-// The value is an array. Stringify every element. Use null as a placeholder
-// for non-JSON values.
-
- length = value.length;
- for (i = 0; i < length; i += 1) {
- partial[i] = str(i, value) || "null";
- }
-
-// Join all of the elements together, separated with commas, and wrap them in
-// brackets.
-
- v = partial.length === 0
- ? "[]"
- : gap
- ? (
- "[\n"
- + gap
- + partial.join(",\n" + gap)
- + "\n"
- + mind
- + "]"
- )
- : "[" + partial.join(",") + "]";
- gap = mind;
- return v;
- }
-
-// If the replacer is an array, use it to select the members to be stringified.
-
- if (rep && typeof rep === "object") {
- length = rep.length;
- for (i = 0; i < length; i += 1) {
- if (typeof rep[i] === "string") {
- k = rep[i];
- v = str(k, value);
- if (v) {
- partial.push(quote(k) + (
- (gap)
- ? ": "
- : ":"
- ) + v);
- }
- }
- }
- } else {
-
-// Otherwise, iterate through all of the keys in the object.
-
- for (k in value) {
- if (Object.prototype.hasOwnProperty.call(value, k)) {
- v = str(k, value);
- if (v) {
- partial.push(quote(k) + (
- (gap)
- ? ": "
- : ":"
- ) + v);
- }
- }
- }
- }
-
-// Join all of the member texts together, separated with commas,
-// and wrap them in braces.
-
- v = partial.length === 0
- ? "{}"
- : gap
- ? "{\n" + gap + partial.join(",\n" + gap) + "\n" + mind + "}"
- : "{" + partial.join(",") + "}";
- gap = mind;
- return v;
- }
- }
-
-// If the JSON object does not yet have a stringify method, give it one.
-
- if (typeof JSON.stringify !== "function") {
- meta = { // table of character substitutions
- "\b": "\\b",
- "\t": "\\t",
- "\n": "\\n",
- "\f": "\\f",
- "\r": "\\r",
- "\"": "\\\"",
- "\\": "\\\\"
- };
- JSON.stringify = function (value, replacer, space) {
-
-// The stringify method takes a value and an optional replacer, and an optional
-// space parameter, and returns a JSON text. The replacer can be a function
-// that can replace values, or an array of strings that will select the keys.
-// A default replacer method can be provided. Use of the space parameter can
-// produce text that is more easily readable.
-
- var i;
- gap = "";
- indent = "";
-
-// If the space parameter is a number, make an indent string containing that
-// many spaces.
-
- if (typeof space === "number") {
- for (i = 0; i < space; i += 1) {
- indent += " ";
- }
-
-// If the space parameter is a string, it will be used as the indent string.
-
- } else if (typeof space === "string") {
- indent = space;
- }
-
-// If there is a replacer, it must be a function or an array.
-// Otherwise, throw an error.
-
- rep = replacer;
- if (replacer && typeof replacer !== "function" && (
- typeof replacer !== "object"
- || typeof replacer.length !== "number"
- )) {
- throw new Error("JSON.stringify");
- }
-
-// Make a fake root object containing our value under the key of "".
-// Return the result of stringifying the value.
-
- return str("", {"": value});
- };
- }
-
-
-// If the JSON object does not yet have a parse method, give it one.
-
- if (typeof JSON.parse !== "function") {
- JSON.parse = function (text, reviver) {
-
-// The parse method takes a text and an optional reviver function, and returns
-// a JavaScript value if the text is a valid JSON text.
-
- var j;
-
- function walk(holder, key) {
-
-// The walk method is used to recursively walk the resulting structure so
-// that modifications can be made.
-
- var k;
- var v;
- var value = holder[key];
- if (value && typeof value === "object") {
- for (k in value) {
- if (Object.prototype.hasOwnProperty.call(value, k)) {
- v = walk(value, k);
- if (v !== undefined) {
- value[k] = v;
- } else {
- delete value[k];
- }
- }
- }
- }
- return reviver.call(holder, key, value);
- }
-
-
-// Parsing happens in four stages. In the first stage, we replace certain
-// Unicode characters with escape sequences. JavaScript handles many characters
-// incorrectly, either silently deleting them, or treating them as line endings.
-
- text = String(text);
- rx_dangerous.lastIndex = 0;
- if (rx_dangerous.test(text)) {
- text = text.replace(rx_dangerous, function (a) {
- return (
- "\\u"
- + ("0000" + a.charCodeAt(0).toString(16)).slice(-4)
- );
- });
- }
-
-// In the second stage, we run the text against regular expressions that look
-// for non-JSON patterns. We are especially concerned with "()" and "new"
-// because they can cause invocation, and "=" because it can cause mutation.
-// But just to be safe, we want to reject all unexpected forms.
-
-// We split the second stage into 4 regexp operations in order to work around
-// crippling inefficiencies in IE's and Safari's regexp engines. First we
-// replace the JSON backslash pairs with "@" (a non-JSON character). Second, we
-// replace all simple value tokens with "]" characters. Third, we delete all
-// open brackets that follow a colon or comma or that begin the text. Finally,
-// we look to see that the remaining characters are only whitespace or "]" or
-// "," or ":" or "{" or "}". If that is so, then the text is safe for eval.
-
- if (
- rx_one.test(
- text
- .replace(rx_two, "@")
- .replace(rx_three, "]")
- .replace(rx_four, "")
- )
- ) {
-
-// In the third stage we use the eval function to compile the text into a
-// JavaScript structure. The "{" operator is subject to a syntactic ambiguity
-// in JavaScript: it can begin a block or an object literal. We wrap the text
-// in parens to eliminate the ambiguity.
-
- j = eval("(" + text + ")");
-
-// In the optional fourth stage, we recursively walk the new structure, passing
-// each name/value pair to a reviver function for possible transformation.
-
- return (typeof reviver === "function")
- ? walk({"": j}, "")
- : j;
- }
-
-// If the text is not JSON parseable, then a SyntaxError is thrown.
-
- throw new SyntaxError("JSON.parse");
- };
- }
-}());
\ No newline at end of file
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/icons/ayon_logo.png b/server_addon/photoshop/client/ayon_photoshop/api/extension/icons/ayon_logo.png
deleted file mode 100644
index 3a96f8e2b4..0000000000
Binary files a/server_addon/photoshop/client/ayon_photoshop/api/extension/icons/ayon_logo.png and /dev/null differ
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/index.html b/server_addon/photoshop/client/ayon_photoshop/api/extension/index.html
deleted file mode 100644
index 9d7363e62d..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/extension/index.html
+++ /dev/null
@@ -1,95 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/launch_logic.py b/server_addon/photoshop/client/ayon_photoshop/api/launch_logic.py
deleted file mode 100644
index 04401a0972..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/launch_logic.py
+++ /dev/null
@@ -1,406 +0,0 @@
-import os
-import subprocess
-import collections
-import asyncio
-
-from wsrpc_aiohttp import (
- WebSocketRoute,
- WebSocketAsync
-)
-
-import ayon_api
-from qtpy import QtCore
-
-from ayon_core.lib import Logger
-from ayon_core.pipeline import (
- registered_host,
- Anatomy,
-)
-from ayon_core.pipeline.workfile import (
- get_workfile_template_key_from_context,
- get_last_workfile,
-)
-from ayon_core.pipeline.template_data import get_template_data_with_names
-from ayon_core.tools.utils import host_tools
-from ayon_core.pipeline.context_tools import change_current_context
-
-from .webserver import WebServerTool
-from .ws_stub import PhotoshopServerStub
-
-log = Logger.get_logger(__name__)
-
-
-class ConnectionNotEstablishedYet(Exception):
- pass
-
-
-class MainThreadItem:
- """Structure to store information about callback in main thread.
-
-    Item should be used to execute a callback in the main thread, which may
-    be needed for execution of Qt objects.
-
-    Item stores the callback (callable), its arguments and keyword arguments.
-    It holds information about its processing.
- """
- not_set = object()
-
- def __init__(self, callback, *args, **kwargs):
- self._done = False
- self._exception = self.not_set
- self._result = self.not_set
- self._callback = callback
- self._args = args
- self._kwargs = kwargs
-
- @property
- def done(self):
- return self._done
-
- @property
- def exception(self):
- return self._exception
-
- @property
- def result(self):
- return self._result
-
- def execute(self):
- """Execute callback and store its result.
-
-        Method must be called from the main thread. Item is marked as `done`
-        when callback execution finishes. Stores the callback output, or the
-        exception information when the callback raises one.
- """
- log.debug("Executing process in main thread")
- if self.done:
- log.warning("- item is already processed")
- return
-
- log.info("Running callback: {}".format(str(self._callback)))
- try:
- result = self._callback(*self._args, **self._kwargs)
- self._result = result
-
- except Exception as exc:
- self._exception = exc
-
- finally:
- self._done = True
-
-
-def stub():
- """
-    Convenience function to get the server RPC stub used to call methods
-    on the host (Photoshop).
-    It expects an already created connection, started from the client.
-    Currently created when the panel is opened (PS: Window>Extensions>Avalon).
-    :return: stub on which host functions can be called
- """
- ps_stub = PhotoshopServerStub()
- if not ps_stub.client:
- raise ConnectionNotEstablishedYet("Connection is not created yet")
-
- return ps_stub
-
-
-def show_tool_by_name(tool_name):
- kwargs = {}
- if tool_name == "loader":
- kwargs["use_context"] = True
-
- host_tools.show_tool_by_name(tool_name, **kwargs)
-
-
-class ProcessLauncher(QtCore.QObject):
- route_name = "Photoshop"
- _main_thread_callbacks = collections.deque()
-
- def __init__(self, subprocess_args):
- self._subprocess_args = subprocess_args
- self._log = None
-
- super(ProcessLauncher, self).__init__()
-
- # Keep track if launcher was already started
- self._started = False
-
- self._process = None
- self._websocket_server = None
-
- start_process_timer = QtCore.QTimer()
- start_process_timer.setInterval(100)
-
- loop_timer = QtCore.QTimer()
- loop_timer.setInterval(200)
-
- start_process_timer.timeout.connect(self._on_start_process_timer)
- loop_timer.timeout.connect(self._on_loop_timer)
-
- self._start_process_timer = start_process_timer
- self._loop_timer = loop_timer
-
- @property
- def log(self):
- if self._log is None:
- self._log = Logger.get_logger(
- "{}-launcher".format(self.route_name)
- )
- return self._log
-
- @property
- def websocket_server_is_running(self):
- if self._websocket_server is not None:
- return self._websocket_server.is_running
- return False
-
- @property
- def is_process_running(self):
- if self._process is not None:
- return self._process.poll() is None
- return False
-
- @property
- def is_host_connected(self):
- """Returns True if connected, False if app is not running at all."""
- if not self.is_process_running:
- return False
-
- try:
- _stub = stub()
- if _stub:
- return True
- except Exception:
- pass
-
- return None
-
- @classmethod
- def execute_in_main_thread(cls, callback, *args, **kwargs):
- item = MainThreadItem(callback, *args, **kwargs)
- cls._main_thread_callbacks.append(item)
- return item
-
- def start(self):
- if self._started:
- return
- self.log.info("Started launch logic of Photoshop")
- self._started = True
- self._start_process_timer.start()
-
- def exit(self):
- """ Exit whole application. """
- if self._start_process_timer.isActive():
- self._start_process_timer.stop()
- if self._loop_timer.isActive():
- self._loop_timer.stop()
-
- if self._websocket_server is not None:
- self._websocket_server.stop()
-
- if self._process:
- self._process.kill()
- self._process.wait()
-
- QtCore.QCoreApplication.exit()
-
- def _on_loop_timer(self):
- # TODO find better way and catch errors
- # Run only callbacks that are in queue at the moment
- cls = self.__class__
- for _ in range(len(cls._main_thread_callbacks)):
- if cls._main_thread_callbacks:
- item = cls._main_thread_callbacks.popleft()
- item.execute()
-
- if not self.is_process_running:
- self.log.info("Host process is not running. Closing")
- self.exit()
-
- elif not self.websocket_server_is_running:
- self.log.info("Websocket server is not running. Closing")
- self.exit()
-
- def _on_start_process_timer(self):
- # TODO add try except validations for each part in this method
- # Start server as first thing
- if self._websocket_server is None:
- self._init_server()
- return
-
- # TODO add waiting time
- # Wait for webserver
- if not self.websocket_server_is_running:
- return
-
- # Start application process
- if self._process is None:
- self._start_process()
- self.log.info("Waiting for host to connect")
- return
-
- # TODO add waiting time
- # Wait until host is connected
- if self.is_host_connected:
- self._start_process_timer.stop()
- self._loop_timer.start()
- elif (
- not self.is_process_running
- or not self.websocket_server_is_running
- ):
- self.exit()
-
- def _init_server(self):
- if self._websocket_server is not None:
- return
-
- self.log.debug(
- "Initialization of websocket server for host communication"
- )
-
- self._websocket_server = websocket_server = WebServerTool()
- if websocket_server.port_occupied(
- websocket_server.host_name,
- websocket_server.port
- ):
- self.log.info(
- "Server already running, sending actual context and exit."
- )
- asyncio.run(websocket_server.send_context_change(self.route_name))
- self.exit()
- return
-
- # Add Websocket route
- websocket_server.add_route("*", "/ws/", WebSocketAsync)
-        # Add Photoshop route to websocket handler
-
- print("Adding {} route".format(self.route_name))
- WebSocketAsync.add_route(
- self.route_name, PhotoshopRoute
- )
- self.log.info("Starting websocket server for host communication")
- websocket_server.start_server()
-
- def _start_process(self):
- if self._process is not None:
- return
- self.log.info("Starting host process")
- try:
- self._process = subprocess.Popen(
- self._subprocess_args,
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL
- )
- except Exception:
- self.log.info("exce", exc_info=True)
- self.exit()
-
-
-class PhotoshopRoute(WebSocketRoute):
- """
-    One route, mimicking an external application (like Harmony, etc.).
-    All functions can be called from the client.
-    'do_notify' function calls a function on the client - mimicking
-    a notification after a long-running job on the server or similar.
- """
- instance = None
-
- def init(self, **kwargs):
-        # Python __init__ must return "self".
- # This method might return anything.
- log.debug("someone called Photoshop route")
- self.instance = self
- return kwargs
-
- # server functions
- async def ping(self):
- log.debug("someone called Photoshop route ping")
-
- # This method calls function on the client side
- # client functions
- async def set_context(self, project, folder, task):
- """
-        Sets 'project' and 'folder' to envs, e.g. setting the context.
-
-        Opens the last workfile from that context if it exists.
-
-        Args:
-            project (str)
-            folder (str)
-            task (str)
- """
- log.info("Setting context change")
- log.info(f"project {project} folder {folder} task {task}")
-
- folder_entity = ayon_api.get_folder_by_path(project, folder)
- task_entity = ayon_api.get_task_by_name(
- project, folder_entity["id"], task
- )
- change_current_context(folder_entity, task_entity)
-
- last_workfile_path = self._get_last_workfile_path(project,
- folder,
- task)
- if last_workfile_path and os.path.exists(last_workfile_path):
- ProcessLauncher.execute_in_main_thread(
- lambda: stub().open(last_workfile_path))
-
-
- async def read(self):
- log.debug("photoshop.read client calls server server calls "
- "photoshop client")
- return await self.socket.call('photoshop.read')
-
- # panel routes for tools
- async def workfiles_route(self):
- self._tool_route("workfiles")
-
- async def loader_route(self):
- self._tool_route("loader")
-
- async def publish_route(self):
- self._tool_route("publisher")
-
- async def sceneinventory_route(self):
- self._tool_route("sceneinventory")
-
- async def experimental_tools_route(self):
- self._tool_route("experimental_tools")
-
- def _tool_route(self, _tool_name):
- """The address accessed when clicking on the buttons."""
-
- ProcessLauncher.execute_in_main_thread(show_tool_by_name, _tool_name)
-
- # Required return statement.
- return "nothing"
-
- def _get_last_workfile_path(self, project_name, folder_path, task_name):
- """Returns last workfile path if exists"""
- host = registered_host()
- host_name = "photoshop"
- template_key = get_workfile_template_key_from_context(
- project_name,
- folder_path,
- task_name,
- host_name,
- )
- anatomy = Anatomy(project_name)
-
- data = get_template_data_with_names(
- project_name, folder_path, task_name, host_name
- )
- data["root"] = anatomy.roots
-
- work_template = anatomy.get_template_item("work", template_key)
-
- # Define saving file extension
- extensions = host.get_workfile_extensions()
-
- work_root = work_template["directory"].format_strict(data)
- file_template = work_template["file"].template
- last_workfile_path = get_last_workfile(
- work_root, file_template, data, extensions, True
- )
-
- return last_workfile_path
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/launch_script.py b/server_addon/photoshop/client/ayon_photoshop/api/launch_script.py
deleted file mode 100644
index de7fc8ba48..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/launch_script.py
+++ /dev/null
@@ -1,93 +0,0 @@
-"""Script wraps launch mechanism of Photoshop implementations.
-
-Arguments passed to the script are passed to the launch function in the host
-implementation. In all cases the host app executable is required and a
-workfile or other arguments may follow.
-"""
-
-import os
-import sys
-
-from ayon_photoshop.api.lib import main as host_main
-
-# Get current file to locate start point of sys.argv
-CURRENT_FILE = os.path.abspath(__file__)
-
-
-def show_error_messagebox(title, message, detail_message=None):
- """Function will show message and process ends after closing it."""
- from qtpy import QtWidgets, QtCore
- from ayon_core import style
-
- app = QtWidgets.QApplication([])
- app.setStyleSheet(style.load_stylesheet())
-
- msgbox = QtWidgets.QMessageBox()
- msgbox.setWindowTitle(title)
- msgbox.setText(message)
-
- if detail_message:
- msgbox.setDetailedText(detail_message)
-
- msgbox.setWindowModality(QtCore.Qt.ApplicationModal)
- msgbox.show()
-
- sys.exit(app.exec_())
-
-
-def on_invalid_args(script_not_found):
- """Show to user message box saying that something went wrong.
-
- Tell user that arguments to launch implementation are invalid with
- arguments details.
-
- Args:
- script_not_found (bool): Use different message based on this value.
- """
-
- title = "Invalid arguments"
- joined_args = ", ".join("\"{}\"".format(arg) for arg in sys.argv)
- if script_not_found:
- submsg = "Where couldn't find script path:\n\"{}\""
- else:
- submsg = "Expected Host executable after script path:\n\"{}\""
-
- message = "BUG: Got invalid arguments so can't launch Host application."
- detail_message = "Process was launched with arguments:\n{}\n\n{}".format(
- joined_args,
- submsg.format(CURRENT_FILE)
- )
-
- show_error_messagebox(title, message, detail_message)
-
-
-def main(argv):
- # Modify current file path to find match in sys.argv which may be different
- # on windows (different letter cases and slashes).
- modified_current_file = CURRENT_FILE.replace("\\", "/").lower()
-
- # Create a copy of sys argv
- sys_args = list(argv)
- after_script_idx = None
- # Find script path in sys.argv to know index of argv where host
- # executable should be.
- for idx, item in enumerate(sys_args):
- if item.replace("\\", "/").lower() == modified_current_file:
- after_script_idx = idx + 1
- break
-
- # Validate that there is at least one argument after script path
- launch_args = None
- if after_script_idx is not None:
- launch_args = sys_args[after_script_idx:]
-
- if launch_args:
- # Launch host implementation
- host_main(*launch_args)
- else:
- # Show message box
- on_invalid_args(after_script_idx is None)
-
-
-if __name__ == "__main__":
- main(sys.argv)
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/lib.py b/server_addon/photoshop/client/ayon_photoshop/api/lib.py
deleted file mode 100644
index fd003919ce..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/lib.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import os
-import sys
-import contextlib
-import traceback
-
-from ayon_core.lib import env_value_to_bool, Logger, is_in_tests
-from ayon_core.addon import AddonsManager
-from ayon_core.pipeline import install_host
-from ayon_core.tools.utils import host_tools
-from ayon_core.tools.utils import get_ayon_qt_app
-
-from .launch_logic import ProcessLauncher, stub
-
-log = Logger.get_logger(__name__)
-
-
-def safe_excepthook(*args):
- traceback.print_exception(*args)
-
-
-def main(*subprocess_args):
- from ayon_photoshop.api import PhotoshopHost
-
- host = PhotoshopHost()
- install_host(host)
-
- sys.excepthook = safe_excepthook
-
- # coloring in StdOutBroker
- os.environ["AYON_LOG_NO_COLORS"] = "0"
- app = get_ayon_qt_app()
- app.setQuitOnLastWindowClosed(False)
-
- launcher = ProcessLauncher(subprocess_args)
- launcher.start()
-
- if env_value_to_bool("HEADLESS_PUBLISH"):
- manager = AddonsManager()
- webpublisher_addon = manager["webpublisher"]
- launcher.execute_in_main_thread(
- webpublisher_addon.headless_publish,
- log,
- "ClosePS",
- is_in_tests()
- )
- elif env_value_to_bool("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH",
- default=True):
-
- launcher.execute_in_main_thread(
- host_tools.show_workfiles,
- save=env_value_to_bool("WORKFILES_SAVE_AS")
- )
-
- sys.exit(app.exec_())
-
-
-@contextlib.contextmanager
-def maintained_selection():
- """Maintain selection during context."""
- selection = stub().get_selected_layers()
- try:
- yield selection
- finally:
- stub().select_layers(selection)
-
-
-@contextlib.contextmanager
-def maintained_visibility(layers=None):
- """Maintain visibility during context.
-
- Args:
-        layers (list of PSItem): used for caching
- """
- visibility = {}
- if not layers:
- layers = stub().get_layers()
- for layer in layers:
- visibility[layer.id] = layer.visible
- try:
- yield
- finally:
- for layer in layers:
- stub().set_visible(layer.id, visibility[layer.id])
- pass
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/panel.png b/server_addon/photoshop/client/ayon_photoshop/api/panel.png
deleted file mode 100644
index be5db3b8df..0000000000
Binary files a/server_addon/photoshop/client/ayon_photoshop/api/panel.png and /dev/null differ
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/panel_failure.png b/server_addon/photoshop/client/ayon_photoshop/api/panel_failure.png
deleted file mode 100644
index 6e52a77d22..0000000000
Binary files a/server_addon/photoshop/client/ayon_photoshop/api/panel_failure.png and /dev/null differ
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/pipeline.py b/server_addon/photoshop/client/ayon_photoshop/api/pipeline.py
deleted file mode 100644
index d399bb25e2..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/pipeline.py
+++ /dev/null
@@ -1,285 +0,0 @@
-import os
-
-from qtpy import QtWidgets
-
-import pyblish.api
-
-from ayon_core.lib import register_event_callback, Logger
-from ayon_core.pipeline import (
- register_loader_plugin_path,
- register_creator_plugin_path,
- AVALON_CONTAINER_ID,
- AYON_INSTANCE_ID,
- AVALON_INSTANCE_ID,
-)
-
-from ayon_core.host import (
- HostBase,
- IWorkfileHost,
- ILoadHost,
- IPublishHost
-)
-
-from ayon_core.pipeline.load import any_outdated_containers
-from ayon_core.tools.utils import get_ayon_qt_app
-from ayon_photoshop import PHOTOSHOP_ADDON_ROOT
-
-from . import lib
-
-log = Logger.get_logger(__name__)
-
-PLUGINS_DIR = os.path.join(PHOTOSHOP_ADDON_ROOT, "plugins")
-PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
-LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
-CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
-INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
-
-
-class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
- name = "photoshop"
-
- def install(self):
- """Install Photoshop-specific functionality needed for integration.
-
- This function is called automatically on calling
- `api.install(photoshop)`.
- """
- log.info("Installing OpenPype Photoshop...")
- pyblish.api.register_host("photoshop")
-
- pyblish.api.register_plugin_path(PUBLISH_PATH)
- register_loader_plugin_path(LOAD_PATH)
- register_creator_plugin_path(CREATE_PATH)
-
- register_event_callback("application.launched", on_application_launch)
-
- def current_file(self):
- try:
- full_name = lib.stub().get_active_document_full_name()
- if full_name and full_name != "null":
- return os.path.normpath(full_name).replace("\\", "/")
- except Exception:
- pass
-
- return None
-
- def work_root(self, session):
- return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/")
-
- def open_workfile(self, filepath):
- lib.stub().open(filepath)
-
- return True
-
- def save_workfile(self, filepath=None):
- _, ext = os.path.splitext(filepath)
- lib.stub().saveAs(filepath, ext[1:], True)
-
- def get_current_workfile(self):
- return self.current_file()
-
- def workfile_has_unsaved_changes(self):
- if self.current_file():
- return not lib.stub().is_saved()
-
- return False
-
- def get_workfile_extensions(self):
- return [".psd", ".psb"]
-
- def get_containers(self):
- return ls()
-
- def get_context_data(self):
- """Get stored values for context (validation enable/disable etc)"""
- meta = _get_stub().get_layers_metadata()
- for item in meta:
- if item.get("id") == "publish_context":
- item.pop("id")
- return item
-
- return {}
-
- def update_context_data(self, data, changes):
- """Store value needed for context"""
- item = data
- item["id"] = "publish_context"
- _get_stub().imprint(item["id"], item)
-
- def list_instances(self):
- """List all created instances to publish from current workfile.
-
- Pulls from File > File Info
-
- Returns:
- (list) of dictionaries matching instances format
- """
- stub = _get_stub()
-
- if not stub:
- return []
-
- instances = []
- layers_meta = stub.get_layers_metadata()
- if layers_meta:
- for instance in layers_meta:
- if instance.get("id") in {
- AYON_INSTANCE_ID, AVALON_INSTANCE_ID
- }:
- instances.append(instance)
-
- return instances
-
- def remove_instance(self, instance):
- """Remove instance from current workfile metadata.
-
- Updates metadata of current file in File > File Info and removes
- icon highlight on group layer.
-
- Args:
- instance (dict): instance representation from subsetmanager model
- """
- stub = _get_stub()
-
- if not stub:
- return
-
- inst_id = instance.get("instance_id") or instance.get("uuid") # legacy
- if not inst_id:
- log.warning("No instance identifier for {}".format(instance))
- return
-
- stub.remove_instance(inst_id)
-
- if instance.get("members"):
- item = stub.get_layer(instance["members"][0])
- if item:
- stub.rename_layer(item.id,
- item.name.replace(stub.PUBLISH_ICON, ''))
-
-
-def check_inventory():
- if not any_outdated_containers():
- return
-
- # Warn about outdated containers.
- _app = get_ayon_qt_app()
-
- message_box = QtWidgets.QMessageBox()
- message_box.setIcon(QtWidgets.QMessageBox.Warning)
- msg = "There are outdated containers in the scene."
- message_box.setText(msg)
- message_box.exec_()
-
-
-def on_application_launch():
- check_inventory()
-
-
-def ls():
- """Yields containers from active Photoshop document
-
- This is the host-equivalent of api.ls(), but instead of listing
- assets on disk, it lists assets already loaded in Photoshop; once loaded
- they are called 'containers'
-
- Yields:
- dict: container
-
- """
- try:
- stub = lib.stub() # only after Photoshop is up
- except lib.ConnectionNotEstablishedYet:
- print("Not connected yet, ignoring")
- return
-
- if not stub.get_active_document_name():
- return
-
-    layers_meta = stub.get_layers_metadata()  # minimize calls to PS
- for layer in stub.get_layers():
- data = stub.read(layer, layers_meta)
-
- # Skip non-tagged layers.
- if not data:
- continue
-
- # Filter to only containers.
- if "container" not in data["id"]:
- continue
-
- # Append transient data
- data["objectName"] = layer.name.replace(stub.LOADED_ICON, '')
- data["layer"] = layer
-
- yield data
-
-
-def _get_stub():
- """Handle pulling stub from PS to run operations on host
-
- Returns:
- (PhotoshopServerStub) or None
- """
- try:
- stub = lib.stub() # only after Photoshop is up
- except lib.ConnectionNotEstablishedYet:
- print("Not connected yet, ignoring")
- return
-
- if not stub.get_active_document_name():
- return
-
- return stub
-
-
-def containerise(
- name, namespace, layer, context, loader=None, suffix="_CON"
-):
- """Imprint layer with metadata
-
-    Containerisation enables tracking of version, author and origin
- for loaded assets.
-
- Arguments:
- name (str): Name of resulting assembly
- namespace (str): Namespace under which to host container
- layer (PSItem): Layer to containerise
- context (dict): Asset information
- loader (str, optional): Name of loader used to produce this container.
- suffix (str, optional): Suffix of container, defaults to `_CON`.
-
- Returns:
- container (str): Name of container assembly
- """
- layer.name = name + suffix
-
- data = {
- "schema": "openpype:container-2.0",
- "id": AVALON_CONTAINER_ID,
- "name": name,
- "namespace": namespace,
- "loader": str(loader),
- "representation": context["representation"]["id"],
- "members": [str(layer.id)]
- }
- stub = lib.stub()
- stub.imprint(layer.id, data)
-
- return layer
-
-
-def cache_and_get_instances(creator):
- """Cache instances in shared data.
-
-    Stores all instances as a list, as legacy instances might still be present.
- Args:
- creator (Creator): Plugin which would like to get instances from host.
- Returns:
- List[]: list of all instances stored in metadata
- """
- shared_key = "openpype.photoshop.instances"
- if shared_key not in creator.collection_shared_data:
- creator.collection_shared_data[shared_key] = \
- creator.host.list_instances()
- return creator.collection_shared_data[shared_key]
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/plugin.py b/server_addon/photoshop/client/ayon_photoshop/api/plugin.py
deleted file mode 100644
index c11a206834..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/plugin.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import re
-
-from ayon_core.pipeline import LoaderPlugin
-from .launch_logic import stub
-
-
-def get_unique_layer_name(layers, container_name, product_name):
- """Prepare unique layer name.
-
-    Gets all layer names, strips any trailing '_NNN' suffix, counts how many
-    existing layers share the base name and returns the name with the next suffix.
-
- Args:
- layers (list) of dict with layers info (name, id etc.)
- container_name (str):
- product_name (str):
-
- Returns:
- str: name_00X (without version)
- """
- name = "{}_{}".format(container_name, product_name)
- names = {}
- for layer in layers:
- layer_name = re.sub(r'_\d{3}$', '', layer.name)
- if layer_name in names.keys():
- names[layer_name] = names[layer_name] + 1
- else:
- names[layer_name] = 1
- occurrences = names.get(name, 0)
-
- return "{}_{:0>3d}".format(name, occurrences + 1)
-
-
-class PhotoshopLoader(LoaderPlugin):
- @staticmethod
- def get_stub():
- return stub()
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/webserver.py b/server_addon/photoshop/client/ayon_photoshop/api/webserver.py
deleted file mode 100644
index cd229c65ad..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/webserver.py
+++ /dev/null
@@ -1,241 +0,0 @@
-"""Webserver for communication with photoshop.
-
-Aiohttp (Asyncio) based websocket server used for communication with host
-application.
-
-This webserver is started in a spawned Python process that opens the DCC
-during its launch, waits for a connection from the DCC and handles further
-communication. The server is closed before the Python process is killed.
-"""
-import os
-import logging
-import urllib
-import threading
-import asyncio
-import socket
-
-from aiohttp import web
-
-from wsrpc_aiohttp import WSRPCClient
-
-from ayon_core.pipeline import get_global_context
-
-log = logging.getLogger(__name__)
-
-
-class WebServerTool:
- """
-    Basic POC implementation of an asynchronous websocket RPC server.
-    Uses class in external_app_1.py to mimic implementation for a single
-    external application.
-    'test_client' folder contains two test implementations of a client.
- """
- _instance = None
-
- def __init__(self):
- WebServerTool._instance = self
-
- self.client = None
- self.handlers = {}
- self.on_stop_callbacks = []
-
- port = None
- host_name = "localhost"
- websocket_url = os.getenv("WEBSOCKET_URL")
- if websocket_url:
- parsed = urllib.parse.urlparse(websocket_url)
- port = parsed.port
- host_name = parsed.netloc.split(":")[0]
- if not port:
- port = 8098 # fallback
-
- self.port = port
- self.host_name = host_name
-
- self.app = web.Application()
-
- # add route with multiple methods for single "external app"
- self.webserver_thread = WebServerThread(self, self.port)
-
- def add_route(self, *args, **kwargs):
- self.app.router.add_route(*args, **kwargs)
-
- def add_static(self, *args, **kwargs):
- self.app.router.add_static(*args, **kwargs)
-
- def start_server(self):
- if self.webserver_thread and not self.webserver_thread.is_alive():
- self.webserver_thread.start()
-
- def stop_server(self):
- self.stop()
-
- async def send_context_change(self, host):
- """
-        Calls running webserver to inform it about a context change.
-
-        Used when a new PS/AE should be triggered but one is already running;
-        without this, publishing would point to the old context.
- """
- client = WSRPCClient(os.getenv("WEBSOCKET_URL"),
- loop=asyncio.get_event_loop())
- await client.connect()
-
- context = get_global_context()
- project_name = context["project_name"]
- folder_path = context["folder_path"]
- task_name = context["task_name"]
- log.info("Sending context change to {}{}/{}".format(
- project_name, folder_path, task_name
- ))
-
- await client.call(
- '{}.set_context'.format(host),
- project=project_name,
- folder=folder_path,
- task=task_name
- )
- await client.close()
-
- def port_occupied(self, host_name, port):
- """
-        Check if 'host_name:port' is already occupied.
-
-        This could mean that the app is already running and we are trying to
-        open it again. In that case, use the existing running webserver.
-        Checking here is easier than capturing an exception from the thread.
- """
- with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
- result = con.connect_ex((host_name, port)) == 0
-
- if result:
- print(f"Port {port} is already in use")
- return result
-
- def call(self, func):
- log.debug("websocket.call {}".format(func))
- future = asyncio.run_coroutine_threadsafe(
- func,
- self.webserver_thread.loop
- )
- result = future.result()
- return result
-
- @staticmethod
- def get_instance():
- if WebServerTool._instance is None:
- WebServerTool()
- return WebServerTool._instance
-
- @property
- def is_running(self):
- if not self.webserver_thread:
- return False
- return self.webserver_thread.is_running
-
- def stop(self):
- if not self.is_running:
- return
- try:
- log.debug("Stopping websocket server")
- self.webserver_thread.is_running = False
- self.webserver_thread.stop()
- except Exception:
- log.warning(
- "Error has happened during Killing websocket server",
- exc_info=True
- )
-
- def thread_stopped(self):
- for callback in self.on_stop_callbacks:
- callback()
-
-
-class WebServerThread(threading.Thread):
- """ Listener for websocket rpc requests.
-
-    It would probably be better to "attach" this to the main thread (as, for
-    example, Harmony needs to run something on the main thread), but currently
-    it creates a separate thread and a separate asyncio event loop.
- """
- def __init__(self, module, port):
- super(WebServerThread, self).__init__()
-
- self.is_running = False
- self.port = port
- self.module = module
- self.loop = None
- self.runner = None
- self.site = None
- self.tasks = []
-
- def run(self):
- self.is_running = True
-
- try:
- log.info("Starting web server")
- self.loop = asyncio.new_event_loop() # create new loop for thread
- asyncio.set_event_loop(self.loop)
-
- self.loop.run_until_complete(self.start_server())
-
- websocket_url = "ws://localhost:{}/ws".format(self.port)
-
- log.debug(
- "Running Websocket server on URL: \"{}\"".format(websocket_url)
- )
-
- asyncio.ensure_future(self.check_shutdown(), loop=self.loop)
- self.loop.run_forever()
- except Exception:
- self.is_running = False
- log.warning(
- "Websocket Server service has failed", exc_info=True
- )
- raise
- finally:
- self.loop.close() # optional
-
- self.is_running = False
- self.module.thread_stopped()
- log.info("Websocket server stopped")
-
- async def start_server(self):
- """ Starts runner and TCPsite """
- self.runner = web.AppRunner(self.module.app)
- await self.runner.setup()
- self.site = web.TCPSite(self.runner, 'localhost', self.port)
- await self.site.start()
-
- def stop(self):
- """Sets is_running flag to false, 'check_shutdown' shuts server down"""
- self.is_running = False
-
- async def check_shutdown(self):
- """ Future that is running and checks if server should be running
- periodically.
- """
- while self.is_running:
- while self.tasks:
- task = self.tasks.pop(0)
- log.debug("waiting for task {}".format(task))
- await task
- log.debug("returned value {}".format(task.result))
-
- await asyncio.sleep(0.5)
-
- log.debug("Starting shutdown")
- await self.site.stop()
- log.debug("Site stopped")
- await self.runner.cleanup()
- log.debug("Runner stopped")
- tasks = [task for task in asyncio.all_tasks() if
- task is not asyncio.current_task()]
- list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks
- results = await asyncio.gather(*tasks, return_exceptions=True)
- log.debug(f'Finished awaiting cancelled tasks, results: {results}...')
- await self.loop.shutdown_asyncgens()
- # to really make sure everything else has time to stop
- await asyncio.sleep(0.07)
- self.loop.stop()
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/ws_stub.py b/server_addon/photoshop/client/ayon_photoshop/api/ws_stub.py
deleted file mode 100644
index 3619fa4b7a..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/ws_stub.py
+++ /dev/null
@@ -1,571 +0,0 @@
-"""
- Stub handling connection from server to client.
-    Used anywhere the solution calls client methods.
-"""
-import json
-import attr
-from wsrpc_aiohttp import WebSocketAsync
-
-from .webserver import WebServerTool
-
-
-@attr.s
-class PSItem(object):
- """
-    Object denoting a layer or group item in PS. Each item is created in
-    PS by a Loader, but contains the same fields, which are used
-    in later processing.
- """
- # metadata
-    id = attr.ib()  # id created by PS, could be used for querying
- name = attr.ib() # name of item
- group = attr.ib(default=None) # item type (footage, folder, comp)
- parents = attr.ib(factory=list)
- visible = attr.ib(default=True)
- type = attr.ib(default=None)
- # all imported elements, single for
- members = attr.ib(factory=list)
- long_name = attr.ib(default=None)
- color_code = attr.ib(default=None) # color code of layer
- instance_id = attr.ib(default=None)
-
- @property
- def clean_name(self):
- """Returns layer name without publish icon highlight
-
- Returns:
- (str)
- """
- return (self.name.replace(PhotoshopServerStub.PUBLISH_ICON, '')
- .replace(PhotoshopServerStub.LOADED_ICON, ''))
-
-
-class PhotoshopServerStub:
- """
-        Stub for calling functions on the client (Photoshop JS) side.
-        Expects that the client is already connected (started when the
-        avalon menu is opened).
-        'self.websocketserver.call' is used as an async wrapper.
- """
- PUBLISH_ICON = '\u2117 '
- LOADED_ICON = '\u25bc'
-
- def __init__(self):
- self.websocketserver = WebServerTool.get_instance()
- self.client = self.get_client()
-
- @staticmethod
- def get_client():
- """
-        Return the first client connected to the WebSocket.
- TODO implement selection by Route
- :return: client
- """
- clients = WebSocketAsync.get_clients()
- client = None
- if len(clients) > 0:
- key = list(clients.keys())[0]
- client = clients.get(key)
-
- return client
-
- def open(self, path):
- """Open file located at 'path' (local).
-
- Args:
- path(string): file path locally
- Returns: None
- """
- self.websocketserver.call(
- self.client.call('Photoshop.open', path=path)
- )
-
- def read(self, layer, layers_meta=None):
- """Parses layer metadata from Headline field of active document.
-
- Args:
- layer: (PSItem)
- layers_meta: full list from Headline (for performance in loops)
- Returns:
- (dict) of layer metadata stored in PS file
-
- Example:
- {
- 'id': 'pyblish.avalon.container',
- 'loader': 'ImageLoader',
- 'members': ['64'],
- 'name': 'imageMainMiddle',
- 'namespace': 'Hero_imageMainMiddle_001',
- 'representation': '6203dc91e80934d9f6ee7d96',
- 'schema': 'openpype:container-2.0'
- }
- """
- if layers_meta is None:
- layers_meta = self.get_layers_metadata()
-
- for layer_meta in layers_meta:
- layer_id = layer_meta.get("uuid") # legacy
- if layer_meta.get("members"):
- layer_id = layer_meta["members"][0]
- if str(layer.id) == str(layer_id):
- return layer_meta
- print("Unable to find layer metadata for {}".format(layer.id))
-
- def imprint(self, item_id, data, all_layers=None, items_meta=None):
- """Save layer metadata to Headline field of active document
-
- Stores metadata in format:
- [{
- "active":true,
- "productName":"imageBG",
- "productType":"image",
- "id":"ayon.create.instance",
- "folderPath":"Town",
- "uuid": "8"
- }] - for created instances
- OR
- [{
- "schema": "openpype:container-2.0",
- "id": "ayon.create.instance",
- "name": "imageMG",
- "namespace": "Jungle_imageMG_001",
- "loader": "ImageLoader",
- "representation": "5fbfc0ee30a946093c6ff18a",
- "members": [
- "40"
- ]
- }] - for loaded instances
-
- Args:
- item_id (str):
- data(string): json representation for single layer
- all_layers (list of PSItem): for performance, could be
- injected for usage in loop, if not, single call will be
- triggered
- items_meta(string): json representation from Headline
- (for performance - provide only if imprint is in
- loop - value should be same)
- Returns: None
- """
- if not items_meta:
- items_meta = self.get_layers_metadata()
-
- # json.dumps writes integer values in a dictionary to string, so
- # anticipating it here.
- item_id = str(item_id)
- is_new = True
- result_meta = []
- for item_meta in items_meta:
- if ((item_meta.get('members') and
- item_id == str(item_meta.get('members')[0])) or
- item_meta.get("instance_id") == item_id):
- is_new = False
- if data:
- item_meta.update(data)
- result_meta.append(item_meta)
- else:
- result_meta.append(item_meta)
-
- if is_new:
- result_meta.append(data)
-
- # Ensure only valid ids are stored.
- if not all_layers:
- all_layers = self.get_layers()
- layer_ids = [layer.id for layer in all_layers]
- cleaned_data = []
-
- for item in result_meta:
- if item.get("members"):
- if int(item["members"][0]) not in layer_ids:
- continue
-
- cleaned_data.append(item)
-
- payload = json.dumps(cleaned_data, indent=4)
- self.websocketserver.call(
- self.client.call('Photoshop.imprint', payload=payload)
- )
-
- def get_layers(self):
- """Returns JSON document with all(?) layers in active document.
-
- Returns:
-            list of PSItem records, e.g.: { 'id': '123',
-                     'name': 'My Layer 1',
-                     'type': 'GUIDE'|'FG'|'BG'|'OBJ',
-                     'visible': 'true'|'false' }
- """
- res = self.websocketserver.call(
- self.client.call('Photoshop.get_layers')
- )
-
- return self._to_records(res)
-
- def get_layer(self, layer_id):
- """
- Returns PSItem for specific 'layer_id' or None if not found
- Args:
- layer_id (string): unique layer id, stored in 'uuid' field
-
- Returns:
- (PSItem) or None
- """
- layers = self.get_layers()
- for layer in layers:
- if str(layer.id) == str(layer_id):
- return layer
-
- def get_layers_in_layers(self, layers):
- """Return all layers that belong to layers (might be groups).
-
- Args:
- layers :
-
- Returns:
-
- """
- parent_ids = set([lay.id for lay in layers])
-
- return self._get_layers_in_layers(parent_ids)
-
- def get_layers_in_layers_ids(self, layers_ids, layers=None):
- """Return all layers that belong to layers (might be groups).
-
- Args:
- layers_ids
- layers :
-
- Returns:
-
- """
- parent_ids = set(layers_ids)
-
- return self._get_layers_in_layers(parent_ids, layers)
-
- def _get_layers_in_layers(self, parent_ids, layers=None):
- if not layers:
- layers = self.get_layers()
-
- all_layers = layers
- ret = []
-
- for layer in all_layers:
- parents = set(layer.parents)
- if len(parent_ids & parents) > 0:
- ret.append(layer)
- if layer.id in parent_ids:
- ret.append(layer)
-
- return ret
-
- def create_group(self, name):
- """Create new group (eg. LayerSet)
-
- Returns:
-
- """
- enhanced_name = self.PUBLISH_ICON + name
- ret = self.websocketserver.call(
- self.client.call('Photoshop.create_group', name=enhanced_name)
- )
-        # group creation in PS is asynchronous, returns only the id
- return PSItem(id=ret, name=name, group=True)
-
- def group_selected_layers(self, name):
- """Group selected layers into new LayerSet (eg. group)
-
- Returns:
- (Layer)
- """
- enhanced_name = self.PUBLISH_ICON + name
- res = self.websocketserver.call(
- self.client.call(
- 'Photoshop.group_selected_layers', name=enhanced_name
- )
- )
- res = self._to_records(res)
- if res:
- rec = res.pop()
- rec.name = rec.name.replace(self.PUBLISH_ICON, '')
- return rec
- raise ValueError("No group record returned")
-
- def get_selected_layers(self):
- """Get a list of actually selected layers.
-
- Returns:
- """
- res = self.websocketserver.call(
- self.client.call('Photoshop.get_selected_layers')
- )
- return self._to_records(res)
-
- def select_layers(self, layers):
- """Selects specified layers in Photoshop by its ids.
-
- Args:
- layers:
- """
- layers_id = [str(lay.id) for lay in layers]
- self.websocketserver.call(
- self.client.call(
- 'Photoshop.select_layers',
- layers=json.dumps(layers_id)
- )
- )
-
- def get_active_document_full_name(self):
- """Returns full name with path of active document via ws call
-
- Returns(string):
- full path with name
- """
- res = self.websocketserver.call(
- self.client.call('Photoshop.get_active_document_full_name')
- )
-
- return res
-
- def get_active_document_name(self):
- """Returns just a name of active document via ws call
-
- Returns(string):
- file name
- """
- return self.websocketserver.call(
- self.client.call('Photoshop.get_active_document_name')
- )
-
- def is_saved(self):
- """Returns true if no changes in active document
-
- Returns:
-
- """
- return self.websocketserver.call(
- self.client.call('Photoshop.is_saved')
- )
-
- def save(self):
- """Saves active document"""
- self.websocketserver.call(
- self.client.call('Photoshop.save')
- )
-
- def saveAs(self, image_path, ext, as_copy):
- """Saves active document to psd (copy) or png or jpg
-
- Args:
- image_path(string): full local path
- ext:
- as_copy:
- Returns: None
- """
- self.websocketserver.call(
- self.client.call(
- 'Photoshop.saveAs',
- image_path=image_path,
- ext=ext,
- as_copy=as_copy
- )
- )
-
- def set_visible(self, layer_id, visibility):
- """Set layer with 'layer_id' to 'visibility'
-
- Args:
- layer_id:
- visibility:
- Returns: None
- """
- self.websocketserver.call(
- self.client.call(
- 'Photoshop.set_visible',
- layer_id=layer_id,
- visibility=visibility
- )
- )
-
- def hide_all_others_layers(self, layers):
- """hides all layers that are not part of the list or that are not
- children of this list
-
- Args:
- layers (list): list of PSItem - highest hierarchy
- """
- extract_ids = set([ll.id for ll in self.get_layers_in_layers(layers)])
-
- self.hide_all_others_layers_ids(extract_ids)
-
- def hide_all_others_layers_ids(self, extract_ids, layers=None):
- """hides all layers that are not part of the list or that are not
- children of this list
-
- Args:
- extract_ids (list): list of integer that should be visible
- layers (list) of PSItem (used for caching)
- """
- if not layers:
- layers = self.get_layers()
- for layer in layers:
- if layer.visible and layer.id not in extract_ids:
- self.set_visible(layer.id, False)
-
- def get_layers_metadata(self):
- """Reads layers metadata from Headline from active document in PS.
- (Headline accessible by File > File Info)
-
- Returns:
- (list)
- example:
- {"8":{"active":true,"productName":"imageBG",
- "productType":"image","id":"ayon.create.instance",
- "folderPath":"/Town"}}
- 8 is layer(group) id - used for deletion, update etc.
- """
- res = self.websocketserver.call(self.client.call('Photoshop.read'))
- layers_data = []
- try:
- if res:
- layers_data = json.loads(res)
- except json.decoder.JSONDecodeError:
- raise ValueError("{} cannot be parsed, recreate meta".format(res))
- # format of metadata changed from {} to [] because of standardization
-        # keep current implementation logic as it's working
- if isinstance(layers_data, dict):
- for layer_id, layer_meta in layers_data.items():
- if layer_meta.get("schema") != "openpype:container-2.0":
- layer_meta["members"] = [str(layer_id)]
- layers_data = list(layers_data.values())
- return layers_data
-
- def import_smart_object(self, path, layer_name, as_reference=False):
- """Import the file at `path` as a smart object to active document.
-
- Args:
- path (str): File path to import.
- layer_name (str): Unique layer name to differentiate how many times
- same smart object was loaded
- as_reference (bool): pull in content or reference
- """
- enhanced_name = self.LOADED_ICON + layer_name
- res = self.websocketserver.call(
- self.client.call(
- 'Photoshop.import_smart_object',
- path=path,
- name=enhanced_name,
- as_reference=as_reference
- )
- )
- rec = self._to_records(res).pop()
- if rec:
- rec.name = rec.name.replace(self.LOADED_ICON, '')
- return rec
-
- def replace_smart_object(self, layer, path, layer_name):
- """Replace the smart object `layer` with file at `path`
-
- Args:
- layer (PSItem):
- path (str): File to import.
- layer_name (str): Unique layer name to differentiate how many times
- same smart object was loaded
- """
- enhanced_name = self.LOADED_ICON + layer_name
- self.websocketserver.call(
- self.client.call(
- 'Photoshop.replace_smart_object',
- layer_id=layer.id,
- path=path,
- name=enhanced_name
- )
- )
-
- def delete_layer(self, layer_id):
- """Deletes specific layer by it's id.
-
- Args:
- layer_id (int): id of layer to delete
- """
- self.websocketserver.call(
- self.client.call('Photoshop.delete_layer', layer_id=layer_id)
- )
-
- def rename_layer(self, layer_id, name):
- """Renames specific layer by it's id.
-
- Args:
- layer_id (int): id of layer to delete
- name (str): new name
- """
- self.websocketserver.call(
- self.client.call(
- 'Photoshop.rename_layer',
- layer_id=layer_id,
- name=name
- )
- )
-
- def remove_instance(self, instance_id):
- cleaned_data = []
-
- for item in self.get_layers_metadata():
- inst_id = item.get("instance_id") or item.get("uuid")
- if inst_id != instance_id:
- cleaned_data.append(item)
-
- payload = json.dumps(cleaned_data, indent=4)
-
- self.websocketserver.call(
- self.client.call('Photoshop.imprint', payload=payload)
- )
-
- def get_extension_version(self):
- """Returns version number of installed extension."""
- return self.websocketserver.call(
- self.client.call('Photoshop.get_extension_version')
- )
-
- def close(self):
- """Shutting down PS and process too.
-
- For webpublishing only.
- """
- # TODO change client.call to method with checks for client
- self.websocketserver.call(self.client.call('Photoshop.close'))
-
- def _to_records(self, res):
- """Converts string json representation into list of PSItem for
- dot notation access to work.
-
- Args:
- res (string): valid json
-
- Returns:
-
- """
- try:
- layers_data = json.loads(res)
- except json.decoder.JSONDecodeError:
- raise ValueError("Received broken JSON {}".format(res))
- ret = []
-
-        # convert to PSItem to use dot notation
- if isinstance(layers_data, dict):
- layers_data = [layers_data]
- for d in layers_data:
- # currently implemented and expected fields
- ret.append(PSItem(
- d.get('id'),
- d.get('name'),
- d.get('group'),
- d.get('parents'),
- d.get('visible'),
- d.get('type'),
- d.get('members'),
- d.get('long_name'),
- d.get("color_code"),
- d.get("instance_id")
- ))
- return ret
diff --git a/server_addon/photoshop/client/ayon_photoshop/hooks/pre_launch_args.py b/server_addon/photoshop/client/ayon_photoshop/hooks/pre_launch_args.py
deleted file mode 100644
index ff60c2f40d..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/hooks/pre_launch_args.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import os
-import platform
-import subprocess
-
-from ayon_core.lib import (
- get_ayon_launcher_args,
- is_using_ayon_console,
-)
-from ayon_applications import PreLaunchHook, LaunchTypes
-from ayon_photoshop import get_launch_script_path
-
-
-def get_launch_kwargs(kwargs):
- """Explicit setting of kwargs for Popen for Photoshop.
-
- Expected behavior
- - ayon_console opens window with logs
- - ayon has stdout/stderr available for capturing
-
- Args:
- kwargs (Union[dict, None]): Current kwargs or None.
-
- """
- if kwargs is None:
- kwargs = {}
-
- if platform.system().lower() != "windows":
- return kwargs
-
- if not is_using_ayon_console():
- kwargs.update({
- "creationflags": subprocess.CREATE_NEW_CONSOLE
- })
- else:
- kwargs.update({
- "creationflags": subprocess.CREATE_NO_WINDOW,
- "stdout": subprocess.DEVNULL,
- "stderr": subprocess.DEVNULL
- })
- return kwargs
-
-
-class PhotoshopPrelaunchHook(PreLaunchHook):
- """Launch arguments preparation.
-
-    Hook adds the python executable and the script path of the Photoshop
-    implementation before the Photoshop executable and adds the last workfile
-    path to the launch arguments.
-
-    Existence of the last workfile is checked. If the workfile does not exist,
-    it tries to copy a templated workfile from a predefined path.
- """
- app_groups = {"photoshop"}
-
- order = 20
- launch_types = {LaunchTypes.local}
-
- def execute(self):
- # Pop executable
- executable_path = self.launch_context.launch_args.pop(0)
-
- # Pop rest of launch arguments - There should not be other arguments!
- remainders = []
- while self.launch_context.launch_args:
- remainders.append(self.launch_context.launch_args.pop(0))
-
- script_path = get_launch_script_path()
-
- new_launch_args = get_ayon_launcher_args(
- "run", script_path, executable_path
- )
- # Add workfile path if exists
- workfile_path = self.data["last_workfile_path"]
- if (
- self.data.get("start_last_workfile")
- and workfile_path
- and os.path.exists(workfile_path)
- ):
- new_launch_args.append(workfile_path)
-
- # Append as whole list as these arguments should not be separated
- self.launch_context.launch_args.append(new_launch_args)
-
- if remainders:
- self.launch_context.launch_args.extend(remainders)
-
- self.launch_context.kwargs = get_launch_kwargs(
- self.launch_context.kwargs
- )
diff --git a/server_addon/photoshop/client/ayon_photoshop/lib.py b/server_addon/photoshop/client/ayon_photoshop/lib.py
deleted file mode 100644
index 9dc90953c5..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/lib.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import re
-
-import ayon_api
-
-from ayon_core.lib import prepare_template_data
-from ayon_core.pipeline import (
- AutoCreator,
- CreatedInstance
-)
-from ayon_photoshop import api
-from ayon_photoshop.api.pipeline import cache_and_get_instances
-
-
-class PSAutoCreator(AutoCreator):
- """Generic autocreator to extend."""
- def get_instance_attr_defs(self):
- return []
-
- def collect_instances(self):
- for instance_data in cache_and_get_instances(self):
- creator_id = instance_data.get("creator_identifier")
-
- if creator_id == self.identifier:
- instance = CreatedInstance.from_existing(
- instance_data, self
- )
- self._add_instance_to_context(instance)
-
- def update_instances(self, update_list):
- self.log.debug("update_list:: {}".format(update_list))
- for created_inst, _changes in update_list:
- api.stub().imprint(created_inst.get("instance_id"),
- created_inst.data_to_store())
-
- def create(self, options=None):
- existing_instance = None
- for instance in self.create_context.instances:
- if instance.product_type == self.product_type:
- existing_instance = instance
- break
-
- context = self.create_context
- project_name = context.get_current_project_name()
- folder_path = context.get_current_folder_path()
- task_name = context.get_current_task_name()
- host_name = context.host_name
-
- if existing_instance is None:
- existing_instance_folder = None
- else:
- existing_instance_folder = existing_instance["folderPath"]
-
- if existing_instance is None:
- folder_entity = ayon_api.get_folder_by_path(
- project_name, folder_path
- )
- task_entity = ayon_api.get_task_by_name(
- project_name, folder_entity["id"], task_name
- )
- product_name = self.get_product_name(
- project_name,
- folder_entity,
- task_entity,
- self.default_variant,
- host_name,
- )
- data = {
- "folderPath": folder_path,
- "task": task_name,
- "variant": self.default_variant
- }
- data.update(self.get_dynamic_data(
- project_name,
- folder_entity,
- task_entity,
- self.default_variant,
- host_name,
- None
- ))
-
- if not self.active_on_create:
- data["active"] = False
-
- new_instance = CreatedInstance(
- self.product_type, product_name, data, self
- )
- self._add_instance_to_context(new_instance)
- api.stub().imprint(new_instance.get("instance_id"),
- new_instance.data_to_store())
-
- elif (
- existing_instance_folder != folder_path
- or existing_instance["task"] != task_name
- ):
- folder_entity = ayon_api.get_folder_by_path(
- project_name, folder_path
- )
- task_entity = ayon_api.get_task_by_name(
- project_name, folder_entity["id"], task_name
- )
- product_name = self.get_product_name(
- project_name,
- folder_entity,
- task_entity,
- self.default_variant,
- host_name,
- )
- existing_instance["folderPath"] = folder_path
- existing_instance["task"] = task_name
- existing_instance["productName"] = product_name
-
-
-def clean_product_name(product_name):
-    """Clean all leftover {layer} variants from the product name."""
- dynamic_data = prepare_template_data({"layer": "{layer}"})
- for value in dynamic_data.values():
- if value in product_name:
- product_name = (
- product_name
- .replace(value, "")
- .replace("__", "_")
- .replace("..", ".")
- )
-    # clean trailing separator, e.g. "Main_" -> "Main"
- pattern = r'[\W_]+$'
- replacement = ''
- return re.sub(pattern, replacement, product_name)
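
For reference, the trailing-separator cleanup above boils down to a single regex; a minimal standalone sketch (plain `re` only, no AYON imports; the example values are hypothetical):

    import re

    def strip_trailing_separators(product_name):
        # "imageMain_" -> "imageMain", "imageMain.." -> "imageMain"
        return re.sub(r'[\W_]+$', '', product_name)

    print(strip_trailing_separators("imageMain_"))   # imageMain
    print(strip_trailing_separators("imageMain.."))  # imageMain
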
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_flatten_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_flatten_image.py
deleted file mode 100644
index a467a5ecaa..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_flatten_image.py
+++ /dev/null
@@ -1,156 +0,0 @@
-import ayon_api
-
-from ayon_photoshop import api
-from ayon_photoshop.lib import PSAutoCreator, clean_product_name
-from ayon_core.lib import BoolDef, prepare_template_data
-from ayon_core.pipeline.create import get_product_name, CreatedInstance
-
-
-class AutoImageCreator(PSAutoCreator):
- """Creates flatten image from all visible layers.
-
-    Used in simplified publishing as an auto-created instance.
-    Must be enabled in Settings and a product name template must be provided.
- """
- identifier = "auto_image"
- product_type = "image"
-
- # Settings
- default_variant = ""
- # - Mark by default instance for review
- mark_for_review = True
- active_on_create = True
-
- def create(self, options=None):
- existing_instance = None
- for instance in self.create_context.instances:
- if instance.creator_identifier == self.identifier:
- existing_instance = instance
- break
-
- context = self.create_context
- project_name = context.get_current_project_name()
- folder_path = context.get_current_folder_path()
- task_name = context.get_current_task_name()
- host_name = context.host_name
- folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
- task_entity = ayon_api.get_task_by_name(
- project_name, folder_entity["id"], task_name
- )
-
- existing_folder_path = None
- if existing_instance is not None:
- existing_folder_path = existing_instance["folderPath"]
-
- if existing_instance is None:
- product_name = self.get_product_name(
- project_name,
- folder_entity,
- task_entity,
- self.default_variant,
- host_name,
- )
-
- data = {
- "folderPath": folder_path,
- "task": task_name,
- }
-
- if not self.active_on_create:
- data["active"] = False
-
- creator_attributes = {"mark_for_review": self.mark_for_review}
- data.update({"creator_attributes": creator_attributes})
-
- new_instance = CreatedInstance(
- self.product_type, product_name, data, self
- )
- self._add_instance_to_context(new_instance)
- api.stub().imprint(new_instance.get("instance_id"),
- new_instance.data_to_store())
-
- elif ( # existing instance from different context
- existing_folder_path != folder_path
- or existing_instance["task"] != task_name
- ):
- product_name = self.get_product_name(
- project_name,
- folder_entity,
- task_entity,
- self.default_variant,
- host_name,
- )
- existing_instance["folderPath"] = folder_path
- existing_instance["task"] = task_name
- existing_instance["productName"] = product_name
-
- api.stub().imprint(existing_instance.get("instance_id"),
- existing_instance.data_to_store())
-
- def get_pre_create_attr_defs(self):
- return [
- BoolDef(
- "mark_for_review",
- label="Review",
- default=self.mark_for_review
- )
- ]
-
- def get_instance_attr_defs(self):
- return [
- BoolDef(
- "mark_for_review",
- label="Review"
- )
- ]
-
- def apply_settings(self, project_settings):
- plugin_settings = (
- project_settings["photoshop"]["create"]["AutoImageCreator"]
- )
-
- self.active_on_create = plugin_settings["active_on_create"]
- self.default_variant = plugin_settings["default_variant"]
- self.mark_for_review = plugin_settings["mark_for_review"]
- self.enabled = plugin_settings["enabled"]
-
- def get_detail_description(self):
-        return """Creator for flattened image.
-
-        A studio might configure a simplified publishing workflow. In that
-        case an `image` instance is automatically created which publishes a
-        flattened image from all visible layers.
-
-        An artist might still disable this instance from publishing or from
-        creating a review for it.
-        """
-
- def get_product_name(
- self,
- project_name,
- folder_entity,
- task_entity,
- variant,
- host_name=None,
- instance=None
- ):
- if host_name is None:
- host_name = self.create_context.host_name
-
- task_name = task_type = None
- if task_entity:
- task_name = task_entity["name"]
- task_type = task_entity["taskType"]
-
- dynamic_data = prepare_template_data({"layer": "{layer}"})
-
- product_name = get_product_name(
- project_name,
- task_name,
- task_type,
- host_name,
- self.product_type,
- variant,
- dynamic_data=dynamic_data
- )
- return clean_product_name(product_name)
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_image.py
deleted file mode 100644
index 0170306301..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_image.py
+++ /dev/null
@@ -1,265 +0,0 @@
-import re
-
-from ayon_core.lib import BoolDef
-from ayon_core.pipeline import (
- Creator,
- CreatedInstance,
- CreatorError
-)
-from ayon_core.lib import prepare_template_data
-from ayon_core.pipeline.create import PRODUCT_NAME_ALLOWED_SYMBOLS
-from ayon_photoshop import api
-from ayon_photoshop.api.pipeline import cache_and_get_instances
-from ayon_photoshop.lib import clean_product_name
-
-
-class ImageCreator(Creator):
- """Creates image instance for publishing.
-
-    The result of an 'image' instance is an image of all visible layers, or
-    image(s) of selected layers.
- """
- identifier = "image"
- label = "Image"
- product_type = "image"
- description = "Image creator"
-
- # Settings
- default_variants = ""
- mark_for_review = False
- active_on_create = True
-
- def create(self, product_name_from_ui, data, pre_create_data):
- groups_to_create = []
- top_layers_to_wrap = []
- create_empty_group = False
-
- stub = api.stub() # only after PS is up
- if pre_create_data.get("use_selection"):
- try:
- top_level_selected_items = stub.get_selected_layers()
- except ValueError:
- raise CreatorError("Cannot group locked Background layer!")
-
- only_single_item_selected = len(top_level_selected_items) == 1
- if (
- only_single_item_selected or
- pre_create_data.get("create_multiple")):
- for selected_item in top_level_selected_items:
- if selected_item.group:
- groups_to_create.append(selected_item)
- else:
- top_layers_to_wrap.append(selected_item)
- else:
- group = stub.group_selected_layers(product_name_from_ui)
- groups_to_create.append(group)
- else:
- try:
- stub.select_layers(stub.get_layers())
- group = stub.group_selected_layers(product_name_from_ui)
- except ValueError:
- raise CreatorError("Cannot group locked Background layer!")
-
- groups_to_create.append(group)
-
- # create empty group if nothing selected
- if not groups_to_create and not top_layers_to_wrap:
- group = stub.create_group(product_name_from_ui)
- groups_to_create.append(group)
-
- # wrap each top level layer into separate new group
- for layer in top_layers_to_wrap:
- stub.select_layers([layer])
- group = stub.group_selected_layers(layer.name)
- groups_to_create.append(group)
-
- layer_name = ''
- # use artist chosen option OR force layer if more products are created
- # to differentiate them
- use_layer_name = (pre_create_data.get("use_layer_name") or
- len(groups_to_create) > 1)
- for group in groups_to_create:
- product_name = product_name_from_ui # reset to name from creator UI
- layer_names_in_hierarchy = []
- created_group_name = self._clean_highlights(stub, group.name)
-
- if use_layer_name:
- layer_name = re.sub(
- "[^{}]+".format(PRODUCT_NAME_ALLOWED_SYMBOLS),
- "",
- group.name
- )
- if "{layer}" not in product_name.lower():
- product_name += "{Layer}"
-
- layer_fill = prepare_template_data({"layer": layer_name})
- product_name = product_name.format(**layer_fill)
- product_name = clean_product_name(product_name)
-
- if group.long_name:
- for directory in group.long_name[::-1]:
- name = self._clean_highlights(stub, directory)
- layer_names_in_hierarchy.append(name)
-
- data_update = {
- "productName": product_name,
- "members": [str(group.id)],
- "layer_name": layer_name,
- "long_name": "_".join(layer_names_in_hierarchy)
- }
- data.update(data_update)
-
- mark_for_review = (pre_create_data.get("mark_for_review") or
- self.mark_for_review)
- creator_attributes = {"mark_for_review": mark_for_review}
- data.update({"creator_attributes": creator_attributes})
-
- if not self.active_on_create:
- data["active"] = False
-
- new_instance = CreatedInstance(
- self.product_type, product_name, data, self
- )
-
- stub.imprint(new_instance.get("instance_id"),
- new_instance.data_to_store())
- self._add_instance_to_context(new_instance)
- # reusing existing group, need to rename afterwards
- if not create_empty_group:
- stub.rename_layer(group.id,
- stub.PUBLISH_ICON + created_group_name)
-
- def collect_instances(self):
- for instance_data in cache_and_get_instances(self):
- # legacy instances have family=='image'
- creator_id = (instance_data.get("creator_identifier") or
- instance_data.get("family"))
-
- if creator_id == self.identifier:
- instance_data = self._handle_legacy(instance_data)
- instance = CreatedInstance.from_existing(
- instance_data, self
- )
- self._add_instance_to_context(instance)
-
- def update_instances(self, update_list):
- self.log.debug("update_list:: {}".format(update_list))
- for created_inst, _changes in update_list:
- if created_inst.get("layer"):
- # not storing PSItem layer to metadata
- created_inst.pop("layer")
- api.stub().imprint(created_inst.get("instance_id"),
- created_inst.data_to_store())
-
- def remove_instances(self, instances):
- for instance in instances:
- self.host.remove_instance(instance)
- self._remove_instance_from_context(instance)
-
- def get_pre_create_attr_defs(self):
- output = [
- BoolDef("use_selection", default=True,
- label="Create only for selected"),
- BoolDef("create_multiple",
- default=True,
- label="Create separate instance for each selected"),
- BoolDef("use_layer_name",
- default=False,
- label="Use layer name in product"),
- BoolDef(
- "mark_for_review",
- label="Create separate review",
- default=False
- )
- ]
- return output
-
- def get_instance_attr_defs(self):
- return [
- BoolDef(
- "mark_for_review",
- label="Review"
- )
- ]
-
- def apply_settings(self, project_settings):
- plugin_settings = (
- project_settings["photoshop"]["create"]["ImageCreator"]
- )
-
- self.active_on_create = plugin_settings["active_on_create"]
- self.default_variants = plugin_settings["default_variants"]
- self.mark_for_review = plugin_settings["mark_for_review"]
- self.enabled = plugin_settings["enabled"]
-
- def get_detail_description(self):
-        return """Creator for Image instances
-
-        The main publishable item in Photoshop is of the `image` product
-        type. The result of this item (instance) is a picture that can be
-        loaded and used in other DCCs (for example as a single layer in an
-        AfterEffects composition, a reference in Maya etc.).
-
-        There are a couple of options for what to publish:
-        - separate image per selected layer (or group of layers)
-        - one image for all selected layers
-        - all visible layers (groups) flattened into a single image
-
-        In most cases you will want to keep `Create only for selected`
-        toggled on and select what you would like to publish.
-        Toggling this option off allows you to create an instance for all
-        visible layers without needing to select them explicitly.
-
-        Use 'Create separate instance for each selected' to create separate
-        images per selected layer (or group of layers).
-
-        'Use layer name in product' explicitly adds the layer name to the
-        product name. The position of this name is configurable in
-        `project_settings/global/tools/creator/product_name_profiles`.
-        If the layer placeholder ({layer}) is not used in
-        `product_name_profiles` but the layer name should be used (set
-        explicitly in the UI, or implicitly if multiple images are created),
-        it is added in capitalized form as a suffix to the product name.
-
-        Each image can have its own separate review created if necessary via
-        the `Create separate review` toggle.
-        A more common use case is to use the separate `review` instance to
-        create a review from all published items.
-        """
-
- def _handle_legacy(self, instance_data):
- """Converts old instances to new format."""
- if not instance_data.get("members"):
- instance_data["members"] = [instance_data.get("uuid")]
-
- if instance_data.get("uuid"):
- # uuid not needed, replaced with unique instance_id
- api.stub().remove_instance(instance_data.get("uuid"))
- instance_data.pop("uuid")
-
- if not instance_data.get("task"):
- instance_data["task"] = self.create_context.get_current_task_name()
-
- if not instance_data.get("variant"):
- instance_data["variant"] = ''
-
- return instance_data
-
- def _clean_highlights(self, stub, item):
- return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON,
- '')
-
- def get_dynamic_data(
- self,
- project_name,
- folder_entity,
- task_entity,
- variant,
- host_name,
- instance
- ):
- if instance is not None:
- layer_name = instance.get("layer_name")
- if layer_name:
- return {"layer": layer_name}
- return {"layer": "{layer}"}
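
To illustrate the layer-name handling above, here is a minimal sketch of how the `{Layer}` suffix and fill behave; the capitalisation variants below are an assumption standing in for `prepare_template_data`:

    def fill_layer_name(product_name, layer_name):
        # mimic the creator: append a capitalized placeholder when none is present
        if "{layer}" not in product_name.lower():
            product_name += "{Layer}"
        # assumed stand-in for prepare_template_data({"layer": layer_name})
        layer_fill = {
            "layer": layer_name,
            "Layer": layer_name.capitalize(),
            "LAYER": layer_name.upper(),
        }
        return product_name.format(**layer_fill)

    print(fill_layer_name("imageMain", "hero"))         # imageMainHero
    print(fill_layer_name("image{Layer}Main", "hero"))  # imageHeroMain
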
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_review.py b/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_review.py
deleted file mode 100644
index 60c64b3831..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_review.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from ayon_photoshop.lib import PSAutoCreator
-
-
-class ReviewCreator(PSAutoCreator):
- """Creates review instance which might be disabled from publishing."""
- identifier = "review"
- product_type = "review"
-
- default_variant = "Main"
-
- def get_detail_description(self):
- return """Auto creator for review.
-
-        Photoshop review is created from all published images, or from all
-        visible layers if no `image` instances were created.
-
-        The review might be disabled by an artist (the instance shouldn't be
-        deleted as it will get recreated in the next publish either way).
- """
-
- def apply_settings(self, project_settings):
- plugin_settings = (
- project_settings["photoshop"]["create"]["ReviewCreator"]
- )
-
- self.default_variant = plugin_settings["default_variant"]
- self.active_on_create = plugin_settings["active_on_create"]
- self.enabled = plugin_settings["enabled"]
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_workfile.py b/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_workfile.py
deleted file mode 100644
index ce44a1ad2d..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_workfile.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from ayon_photoshop.lib import PSAutoCreator
-
-
-class WorkfileCreator(PSAutoCreator):
- identifier = "workfile"
- product_type = "workfile"
-
- default_variant = "Main"
-
- def get_detail_description(self):
- return """Auto creator for workfile.
-
-        It is expected that each publish will also publish its source workfile
-        for safekeeping. This creator triggers automatically without the need
-        for an artist to remember and trigger it explicitly.
-
-        The workfile instance can be disabled if publishing the workfile is
-        not required. (The instance shouldn't be deleted though, as it will be
-        recreated in the next publish automatically.)
- """
-
- def apply_settings(self, project_settings):
- plugin_settings = (
- project_settings["photoshop"]["create"]["WorkfileCreator"]
- )
-
- self.active_on_create = plugin_settings["active_on_create"]
- self.enabled = plugin_settings["enabled"]
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image.py
deleted file mode 100644
index e3d80f6957..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import re
-
-from ayon_core.pipeline import get_representation_path
-from ayon_photoshop import api as photoshop
-from ayon_photoshop.api import get_unique_layer_name
-
-
-class ImageLoader(photoshop.PhotoshopLoader):
- """Load images
-
- Stores the imported asset in a container named after the asset.
- """
-
- product_types = {"image", "render"}
- representations = {"*"}
-
- def load(self, context, name=None, namespace=None, data=None):
- stub = self.get_stub()
- layer_name = get_unique_layer_name(
- stub.get_layers(),
- context["folder"]["name"],
- name
- )
- with photoshop.maintained_selection():
- path = self.filepath_from_context(context)
- layer = self.import_layer(path, layer_name, stub)
-
- self[:] = [layer]
- namespace = namespace or layer_name
-
- return photoshop.containerise(
- name,
- namespace,
- layer,
- context,
- self.__class__.__name__
- )
-
- def update(self, container, context):
- """ Switch asset or change version """
- stub = self.get_stub()
-
- layer = container.pop("layer")
-
- repre_entity = context["representation"]
- folder_name = context["folder"]["name"]
- product_name = context["product"]["name"]
-
- namespace_from_container = re.sub(r'_\d{3}$', '',
- container["namespace"])
- layer_name = "{}_{}".format(folder_name, product_name)
- # switching assets
- if namespace_from_container != layer_name:
- layer_name = get_unique_layer_name(
- stub.get_layers(), folder_name, product_name
- )
- else: # switching version - keep same name
- layer_name = container["namespace"]
-
- path = get_representation_path(repre_entity)
- with photoshop.maintained_selection():
- stub.replace_smart_object(
- layer, path, layer_name
- )
-
- stub.imprint(
- layer.id, {"representation": repre_entity["id"]}
- )
-
- def remove(self, container):
- """
- Removes element from scene: deletes layer + removes from Headline
- Args:
- container (dict): container to be removed - used to get layer_id
- """
- stub = self.get_stub()
-
- layer = container.pop("layer")
- stub.imprint(layer.id, {})
- stub.delete_layer(layer.id)
-
- def switch(self, container, context):
- self.update(container, context)
-
- def import_layer(self, file_name, layer_name, stub):
- return stub.import_smart_object(file_name, layer_name)
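
The asset-versus-version decision in `update` above hinges on stripping a numeric suffix from the container namespace; a minimal standalone sketch (the `_NNN` counter is assumed to be what `get_unique_layer_name` appends):

    import re

    def is_asset_switch(container_namespace, folder_name, product_name):
        # drop the trailing _### counter before comparing
        base = re.sub(r'_\d{3}$', '', container_namespace)
        return base != "{}_{}".format(folder_name, product_name)

    print(is_asset_switch("hero_imageMain_001", "hero", "imageMain"))     # False -> same asset, version switch
    print(is_asset_switch("hero_imageMain_001", "villain", "imageMain"))  # True  -> different asset
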
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image_from_sequence.py b/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image_from_sequence.py
deleted file mode 100644
index f69dce26f6..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image_from_sequence.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import os
-
-import qargparse
-
-from ayon_photoshop import api as photoshop
-from ayon_photoshop.api import get_unique_layer_name
-
-
-class ImageFromSequenceLoader(photoshop.PhotoshopLoader):
- """ Load specific image from sequence
-
-    Used only as a quick load of a reference file from a sequence.
-
-    The plain ImageLoader picks the first frame of a sequence.
-
-    Loads only existing files - it is currently not possible to limit
-    loaders to single selection instead of multiselection. If the user
-    selects multiple representations, the file list for all of them is
-    provided, but the selection is only a single file. This loader is
-    triggered multiple times, but the selected name matches only one path.
-
-    The loader doesn't do containerization as there is currently no data
-    model for a 'frame of rendered files' (only a rendered sequence), so an
-    update would be difficult.
- """
-
- product_types = {"render"}
- representations = {"*"}
- options = []
-
- def load(self, context, name=None, namespace=None, data=None):
-
- path = self.filepath_from_context(context)
- if data.get("frame"):
- path = os.path.join(
- os.path.dirname(path), data["frame"]
- )
- if not os.path.exists(path):
- return
-
- stub = self.get_stub()
- layer_name = get_unique_layer_name(
- stub.get_layers(), context["folder"]["name"], name
- )
-
- with photoshop.maintained_selection():
- layer = stub.import_smart_object(path, layer_name)
-
- self[:] = [layer]
- namespace = namespace or layer_name
-
- return namespace
-
- @classmethod
- def get_options(cls, repre_contexts):
- """
- Returns list of files for selected 'repre_contexts'.
-
- It returns only files with same extension as in context as it is
- expected that context points to sequence of frames.
-
- Returns:
- (list) of qargparse.Choice
- """
- files = []
- for context in repre_contexts:
- fname = cls.filepath_from_context(context)
- _, file_extension = os.path.splitext(fname)
-
- for file_name in os.listdir(os.path.dirname(fname)):
- if not file_name.endswith(file_extension):
- continue
- files.append(file_name)
-
- # return selection only if there is something
- if not files or len(files) <= 1:
- return []
-
- return [
- qargparse.Choice(
- "frame",
- label="Select specific file",
- items=files,
- default=0,
- help="Which frame should be loaded?"
- )
- ]
-
- def update(self, container, context):
- """No update possible, not containerized."""
- pass
-
-    def remove(self, container):
-        """No removal possible, not containerized."""
- pass
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_reference.py b/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_reference.py
deleted file mode 100644
index 21076f6a4f..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_reference.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import re
-
-from ayon_core.pipeline import get_representation_path
-from ayon_photoshop import api as photoshop
-from ayon_photoshop.api import get_unique_layer_name
-
-
-class ReferenceLoader(photoshop.PhotoshopLoader):
- """Load reference images
-
- Stores the imported asset in a container named after the asset.
-
- Inheriting from 'load_image' didn't work because of
- "Cannot write to closing transport", possible refactor.
- """
-
- product_types = {"image", "render"}
- representations = {"*"}
-
- def load(self, context, name=None, namespace=None, data=None):
- stub = self.get_stub()
- layer_name = get_unique_layer_name(
- stub.get_layers(), context["folder"]["name"], name
- )
- with photoshop.maintained_selection():
- path = self.filepath_from_context(context)
- layer = self.import_layer(path, layer_name, stub)
-
- self[:] = [layer]
- namespace = namespace or layer_name
-
- return photoshop.containerise(
- name,
- namespace,
- layer,
- context,
- self.__class__.__name__
- )
-
- def update(self, container, context):
- """ Switch asset or change version."""
- stub = self.get_stub()
- layer = container.pop("layer")
-
- folder_name = context["folder"]["name"]
- product_name = context["product"]["name"]
- repre_entity = context["representation"]
-
- namespace_from_container = re.sub(r'_\d{3}$', '',
- container["namespace"])
- layer_name = "{}_{}".format(folder_name, product_name)
- # switching assets
- if namespace_from_container != layer_name:
- layer_name = get_unique_layer_name(
- stub.get_layers(), folder_name, product_name
- )
- else: # switching version - keep same name
- layer_name = container["namespace"]
-
- path = get_representation_path(repre_entity)
- with photoshop.maintained_selection():
- stub.replace_smart_object(
- layer, path, layer_name
- )
-
- stub.imprint(
- layer.id, {"representation": repre_entity["id"]}
- )
-
- def remove(self, container):
- """Removes element from scene: deletes layer + removes from Headline
-
- Args:
- container (dict): container to be removed - used to get layer_id
- """
- stub = self.get_stub()
- layer = container.pop("layer")
- stub.imprint(layer.id, {})
- stub.delete_layer(layer.id)
-
- def switch(self, container, context):
- self.update(container, context)
-
- def import_layer(self, file_name, layer_name, stub):
- return stub.import_smart_object(
- file_name, layer_name, as_reference=True
- )
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/closePS.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/closePS.py
deleted file mode 100644
index 2cdc9fa1e8..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/closePS.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Close PS after publish. For Webpublishing only."""
-import pyblish.api
-
-from ayon_photoshop import api as photoshop
-
-
-class ClosePS(pyblish.api.ContextPlugin):
- """Close PS after publish. For Webpublishing only.
- """
-
- order = pyblish.api.IntegratorOrder + 14
- label = "Close PS"
- optional = True
- active = True
-
- hosts = ["photoshop"]
- targets = ["automated"]
-
- def process(self, context):
- self.log.info("ClosePS")
-
- stub = photoshop.stub()
- self.log.info("Shutting down PS")
- stub.save()
- stub.close()
- self.log.info("PS closed")
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image.py
deleted file mode 100644
index 23a71bdf46..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import pyblish.api
-
-from ayon_photoshop import api as photoshop
-from ayon_core.pipeline.create import get_product_name
-
-
-class CollectAutoImage(pyblish.api.ContextPlugin):
-    """Creates auto image in non-artist-based publishes (Webpublisher).
- """
-
- label = "Collect Auto Image"
- hosts = ["photoshop"]
- order = pyblish.api.CollectorOrder + 0.2
-
- targets = ["automated"]
-
- def process(self, context):
- for instance in context:
- creator_identifier = instance.data.get("creator_identifier")
- if creator_identifier and creator_identifier == "auto_image":
- self.log.debug("Auto image instance found, won't create new")
- return
-
- project_name = context.data["projectName"]
- proj_settings = context.data["project_settings"]
- host_name = context.data["hostName"]
- folder_entity = context.data["folderEntity"]
- task_entity = context.data["taskEntity"]
- task_name = task_type = None
- if task_entity:
- task_name = task_entity["name"]
- task_type = task_entity["taskType"]
-
- auto_creator = proj_settings.get(
- "photoshop", {}).get(
- "create", {}).get(
- "AutoImageCreator", {})
-
- if not auto_creator or not auto_creator["enabled"]:
- self.log.debug("Auto image creator disabled, won't create new")
- return
-
- stub = photoshop.stub()
- stored_items = stub.get_layers_metadata()
- for item in stored_items:
- if item.get("creator_identifier") == "auto_image":
- if not item.get("active"):
- self.log.debug("Auto_image instance disabled")
- return
-
- layer_items = stub.get_layers()
-
- publishable_ids = [layer.id for layer in layer_items
- if layer.visible]
-
- # collect stored image instances
- instance_names = []
- for layer_item in layer_items:
- layer_meta_data = stub.read(layer_item, stored_items)
-
- # Skip layers without metadata.
- if layer_meta_data is None:
- continue
-
- # Skip containers.
- if "container" in layer_meta_data["id"]:
- continue
-
- # active might not be in legacy meta
- if layer_meta_data.get("active", True) and layer_item.visible:
- instance_names.append(layer_meta_data["productName"])
-
- if len(instance_names) == 0:
- variants = proj_settings.get(
- "photoshop", {}).get(
- "create", {}).get(
- "CreateImage", {}).get(
- "default_variants", [''])
- product_type = "image"
-
- variant = context.data.get("variant") or variants[0]
-
- product_name = get_product_name(
- project_name,
- task_name,
- task_type,
- host_name,
- product_type,
- variant,
- )
-
- instance = context.create_instance(product_name)
- instance.data["folderPath"] = folder_entity["path"]
- instance.data["productType"] = product_type
- instance.data["productName"] = product_name
- instance.data["ids"] = publishable_ids
- instance.data["publish"] = True
- instance.data["creator_identifier"] = "auto_image"
- instance.data["family"] = product_type
- instance.data["families"] = [product_type]
-
- if auto_creator["mark_for_review"]:
- instance.data["creator_attributes"] = {"mark_for_review": True}
- instance.data["families"].append("review")
-
- self.log.info("auto image instance: {} ".format(instance.data))
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image_refresh.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image_refresh.py
deleted file mode 100644
index 108b65232a..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image_refresh.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import pyblish.api
-
-from ayon_photoshop import api as photoshop
-
-
-class CollectAutoImageRefresh(pyblish.api.ContextPlugin):
-    """Refreshes the auto_image instance with currently visible layers.
- """
-
- label = "Collect Auto Image Refresh"
- hosts = ["photoshop"]
- order = pyblish.api.CollectorOrder + 0.2
-
- def process(self, context):
- for instance in context:
- creator_identifier = instance.data.get("creator_identifier")
- if creator_identifier and creator_identifier == "auto_image":
- self.log.debug("Auto image instance found, won't create new")
- # refresh existing auto image instance with current visible
- publishable_ids = [layer.id for layer in photoshop.stub().get_layers() # noqa
- if layer.visible]
- instance.data["ids"] = publishable_ids
- return
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_review.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_review.py
deleted file mode 100644
index 8b84e69309..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_review.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-Requires:
- None
-
-Provides:
- instance -> productType ("review")
-"""
-import pyblish.api
-
-from ayon_photoshop import api as photoshop
-from ayon_core.pipeline.create import get_product_name
-
-
-class CollectAutoReview(pyblish.api.ContextPlugin):
-    """Create review instance in a non-artist-based workflow.
-
- Called only if PS is triggered in Webpublisher or in tests.
- """
-
- label = "Collect Auto Review"
- hosts = ["photoshop"]
- order = pyblish.api.CollectorOrder + 0.2
- targets = ["automated"]
-
- publish = True
-
- def process(self, context):
- product_type = "review"
- has_review = False
- for instance in context:
- if instance.data["productType"] == product_type:
- self.log.debug("Review instance found, won't create new")
- has_review = True
-
- creator_attributes = instance.data.get("creator_attributes", {})
- if (creator_attributes.get("mark_for_review") and
- "review" not in instance.data["families"]):
- instance.data["families"].append("review")
-
- if has_review:
- return
-
- stub = photoshop.stub()
- stored_items = stub.get_layers_metadata()
- for item in stored_items:
- if item.get("creator_identifier") == product_type:
- if not item.get("active"):
- self.log.debug("Review instance disabled")
- return
-
- auto_creator = context.data["project_settings"].get(
- "photoshop", {}).get(
- "create", {}).get(
- "ReviewCreator", {})
-
- if not auto_creator or not auto_creator["enabled"]:
- self.log.debug("Review creator disabled, won't create new")
- return
-
- variant = (context.data.get("variant") or
- auto_creator["default_variant"])
-
- project_name = context.data["projectName"]
- proj_settings = context.data["project_settings"]
- host_name = context.data["hostName"]
- folder_entity = context.data["folderEntity"]
- task_entity = context.data["taskEntity"]
- task_name = task_type = None
- if task_entity:
- task_name = task_entity["name"]
- task_type = task_entity["taskType"]
-
- product_name = get_product_name(
- project_name,
- task_name,
- task_type,
- host_name,
- product_type,
- variant,
- project_settings=proj_settings
- )
-
- instance = context.create_instance(product_name)
- instance.data.update({
- "label": product_name,
- "name": product_name,
- "productName": product_name,
- "productType": product_type,
- "family": product_type,
- "families": [product_type],
- "representations": [],
- "folderPath": folder_entity["path"],
- "publish": self.publish
- })
-
- self.log.debug("auto review created::{}".format(instance.data))
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_workfile.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_workfile.py
deleted file mode 100644
index 1bf7c1a600..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_workfile.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import os
-import pyblish.api
-
-from ayon_photoshop import api as photoshop
-from ayon_core.pipeline.create import get_product_name
-
-
-class CollectAutoWorkfile(pyblish.api.ContextPlugin):
- """Collect current script for publish."""
-
- order = pyblish.api.CollectorOrder + 0.2
- label = "Collect Workfile"
- hosts = ["photoshop"]
-
- targets = ["automated"]
-
- def process(self, context):
- product_type = "workfile"
- file_path = context.data["currentFile"]
- _, ext = os.path.splitext(file_path)
- staging_dir = os.path.dirname(file_path)
- base_name = os.path.basename(file_path)
- workfile_representation = {
- "name": ext[1:],
- "ext": ext[1:],
- "files": base_name,
- "stagingDir": staging_dir,
- }
-
- for instance in context:
- if instance.data["productType"] == product_type:
- self.log.debug("Workfile instance found, won't create new")
- instance.data.update({
- "label": base_name,
- "name": base_name,
- "representations": [],
- })
-
- # creating representation
- _, ext = os.path.splitext(file_path)
- instance.data["representations"].append(
- workfile_representation)
-
- return
-
- stub = photoshop.stub()
- stored_items = stub.get_layers_metadata()
- for item in stored_items:
- if item.get("creator_identifier") == product_type:
- if not item.get("active"):
- self.log.debug("Workfile instance disabled")
- return
-
- project_name = context.data["projectName"]
- proj_settings = context.data["project_settings"]
- auto_creator = proj_settings.get(
- "photoshop", {}).get(
- "create", {}).get(
- "WorkfileCreator", {})
-
- if not auto_creator or not auto_creator["enabled"]:
- self.log.debug("Workfile creator disabled, won't create new")
- return
-
- # context.data["variant"] might come only from collect_batch_data
- variant = (context.data.get("variant") or
- auto_creator["default_variant"])
-
- task_name = context.data["task"]
- host_name = context.data["hostName"]
- folder_entity = context.data["folderEntity"]
- task_entity = context.data["taskEntity"]
- task_name = task_type = None
- if task_entity:
- task_name = task_entity["name"]
- task_type = task_entity["taskType"]
-
- product_name = get_product_name(
- project_name,
- task_name,
- task_type,
- host_name,
- product_type,
- variant,
- project_settings=proj_settings
- )
-
- # Create instance
- instance = context.create_instance(product_name)
- instance.data.update({
- "label": base_name,
- "name": base_name,
- "productName": product_name,
- "productType": product_type,
- "family": product_type,
- "families": [product_type],
- "representations": [],
- "folderPath": folder_entity["path"]
- })
-
- # creating representation
- instance.data["representations"].append(workfile_representation)
-
- self.log.debug("auto workfile review created:{}".format(instance.data))
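
The workfile representation built above is a plain single-file mapping; a minimal standalone sketch with a hypothetical path:

    import os

    def workfile_representation(file_path):
        # one representation describing the workfile itself
        _, ext = os.path.splitext(file_path)
        return {
            "name": ext[1:],
            "ext": ext[1:],
            "files": os.path.basename(file_path),
            "stagingDir": os.path.dirname(file_path),
        }

    print(workfile_representation("C:/projects/demo/work/demo_v001.psd"))
    # {'name': 'psd', 'ext': 'psd', 'files': 'demo_v001.psd', 'stagingDir': 'C:/projects/demo/work'}
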
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_batch_data.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_batch_data.py
deleted file mode 100644
index 527a7d516a..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_batch_data.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""Parses batch context from json and continues in publish process.
-
-Provides:
- context -> Loaded batch file.
- - folderPath
- - task (task name)
- - taskType
- - project_name
- - variant
-
-Code is practically a copy of `openpype/hosts/webpublish/collect_batch_data` as
-webpublisher should eventually be ejected as an addon, i.e. the mentioned
-plugin shouldn't be pushed into the general publish plugins.
-"""
-
-import os
-
-import pyblish.api
-
-from ayon_webpublisher.lib import (
- get_batch_context_info,
- parse_json
-)
-from ayon_core.lib import is_in_tests
-
-
-class CollectBatchData(pyblish.api.ContextPlugin):
- """Collect batch data from json stored in 'AYON_PUBLISH_DATA' env dir.
-
- The directory must contain 'manifest.json' file where batch data should be
- stored.
- """
- # must be really early, context values are only in json file
- order = pyblish.api.CollectorOrder - 0.495
- label = "Collect batch data"
- hosts = ["photoshop"]
- targets = ["webpublish"]
-
- def process(self, context):
- self.log.info("CollectBatchData")
- batch_dir = (
- os.environ.get("AYON_PUBLISH_DATA")
- or os.environ.get("OPENPYPE_PUBLISH_DATA")
- )
- if is_in_tests():
- self.log.debug("Automatic testing, no batch data, skipping")
- return
-
- assert batch_dir, (
- "Missing `AYON_PUBLISH_DATA`")
-
- assert os.path.exists(batch_dir), \
- "Folder {} doesn't exist".format(batch_dir)
-
- project_name = os.environ.get("AYON_PROJECT_NAME")
- if project_name is None:
- raise AssertionError(
- "Environment `AYON_PROJECT_NAME` was not found."
- "Could not set project `root` which may cause issues."
- )
-
- batch_data = parse_json(os.path.join(batch_dir, "manifest.json"))
-
- context.data["batchDir"] = batch_dir
- context.data["batchData"] = batch_data
-
- folder_path, task_name, task_type = get_batch_context_info(
- batch_data["context"]
- )
-
- os.environ["AYON_FOLDER_PATH"] = folder_path
- os.environ["AYON_TASK_NAME"] = task_name
-
- context.data["folderPath"] = folder_path
- context.data["task"] = task_name
- context.data["taskType"] = task_type
- context.data["project_name"] = project_name
- context.data["variant"] = batch_data["variant"]
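
For context, reading the batch manifest comes down to loading a JSON file from the directory pointed to by the environment variable; a minimal sketch (the `parse_json` stand-in and manifest keys follow the plugin above, everything else is an assumption):

    import json
    import os

    def parse_json(path):
        # minimal stand-in for ayon_webpublisher.lib.parse_json
        with open(path) as stream:
            return json.load(stream)

    batch_dir = (
        os.environ.get("AYON_PUBLISH_DATA")
        or os.environ.get("OPENPYPE_PUBLISH_DATA")
    )
    if batch_dir:
        batch_data = parse_json(os.path.join(batch_dir, "manifest.json"))
        print(batch_data.get("variant"), batch_data.get("context"))
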
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_color_coded_instances.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_color_coded_instances.py
deleted file mode 100644
index 072eb82179..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_color_coded_instances.py
+++ /dev/null
@@ -1,269 +0,0 @@
-import os
-import re
-
-import pyblish.api
-
-from ayon_core.lib import prepare_template_data, is_in_tests
-from ayon_core.settings import get_project_settings
-from ayon_photoshop import api as photoshop
-
-
-class CollectColorCodedInstances(pyblish.api.ContextPlugin):
- """Creates instances for layers marked by configurable color.
-
-    Used in remote publishing when artists mark publishable layers by color-
-    coding. Top level layers (groups) must be marked by a specific color to be
-    published as an instance of the 'image' product type.
-
-    Can add a group for all publishable layers to allow creation of a flattened
-    image. (It cannot contain the special background layer as it cannot be grouped!)
-
- Based on value `create_flatten_image` from Settings:
- - "yes": create flattened 'image' product of all publishable layers + create
- 'image' product per publishable layer
- - "only": create ONLY flattened 'image' product of all publishable layers
- - "no": do not create flattened 'image' product at all,
- only separate products per marked layer.
-
- Identifier:
- id (str): "ayon.create.instance"
- """
-
- label = "Collect Color-coded Instances"
- order = pyblish.api.CollectorOrder
- hosts = ["photoshop"]
- targets = ["automated"]
- settings_category = "photoshop"
-
- # configurable by Settings
- color_code_mapping = []
- create_flatten_image = "no"
- flatten_product_name_template = ""
-
- def process(self, context):
- self.log.info("CollectColorCodedInstances")
- batch_dir = (
- os.environ.get("AYON_PUBLISH_DATA")
- or os.environ.get("OPENPYPE_PUBLISH_DATA")
- )
- if (
- is_in_tests()
- and (
- not batch_dir or not os.path.exists(batch_dir)
- )
- ):
- self.log.debug("Automatic testing, no batch data, skipping")
- return
-
- existing_product_names = self._get_existing_product_names(context)
-
- # from CollectBatchData
- folder_path = context.data["folderPath"]
- task_name = context.data["task"]
- variant = context.data["variant"]
- project_name = context.data["projectEntity"]["name"]
-
- naming_conventions = get_project_settings(project_name).get(
- "photoshop", {}).get(
- "publish", {}).get(
- "ValidateNaming", {})
-
- stub = photoshop.stub()
- layers = stub.get_layers()
-
- publishable_layers = []
- created_instances = []
- product_type_from_settings = None
- for layer in layers:
- self.log.debug("Layer:: {}".format(layer))
- if layer.parents:
- self.log.debug("!!! Not a top layer, skip")
- continue
-
- if not layer.visible:
- self.log.debug("Not visible, skip")
- continue
-
- resolved_product_type, resolved_product_template = (
- self._resolve_mapping(layer)
- )
-
- if not resolved_product_template or not resolved_product_type:
- self.log.debug("!!! Not found product type or template, skip")
- continue
-
- if not product_type_from_settings:
- product_type_from_settings = resolved_product_type
-
- fill_pairs = {
- "variant": variant,
- "family": resolved_product_type,
- "product": {"type": resolved_product_type},
- "task": task_name,
- "layer": layer.clean_name
- }
-
- product_name = resolved_product_template.format(
- **prepare_template_data(fill_pairs))
-
- product_name = self._clean_product_name(
- stub, naming_conventions, product_name, layer
- )
-
- if product_name in existing_product_names:
- self.log.info((
- "Product {} already created, skipping."
- ).format(product_name))
- continue
-
- if self.create_flatten_image != "flatten_only":
- instance = self._create_instance(
- context,
- layer,
- resolved_product_type,
- folder_path,
- product_name,
- task_name
- )
- created_instances.append(instance)
-
- existing_product_names.append(product_name)
- publishable_layers.append(layer)
-
- if self.create_flatten_image != "no" and publishable_layers:
- self.log.debug("create_flatten_image")
- if not self.flatten_product_name_template:
- self.log.warning("No template for flatten image")
- return
-
- fill_pairs.pop("layer")
- product_name = self.flatten_product_name_template.format(
- **prepare_template_data(fill_pairs))
-
- first_layer = publishable_layers[0] # dummy layer
- first_layer.name = product_name
- product_type = product_type_from_settings # inherit product type
- instance = self._create_instance(
- context,
- first_layer,
- product_type,
- folder_path,
- product_name,
- task_name
- )
- instance.data["ids"] = [layer.id for layer in publishable_layers]
- created_instances.append(instance)
-
- for instance in created_instances:
- # Produce diagnostic message for any graphical
- # user interface interested in visualising it.
- self.log.info("Found: \"%s\" " % instance.data["name"])
- self.log.info("instance: {} ".format(instance.data))
-
- def _get_existing_product_names(self, context):
- """Collect manually created instances from workfile.
-
-        There shouldn't be any, as Webpublisher bypasses publishing via OpenPype,
-        but there might be some if a workfile published through OpenPype is reused.
- """
- existing_product_names = []
- for instance in context:
- if instance.data.get("publish") is not False:
- existing_product_names.append(instance.data.get("productName"))
-
- return existing_product_names
-
- def _create_instance(
- self,
- context,
- layer,
- product_type,
- folder_path,
- product_name,
- task_name
- ):
- instance = context.create_instance(layer.name)
- instance.data["publish"] = True
- instance.data["productType"] = product_type
- instance.data["productName"] = product_name
- instance.data["folderPath"] = folder_path
- instance.data["task"] = task_name
- instance.data["layer"] = layer
- instance.data["family"] = product_type
- instance.data["families"] = [product_type]
-
- return instance
-
- def _resolve_mapping(self, layer):
- """Matches 'layer' color code and name to mapping.
-
-        If both color code AND name regex are configured, BOTH must match.
-        If a layer matches multiple mappings, only the first is used!
- """
- product_type_list = []
- product_name_list = []
- for mapping in self.color_code_mapping:
- if (
- mapping["color_code"]
- and layer.color_code not in mapping["color_code"]
- ):
- continue
-
- if (
- mapping["layer_name_regex"]
- and not any(
- re.search(pattern, layer.name)
- for pattern in mapping["layer_name_regex"]
- )
- ):
- continue
-
- product_type_list.append(mapping["product_type"])
- product_name_list.append(mapping["product_name_template"])
-
- if len(product_name_list) > 1:
- self.log.warning(
- "Multiple mappings found for '{}'".format(layer.name)
- )
- self.log.warning("Only first product name template used!")
-            product_name_list[:] = [product_name_list[0]]
-
- if len(product_type_list) > 1:
- self.log.warning(
- "Multiple mappings found for '{}'".format(layer.name)
- )
- self.log.warning("Only first product type used!")
-            product_type_list[:] = [product_type_list[0]]
-
- resolved_product_template = None
- if product_name_list:
- resolved_product_template = product_name_list.pop()
-
- product_type = None
- if product_type_list:
- product_type = product_type_list.pop()
-
- self.log.debug("resolved_product_type {}".format(product_type))
- self.log.debug("resolved_product_template {}".format(
- resolved_product_template))
- return product_type, resolved_product_template
-
- def _clean_product_name(
- self, stub, naming_conventions, product_name, layer
- ):
- """Cleans invalid characters from product name and layer name."""
- if re.search(naming_conventions["invalid_chars"], product_name):
- product_name = re.sub(
- naming_conventions["invalid_chars"],
- naming_conventions["replace_char"],
- product_name
- )
- layer_name = re.sub(
- naming_conventions["invalid_chars"],
- naming_conventions["replace_char"],
- layer.clean_name
- )
- layer.name = layer_name
- stub.rename_layer(layer.id, layer_name)
-
- return product_name
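
The `_resolve_mapping` logic above reduces to two optional filters where the first match wins; a minimal standalone sketch with a hypothetical mapping entry:

    import re

    def resolve_mapping(layer_name, layer_color_code, color_code_mapping):
        for mapping in color_code_mapping:
            if (mapping["color_code"]
                    and layer_color_code not in mapping["color_code"]):
                continue
            if (mapping["layer_name_regex"]
                    and not any(re.search(pattern, layer_name)
                                for pattern in mapping["layer_name_regex"])):
                continue
            # first matching mapping wins
            return mapping["product_type"], mapping["product_name_template"]
        return None, None

    mappings = [{
        "color_code": ["red"],
        "layer_name_regex": [],
        "product_type": "image",
        "product_name_template": "image{Layer}{Variant}",
    }]
    print(resolve_mapping("BG_01", "red", mappings))   # ('image', 'image{Layer}{Variant}')
    print(resolve_mapping("BG_01", "blue", mappings))  # (None, None)
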
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_current_file.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_current_file.py
deleted file mode 100644
index 02f2217f75..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_current_file.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import os
-
-import pyblish.api
-
-from ayon_photoshop import api as photoshop
-
-
-class CollectCurrentFile(pyblish.api.ContextPlugin):
- """Inject the current working file into context"""
-
- order = pyblish.api.CollectorOrder - 0.49
- label = "Current File"
- hosts = ["photoshop"]
-
- def process(self, context):
- context.data["currentFile"] = os.path.normpath(
- photoshop.stub().get_active_document_full_name()
- ).replace("\\", "/")
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_extension_version.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_extension_version.py
deleted file mode 100644
index 90415e9245..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_extension_version.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import os
-import re
-import pyblish.api
-
-from ayon_photoshop import api as photoshop
-
-
-class CollectExtensionVersion(pyblish.api.ContextPlugin):
- """ Pulls and compares version of installed extension.
-
-    It is recommended to use the same extension as in the provided OpenPype code.
-
-    Please use Anastasiy’s Extension Manager or ZXPInstaller to update the
-    extension in case of an error.
-
-    You can locate extension.zxp in your installed OpenPype code in
-    `repos/avalon-core/avalon/photoshop`.
- """
- # This technically should be a validator, but other collectors might be
- # impacted with usage of obsolete extension, so collector that runs first
- # was chosen
- order = pyblish.api.CollectorOrder - 0.5
- label = "Collect extension version"
- hosts = ["photoshop"]
-
- optional = True
- active = True
-
- def process(self, context):
- installed_version = photoshop.stub().get_extension_version()
-
- if not installed_version:
- raise ValueError("Unknown version, probably old extension")
-
- manifest_url = os.path.join(os.path.dirname(photoshop.__file__),
- "extension", "CSXS", "manifest.xml")
-
- if not os.path.exists(manifest_url):
- self.log.debug("Unable to locate extension manifest, not checking")
- return
-
- expected_version = None
- with open(manifest_url) as fp:
- content = fp.read()
-
- found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
- content)
- if found:
- expected_version = found[0][1]
-
- if expected_version != installed_version:
- msg = "Expected version '{}' found '{}'\n".format(
- expected_version, installed_version)
- msg += "Please update your installed extension, it might not work "
- msg += "properly."
-
- raise ValueError(msg)
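
The version check above just scrapes the bundle version out of the manifest XML with a regex; a minimal sketch over a hypothetical manifest snippet:

    import re

    manifest_content = '<ExtensionManifest ExtensionBundleVersion="1.1.4">'
    found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")', manifest_content)
    expected_version = found[0][1] if found else None
    print(expected_version)  # 1.1.4
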
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_image.py
deleted file mode 100644
index ed6af6f7d3..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_image.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import pyblish.api
-
-from ayon_photoshop import api
-
-
-class CollectImage(pyblish.api.InstancePlugin):
-    """Collect layer metadata into an instance.
-
-    Used later in validation.
- """
- order = pyblish.api.CollectorOrder + 0.200
- label = 'Collect Image'
-
- hosts = ["photoshop"]
- families = ["image"]
-
- def process(self, instance):
- if instance.data.get("members"):
- layer = api.stub().get_layer(instance.data["members"][0])
- instance.data["layer"] = layer
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_published_version.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_published_version.py
deleted file mode 100644
index 84c9fa3e62..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_published_version.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Collects published version of workfile and increments it.
-
-For synchronization of the published image and workfile versions it is required
-to store the workfile version from the workfile file name in
-context.data["version"]. In remote publishing this name is unreliable (the
-artist might not follow the naming convention etc.), so the last published
-workfile version for the particular workfile product is used instead.
-
-This plugin runs only in remote publishing (eg. Webpublisher).
-
-Requires:
- context.data["folderEntity"]
-
-Provides:
- context["version"] - incremented latest published workfile version
-"""
-
-import pyblish.api
-import ayon_api
-
-from ayon_core.pipeline.version_start import get_versioning_start
-
-
-class CollectPublishedVersion(pyblish.api.ContextPlugin):
- """Collects published version of workfile and increments it."""
-
- order = pyblish.api.CollectorOrder + 0.190
- label = "Collect published version"
- hosts = ["photoshop"]
- targets = ["automated"]
-
- def process(self, context):
- workfile_product_name = None
- for instance in context:
- if instance.data["productType"] == "workfile":
- workfile_product_name = instance.data["productName"]
- break
-
- if not workfile_product_name:
- self.log.warning("No workfile instance found, "
- "synchronization of version will not work.")
- return
-
- project_name = context.data["projectName"]
- folder_id = context.data["folderEntity"]["id"]
-
- version_entity = ayon_api.get_last_version_by_product_name(
- project_name, workfile_product_name, folder_id
- )
-
- if version_entity:
- version_int = int(version_entity["version"]) + 1
- else:
- version_int = get_versioning_start(
- project_name,
- "photoshop",
- task_name=context.data["task"],
- task_type=context.data["taskType"],
- project_settings=context.data["project_settings"]
- )
-
- self.log.debug(f"Setting {version_int} to context.")
- context.data["version"] = version_int
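
The version resolution above is simply "last published + 1, otherwise the start value"; a minimal sketch with `get_versioning_start` reduced to a plain default:

    def next_workfile_version(last_published_version, version_start=1):
        # increment the last published workfile version, or fall back to the start
        if last_published_version is not None:
            return int(last_published_version) + 1
        return version_start

    print(next_workfile_version(12))    # 13
    print(next_workfile_version(None))  # 1
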
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_review.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_review.py
deleted file mode 100644
index d9a29f9b74..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_review.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-Requires:
- None
-
-Provides:
- instance -> family ("review")
-"""
-
-import pyblish.api
-
-
-class CollectReview(pyblish.api.ContextPlugin):
- """Adds review to families for instances marked to be reviewable.
- """
-
- label = "Collect Review"
- hosts = ["photoshop"]
- order = pyblish.api.CollectorOrder + 0.1
- settings_category = "photoshop"
-
- def process(self, context):
- for instance in context:
- creator_attributes = instance.data["creator_attributes"]
- if (creator_attributes.get("mark_for_review") and
- "review" not in instance.data["families"]):
- instance.data["families"].append("review")
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_version.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_version.py
deleted file mode 100644
index bc9f05ab50..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_version.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import pyblish.api
-
-
-class CollectVersion(pyblish.api.InstancePlugin):
- """Collect version for publishable instances.
-
- Used to synchronize version from workfile to all publishable instances:
- - image (manually created or color coded)
- - review
- - workfile
-
- Dev comment:
-    Explicit collector created to control this from a single place and not
-    from 3 different ones.
-
-    Workfile is set here explicitly as the version might be forced to
-    latest + 1 because of Webpublisher.
- (This plugin must run after CollectPublishedVersion!)
- """
- order = pyblish.api.CollectorOrder + 0.200
- label = 'Collect Version'
-
- hosts = ["photoshop"]
- families = ["image", "review", "workfile"]
- settings_category = "photoshop"
-
- def process(self, instance):
- workfile_version = instance.context.data["version"]
- self.log.debug(f"Applying version {workfile_version}")
- instance.data["version"] = workfile_version
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_workfile.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_workfile.py
deleted file mode 100644
index b9080a12ff..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_workfile.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import os
-import pyblish.api
-
-
-class CollectWorkfile(pyblish.api.ContextPlugin):
- """Collect current script for publish."""
-
- order = pyblish.api.CollectorOrder + 0.1
- label = "Collect Workfile"
- hosts = ["photoshop"]
-
- default_variant = "Main"
-
- def process(self, context):
- for instance in context:
- if instance.data["productType"] == "workfile":
- file_path = context.data["currentFile"]
- _, ext = os.path.splitext(file_path)
- staging_dir = os.path.dirname(file_path)
- base_name = os.path.basename(file_path)
-
- # creating representation
- _, ext = os.path.splitext(file_path)
- instance.data["representations"].append({
- "name": ext[1:],
- "ext": ext[1:],
- "files": base_name,
- "stagingDir": staging_dir,
- })
- return
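
The workfile representation assembled in the removed `CollectWorkfile.process` can be summarized as a small helper; `workfile_representation` is an illustrative name, the dictionary keys are the ones used above.

```python
import os


def workfile_representation(file_path):
    """Build the representation dict for a workfile from its current path."""
    _, ext = os.path.splitext(file_path)
    return {
        "name": ext[1:],
        "ext": ext[1:],
        "files": os.path.basename(file_path),
        "stagingDir": os.path.dirname(file_path),
    }


# e.g. workfile_representation("C:/work/shot010_v003.psd")
# -> {"name": "psd", "ext": "psd", "files": "shot010_v003.psd",
#     "stagingDir": "C:/work"}
```
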
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_image.py
deleted file mode 100644
index 33599d37bb..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_image.py
+++ /dev/null
@@ -1,102 +0,0 @@
-import os
-
-import pyblish.api
-from ayon_core.pipeline import publish
-from ayon_photoshop import api as photoshop
-
-
-class ExtractImage(pyblish.api.ContextPlugin):
- """Extract all layers (groups) marked for publish.
-
-    Usually a publishable instance is created as a wrapper of layer(s). For
-    each publishable instance one image per format in 'formats' is created.
-
-    The logic tries to hide/unhide layers as few times as possible.
-
- Called once for all publishable instances.
- """
-
- order = publish.Extractor.order - 0.48
- label = "Extract Image"
- hosts = ["photoshop"]
-
- families = ["image", "background"]
- formats = ["png", "jpg"]
- settings_category = "photoshop"
-
- def process(self, context):
- stub = photoshop.stub()
- hidden_layer_ids = set()
-
- all_layers = stub.get_layers()
- for layer in all_layers:
- if not layer.visible:
- hidden_layer_ids.add(layer.id)
- stub.hide_all_others_layers_ids([], layers=all_layers)
-
- with photoshop.maintained_selection():
- with photoshop.maintained_visibility(layers=all_layers):
- for instance in context:
- if instance.data["productType"] not in self.families:
- continue
-
- staging_dir = self.staging_dir(instance)
- self.log.info("Outputting image to {}".format(staging_dir))
-
- # Perform extraction
- files = {}
- ids = set()
- # real layers and groups
- members = instance.data("members")
- if members:
- ids.update(set([int(member) for member in members]))
- # virtual groups collected by color coding or auto_image
- add_ids = instance.data.pop("ids", None)
- if add_ids:
- ids.update(set(add_ids))
- extract_ids = set([ll.id for ll in stub.
- get_layers_in_layers_ids(ids, all_layers)
- if ll.id not in hidden_layer_ids])
-
- for extracted_id in extract_ids:
- stub.set_visible(extracted_id, True)
-
- file_basename = os.path.splitext(
- stub.get_active_document_name()
- )[0]
- for extension in self.formats:
- _filename = "{}.{}".format(file_basename,
- extension)
- files[extension] = _filename
-
- full_filename = os.path.join(staging_dir,
- _filename)
- stub.saveAs(full_filename, extension, True)
- self.log.info(f"Extracted: {extension}")
-
- representations = []
- for extension, filename in files.items():
- representations.append({
- "name": extension,
- "ext": extension,
- "files": filename,
- "stagingDir": staging_dir
- })
- instance.data["representations"] = representations
- instance.data["stagingDir"] = staging_dir
-
- self.log.info(f"Extracted {instance} to {staging_dir}")
-
- for extracted_id in extract_ids:
- stub.set_visible(extracted_id, False)
-
- def staging_dir(self, instance):
- """Provide a temporary directory in which to store extracted files
-
- Upon calling this method the staging directory is stored inside
- the instance.data['stagingDir']
- """
-
- from ayon_core.pipeline.publish import get_instance_staging_dir
-
- return get_instance_staging_dir(instance)
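
The subtle part of the removed `ExtractImage` is deciding which layers to unhide: only layers that belong to the published groups and were not hidden by the artist. Below is a rough standalone sketch of that filtering, assuming a simplified `Layer` stand-in for PSItem (the real code resolves group membership through `stub.get_layers_in_layers_ids`).

```python
from dataclasses import dataclass


@dataclass(frozen=True)
class Layer:
    id: int
    parent_ids: frozenset  # ids of the groups this layer is nested in
    visible: bool


def layer_ids_to_unhide(member_ids, all_layers):
    """Ids of layers inside published groups, minus originally hidden layers."""
    wanted = set(member_ids)
    hidden = {layer.id for layer in all_layers if not layer.visible}
    return {
        layer.id
        for layer in all_layers
        if (layer.id in wanted or wanted & layer.parent_ids)
        and layer.id not in hidden
    }


# e.g. layer_ids_to_unhide({10}, [Layer(10, frozenset(), True),
#                                 Layer(11, frozenset({10}), True),
#                                 Layer(12, frozenset({10}), False)]) == {10, 11}
```
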
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_review.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_review.py
deleted file mode 100644
index 0f36d31648..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_review.py
+++ /dev/null
@@ -1,329 +0,0 @@
-import os
-import shutil
-from PIL import Image
-
-from ayon_core.lib import (
- run_subprocess,
- get_ffmpeg_tool_args,
-)
-from ayon_core.pipeline import publish
-from ayon_photoshop import api as photoshop
-
-
-class ExtractReview(publish.Extractor):
- """
-    Produce a flattened image or an image sequence from all 'image' instances.
-
-    If no 'image' instance is created, it produces a flattened image from
-    all visible layers.
-
-    It creates review, thumbnail and mov representations.
-
-    The 'review' family can be used in other steps as a reference, as it
-    contains a flattened image by default. (E.g. an artist could load this
-    review as a single item and see the full image. In most cases the 'image'
-    product type is separated by layers for better usage in animation
-    or comp.)
- """
-
- label = "Extract Review"
- hosts = ["photoshop"]
- families = ["review"]
- settings_category = "photoshop"
-
- # Extract Options
- jpg_options = None
- mov_options = None
- make_image_sequence = None
- max_downscale_size = 8192
-
- def process(self, instance):
- staging_dir = self.staging_dir(instance)
- self.log.info("Outputting image to {}".format(staging_dir))
-
- fps = instance.data.get("fps", 25)
- stub = photoshop.stub()
- self.output_seq_filename = os.path.splitext(
- stub.get_active_document_name())[0] + ".%04d.jpg"
-
- layers = self._get_layers_from_image_instances(instance)
- self.log.info("Layers image instance found: {}".format(layers))
-
- repre_name = "jpg"
- repre_skeleton = {
- "name": repre_name,
- "ext": "jpg",
- "stagingDir": staging_dir,
- "tags": self.jpg_options['tags'],
- }
-
- if instance.data["productType"] != "review":
- self.log.debug(
- "Existing extracted file from image product type used."
- )
- # enable creation of review, without this jpg review would clash
- # with jpg of the image product type
- output_name = repre_name
- repre_name = "{}_{}".format(repre_name, output_name)
- repre_skeleton.update({"name": repre_name,
- "outputName": output_name})
-
- img_file = self.output_seq_filename % 0
- self._prepare_file_for_image_product_type(
- img_file, instance, staging_dir
- )
- repre_skeleton.update({
- "files": img_file,
- })
- processed_img_names = [img_file]
- elif self.make_image_sequence and len(layers) > 1:
- self.log.debug("Extract layers to image sequence.")
- img_list = self._save_sequence_images(staging_dir, layers)
-
- repre_skeleton.update({
- "frameStart": 0,
- "frameEnd": len(img_list),
- "fps": fps,
- "files": img_list,
- })
- processed_img_names = img_list
- else:
- self.log.debug("Extract layers to flatten image.")
- img_file = self._save_flatten_image(staging_dir, layers)
-
- repre_skeleton.update({
- "files": img_file,
- })
- processed_img_names = [img_file]
-
- instance.data["representations"].append(repre_skeleton)
-
- ffmpeg_args = get_ffmpeg_tool_args("ffmpeg")
-
- instance.data["stagingDir"] = staging_dir
-
- source_files_pattern = os.path.join(staging_dir,
- self.output_seq_filename)
- source_files_pattern = self._check_and_resize(processed_img_names,
- source_files_pattern,
- staging_dir)
- self._generate_thumbnail(
- list(ffmpeg_args),
- instance,
- source_files_pattern,
- staging_dir)
-
- no_of_frames = len(processed_img_names)
- if no_of_frames > 1:
- self._generate_mov(
- list(ffmpeg_args),
- instance,
- fps,
- no_of_frames,
- source_files_pattern,
- staging_dir)
-
- self.log.info(f"Extracted {instance} to {staging_dir}")
-
- def _prepare_file_for_image_product_type(
- self, img_file, instance, staging_dir
- ):
- """Converts existing file for image product type to .jpg
-
-        An image instance could have its own separate review (an instance per
-        layer, for example). This reuses the already extracted file instead of
-        extracting again.
- Args:
- img_file (str): name of output file (with 0000 value for ffmpeg
- later)
- instance:
- staging_dir (str): temporary folder where extracted file is located
- """
- repre_file = instance.data["representations"][0]
- source_file_path = os.path.join(repre_file["stagingDir"],
- repre_file["files"])
- if not os.path.exists(source_file_path):
-            raise RuntimeError(f"{source_file_path} doesn't exist, "
-                               "cannot create review from it")
- _, ext = os.path.splitext(repre_file["files"])
- if ext != ".jpg":
- im = Image.open(source_file_path)
- if (im.mode in ('RGBA', 'LA') or (
- im.mode == 'P' and 'transparency' in im.info)):
- # without this it produces messy low quality jpg
- rgb_im = Image.new("RGBA", (im.width, im.height), "#ffffff")
- rgb_im.alpha_composite(im)
- rgb_im.convert("RGB").save(os.path.join(staging_dir, img_file))
- else:
- im.save(os.path.join(staging_dir, img_file))
- else:
- # handles already .jpg
- shutil.copy(source_file_path,
- os.path.join(staging_dir, img_file))
-
- def _generate_mov(self, ffmpeg_path, instance, fps, no_of_frames,
- source_files_pattern, staging_dir):
- """Generates .mov to upload to Ftrack.
-
- Args:
-            ffmpeg_path (list): ffmpeg executable call as a list of arguments
- instance (Pyblish Instance)
- fps (str)
- no_of_frames (int):
- source_files_pattern (str): name of source file
- staging_dir (str): temporary location to store thumbnail
- Updates:
- instance - adds representation portion
- """
- # Generate mov.
- mov_path = os.path.join(staging_dir, "review.mov")
- self.log.info(f"Generate mov review: {mov_path}")
- args = ffmpeg_path + [
- "-y",
- "-i", source_files_pattern,
- "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
- "-vframes", str(no_of_frames),
- mov_path
- ]
- self.log.debug("mov args:: {}".format(args))
- _output = run_subprocess(args)
- instance.data["representations"].append({
- "name": "mov",
- "ext": "mov",
- "files": os.path.basename(mov_path),
- "stagingDir": staging_dir,
- "frameStart": 1,
- "frameEnd": no_of_frames,
- "fps": fps,
- "tags": self.mov_options['tags']
- })
-
- def _generate_thumbnail(
- self, ffmpeg_args, instance, source_files_pattern, staging_dir
- ):
- """Generates scaled down thumbnail and adds it as representation.
-
- Args:
-            ffmpeg_args (list): ffmpeg executable call as a list of arguments
- instance (Pyblish Instance)
- source_files_pattern (str): name of source file
- staging_dir (str): temporary location to store thumbnail
- Updates:
- instance - adds representation portion
- """
- # Generate thumbnail
- thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
- self.log.info(f"Generate thumbnail {thumbnail_path}")
- args = ffmpeg_args + [
- "-y",
- "-i", source_files_pattern,
- "-vf", "scale=300:-1",
- "-vframes", "1",
- thumbnail_path
- ]
- self.log.debug("thumbnail args:: {}".format(args))
- _output = run_subprocess(args)
- instance.data["representations"].append({
- "name": "thumbnail",
- "ext": "jpg",
- "outputName": "thumb",
- "files": os.path.basename(thumbnail_path),
- "stagingDir": staging_dir,
- "tags": ["thumbnail", "delete"]
- })
- instance.data["thumbnailPath"] = thumbnail_path
-
- def _check_and_resize(self, processed_img_names, source_files_pattern,
- staging_dir):
- """Check if saved image could be used in ffmpeg.
-
- Ffmpeg has max size 16384x16384. Saved image(s) must be resized to be
- used as a source for thumbnail or review mov.
- """
- Image.MAX_IMAGE_PIXELS = None
- first_url = os.path.join(staging_dir, processed_img_names[0])
- with Image.open(first_url) as im:
- width, height = im.size
-
- if width > self.max_downscale_size or height > self.max_downscale_size:
- resized_dir = os.path.join(staging_dir, "resized")
- os.mkdir(resized_dir)
- source_files_pattern = os.path.join(resized_dir,
- self.output_seq_filename)
- for file_name in processed_img_names:
- source_url = os.path.join(staging_dir, file_name)
- with Image.open(source_url) as res_img:
- # 'thumbnail' automatically keeps aspect ratio
- res_img.thumbnail((self.max_downscale_size,
- self.max_downscale_size),
- Image.ANTIALIAS)
- res_img.save(os.path.join(resized_dir, file_name))
-
- return source_files_pattern
-
- def _get_layers_from_image_instances(self, instance):
- """Collect all layers from 'instance'.
-
- Returns:
- (list) of PSItem
- """
- layers = []
- # creating review for existing 'image' instance
- if (
- instance.data["productType"] == "image"
- and instance.data.get("layer")
- ):
- layers.append(instance.data["layer"])
- return layers
-
- for image_instance in instance.context:
- if image_instance.data["productType"] != "image":
- continue
- if not image_instance.data.get("layer"):
- # dummy instance for flatten image
- continue
- layers.append(image_instance.data.get("layer"))
-
- return sorted(layers)
-
- def _save_flatten_image(self, staging_dir, layers):
- """Creates flat image from 'layers' into 'staging_dir'.
-
- Returns:
- (str): path to new image
- """
- img_filename = self.output_seq_filename % 0
- output_image_path = os.path.join(staging_dir, img_filename)
- stub = photoshop.stub()
-
- with photoshop.maintained_visibility():
- self.log.info("Extracting {}".format(layers))
- if layers:
- stub.hide_all_others_layers(layers)
-
- stub.saveAs(output_image_path, 'jpg', True)
-
- return img_filename
-
- def _save_sequence_images(self, staging_dir, layers):
- """Creates separate flat images from 'layers' into 'staging_dir'.
-
-        Used as a source for a multi-frame .mov to review everything at once.
- Returns:
- (list): paths to new images
- """
- stub = photoshop.stub()
-
- list_img_filename = []
- with photoshop.maintained_visibility():
- for i, layer in enumerate(layers):
- self.log.info("Extracting {}".format(layer))
-
- img_filename = self.output_seq_filename % i
- output_image_path = os.path.join(staging_dir, img_filename)
- list_img_filename.append(img_filename)
-
- with photoshop.maintained_visibility():
- stub.hide_all_others_layers([layer])
- stub.saveAs(output_image_path, 'jpg', True)
-
- return list_img_filename
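
The `_prepare_file_for_image_product_type` step removed above exists because saving a transparent image straight to JPEG produces messy results, so it composites onto white first. A minimal standalone sketch of that conversion, assuming Pillow is available; `save_as_jpg` and the path arguments are illustrative names.

```python
from PIL import Image


def save_as_jpg(source_path, target_path):
    """Composite transparent images onto white, then save as JPEG.

    `target_path` should end with ".jpg" so Pillow infers the format.
    """
    with Image.open(source_path) as im:
        has_alpha = (
            im.mode in ("RGBA", "LA")
            or (im.mode == "P" and "transparency" in im.info)
        )
        if has_alpha:
            background = Image.new("RGBA", im.size, "#ffffff")
            background.alpha_composite(im.convert("RGBA"))
            im = background
        # JPEG has no alpha channel, so always convert to RGB before saving
        im.convert("RGB").save(target_path)
```
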
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_save_scene.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_save_scene.py
deleted file mode 100644
index 22ebbb739d..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_save_scene.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from ayon_core.pipeline import publish
-from ayon_photoshop import api as photoshop
-
-
-class ExtractSaveScene(publish.Extractor):
- """Save scene before extraction."""
-
- order = publish.Extractor.order - 0.49
- label = "Extract Save Scene"
- hosts = ["photoshop"]
- families = ["workfile"]
-
- def process(self, instance):
- photoshop.stub().save()
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_instance_asset.xml b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_instance_asset.xml
deleted file mode 100644
index c033f922c6..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_instance_asset.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-
-Folder does not match
-
-## Collected folder path is not the same as the context folder path
-
- {msg}
-### How to repair?
- {repair_msg}
- Refresh Publish afterwards (circle arrow at the bottom right).
-
-    If that's not the correct value, close the workfile and reopen it via
-    Workfiles to get the proper context folder path, OR disable this validator
-    and publish again if you are deliberately publishing to a different context.
-
- (Context means combination of project, folder path and task name.)
-
-
-
\ No newline at end of file
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_naming.xml b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_naming.xml
deleted file mode 100644
index 28c2329c8a..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_naming.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
-Product name
-
-## Invalid product or layer name
-
-Product or layer names cannot contain specific characters (spaces etc.) which could cause issues when the product name is used in a published file name.
- {msg}
-
-### How to repair?
-
-You can fix this with the "Repair" button on the right, then press the Refresh publishing button at the bottom right.
-
-
-### __Detailed Info__ (optional)
-
-Not all characters are allowed in file names on every OS. The set of invalid characters can be configured in Settings.
-
-
-
\ No newline at end of file
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/increment_workfile.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/increment_workfile.py
deleted file mode 100644
index b10645813a..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/increment_workfile.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import os
-import pyblish.api
-from ayon_core.pipeline.publish import get_errored_plugins_from_context
-from ayon_core.lib import version_up
-
-from ayon_photoshop import api as photoshop
-
-
-class IncrementWorkfile(pyblish.api.InstancePlugin):
- """Increment the current workfile.
-
- Saves the current scene with an increased version number.
- """
-
- label = "Increment Workfile"
- order = pyblish.api.IntegratorOrder + 9.0
- hosts = ["photoshop"]
- families = ["workfile"]
- optional = True
-
- def process(self, instance):
- errored_plugins = get_errored_plugins_from_context(instance.context)
- if errored_plugins:
- raise RuntimeError(
- "Skipping incrementing current file because publishing failed."
- )
-
- scene_path = version_up(instance.context.data["currentFile"])
- _, ext = os.path.splitext(scene_path)
- photoshop.stub().saveAs(scene_path, ext[1:], True)
-
- self.log.info("Incremented workfile to: {}".format(scene_path))
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_instance_asset.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_instance_asset.py
deleted file mode 100644
index 36ba621dc2..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_instance_asset.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import pyblish.api
-
-from ayon_core.pipeline import get_current_folder_path
-from ayon_core.pipeline.publish import (
- ValidateContentsOrder,
- PublishXmlValidationError,
- OptionalPyblishPluginMixin
-)
-from ayon_photoshop import api as photoshop
-
-
-class ValidateInstanceFolderRepair(pyblish.api.Action):
- """Repair the instance folder."""
-
- label = "Repair"
- icon = "wrench"
- on = "failed"
-
- def process(self, context, plugin):
-
- # Get the errored instances
- failed = []
- for result in context.data["results"]:
- if (
- result["error"] is not None
- and result["instance"] is not None
- and result["instance"] not in failed
- ):
- failed.append(result["instance"])
-
- # Apply pyblish.logic to get the instances for the plug-in
- instances = pyblish.api.instances_by_plugin(failed, plugin)
- stub = photoshop.stub()
- current_folder_path = get_current_folder_path()
- for instance in instances:
- data = stub.read(instance[0])
- data["folderPath"] = current_folder_path
- stub.imprint(instance[0], data)
-
-
-class ValidateInstanceAsset(OptionalPyblishPluginMixin,
- pyblish.api.InstancePlugin):
- """Validate the instance folder is the current selected context folder.
-
-    When multiple workfiles are opened, switching between them could mess
-    with the selected context. In that case outputs might be published
-    under the wrong folder!
-
-    The Repair action will use the Context folder value (from Workfiles or
-    Launcher). Closing and reopening via Workfiles refreshes the Context value.
- """
-
- label = "Validate Instance Folder"
- hosts = ["photoshop"]
- optional = True
- actions = [ValidateInstanceFolderRepair]
- order = ValidateContentsOrder
-
- def process(self, instance):
- instance_folder_path = instance.data["folderPath"]
- current_folder_path = get_current_folder_path()
-
- if instance_folder_path != current_folder_path:
- msg = (
- f"Instance folder {instance_folder_path} is not the same"
- f" as current context {current_folder_path}."
-
- )
- repair_msg = (
- "Repair with 'Repair' button"
- f" to use '{current_folder_path}'.\n"
- )
- formatting_data = {"msg": msg,
- "repair_msg": repair_msg}
- raise PublishXmlValidationError(self, msg,
- formatting_data=formatting_data)
diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_naming.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_naming.py
deleted file mode 100644
index e5f826b07e..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_naming.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import re
-
-import pyblish.api
-
-from ayon_photoshop import api as photoshop
-from ayon_core.pipeline.create import PRODUCT_NAME_ALLOWED_SYMBOLS
-from ayon_core.pipeline.publish import (
- ValidateContentsOrder,
- PublishXmlValidationError,
-)
-
-
-class ValidateNamingRepair(pyblish.api.Action):
- """Repair the instance folder."""
-
- label = "Repair"
- icon = "wrench"
- on = "failed"
- settings_category = "photoshop"
-
- def process(self, context, plugin):
-
- # Get the errored instances
- failed = []
- for result in context.data["results"]:
- if (
- result["error"] is not None
- and result["instance"] is not None
- and result["instance"] not in failed
- ):
- failed.append(result["instance"])
-
- invalid_chars, replace_char = plugin.get_replace_chars()
- self.log.debug("{} --- {}".format(invalid_chars, replace_char))
-
- # Apply pyblish.logic to get the instances for the plug-in
- instances = pyblish.api.instances_by_plugin(failed, plugin)
- stub = photoshop.stub()
- for instance in instances:
- self.log.debug("validate_naming instance {}".format(instance))
- current_layer_state = stub.get_layer(instance.data["layer"].id)
- self.log.debug("current_layer{}".format(current_layer_state))
-
- layer_meta = stub.read(current_layer_state)
- instance_id = (layer_meta.get("instance_id") or
- layer_meta.get("uuid"))
- if not instance_id:
- self.log.warning("Unable to repair, cannot find layer")
- continue
-
- layer_name = re.sub(invalid_chars,
- replace_char,
- current_layer_state.clean_name)
- layer_name = stub.PUBLISH_ICON + layer_name
-
- stub.rename_layer(current_layer_state.id, layer_name)
-
- product_name = re.sub(invalid_chars, replace_char,
- instance.data["productName"])
-
- # format from Tool Creator
- product_name = re.sub(
- "[^{}]+".format(PRODUCT_NAME_ALLOWED_SYMBOLS),
- "",
- product_name
- )
-
- layer_meta["productName"] = product_name
- stub.imprint(instance_id, layer_meta)
-
- return True
-
-
-class ValidateNaming(pyblish.api.InstancePlugin):
- """Validate the instance name.
-
-    Spaces in names are not allowed. They will be replaced with underscores.
- """
-
- label = "Validate Naming"
- hosts = ["photoshop"]
- order = ValidateContentsOrder
- families = ["image"]
- actions = [ValidateNamingRepair]
-
- # configured by Settings
- invalid_chars = ''
- replace_char = ''
-
- def process(self, instance):
- help_msg = ' Use Repair button to fix it and then refresh publish.'
-
- layer = instance.data.get("layer")
- if layer:
- msg = "Name \"{}\" is not allowed.{}".format(
- layer.clean_name, help_msg
- )
- formatting_data = {"msg": msg}
- if re.search(self.invalid_chars, layer.clean_name):
- raise PublishXmlValidationError(
- self, msg, formatting_data=formatting_data
- )
-
- product_name = instance.data["productName"]
- msg = "Product \"{}\" is not allowed.{}".format(
- product_name, help_msg
- )
- formatting_data = {"msg": msg}
- if re.search(self.invalid_chars, product_name):
- raise PublishXmlValidationError(
- self, msg, formatting_data=formatting_data
- )
-
- @classmethod
- def get_replace_chars(cls):
- """Pass values configured in Settings for Repair."""
- return cls.invalid_chars, cls.replace_char
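
The repair in the removed `ValidateNamingRepair` boils down to a regex substitution driven by two settings values. Below is a standalone sketch using the defaults that appear later in this diff in `DEFAULT_PUBLISH_SETTINGS`; `sanitize_name` is an illustrative name.

```python
import re

# Defaults from the ValidateNaming settings further down in this diff
INVALID_CHARS = r"[ \\/+\*\?\(\)\[\]\{\}:,;]"
REPLACE_CHAR = "_"


def sanitize_name(name):
    """Replace characters that are unsafe in published file names."""
    return re.sub(INVALID_CHARS, REPLACE_CHAR, name)


assert sanitize_name("Hero Layer (final)") == "Hero_Layer__final_"
```
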
diff --git a/server_addon/photoshop/client/ayon_photoshop/resources/template.psd b/server_addon/photoshop/client/ayon_photoshop/resources/template.psd
deleted file mode 100644
index 4c731771ba..0000000000
Binary files a/server_addon/photoshop/client/ayon_photoshop/resources/template.psd and /dev/null differ
diff --git a/server_addon/photoshop/client/ayon_photoshop/version.py b/server_addon/photoshop/client/ayon_photoshop/version.py
deleted file mode 100644
index 7f6de51228..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Package declaring AYON addon 'photoshop' version."""
-__version__ = "0.2.2"
diff --git a/server_addon/photoshop/client/pyproject.toml b/server_addon/photoshop/client/pyproject.toml
deleted file mode 100644
index 3beb76ba74..0000000000
--- a/server_addon/photoshop/client/pyproject.toml
+++ /dev/null
@@ -1,6 +0,0 @@
-[project]
-name="photoshop"
-description="AYON Phostoshop addon."
-
-[ayon.runtimeDependencies]
-wsrpc_aiohttp = "^3.1.1" # websocket server
diff --git a/server_addon/photoshop/package.py b/server_addon/photoshop/package.py
deleted file mode 100644
index f4d2a98293..0000000000
--- a/server_addon/photoshop/package.py
+++ /dev/null
@@ -1,10 +0,0 @@
-name = "photoshop"
-title = "Photoshop"
-version = "0.2.2"
-
-client_dir = "ayon_photoshop"
-
-ayon_required_addons = {
- "core": ">0.3.2",
-}
-ayon_compatible_addons = {}
diff --git a/server_addon/photoshop/server/__init__.py b/server_addon/photoshop/server/__init__.py
deleted file mode 100644
index 86d1025a2d..0000000000
--- a/server_addon/photoshop/server/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from ayon_server.addons import BaseServerAddon
-
-from .settings import PhotoshopSettings, DEFAULT_PHOTOSHOP_SETTING
-
-
-class Photoshop(BaseServerAddon):
- settings_model = PhotoshopSettings
-
- async def get_default_settings(self):
- settings_model_cls = self.get_settings_model()
- return settings_model_cls(**DEFAULT_PHOTOSHOP_SETTING)
diff --git a/server_addon/photoshop/server/settings/__init__.py b/server_addon/photoshop/server/settings/__init__.py
deleted file mode 100644
index 9ae5764362..0000000000
--- a/server_addon/photoshop/server/settings/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from .main import (
- PhotoshopSettings,
- DEFAULT_PHOTOSHOP_SETTING,
-)
-
-
-__all__ = (
- "PhotoshopSettings",
- "DEFAULT_PHOTOSHOP_SETTING",
-)
diff --git a/server_addon/photoshop/server/settings/creator_plugins.py b/server_addon/photoshop/server/settings/creator_plugins.py
deleted file mode 100644
index 8acc213866..0000000000
--- a/server_addon/photoshop/server/settings/creator_plugins.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from ayon_server.settings import BaseSettingsModel, SettingsField
-
-
-class CreateImagePluginModel(BaseSettingsModel):
- enabled: bool = SettingsField(True, title="Enabled")
- active_on_create: bool = SettingsField(True, title="Active by default")
- mark_for_review: bool = SettingsField(False, title="Review by default")
- default_variants: list[str] = SettingsField(
- default_factory=list,
- title="Default Variants"
- )
-
-
-class AutoImageCreatorPluginModel(BaseSettingsModel):
- enabled: bool = SettingsField(False, title="Enabled")
- active_on_create: bool = SettingsField(True, title="Active by default")
- mark_for_review: bool = SettingsField(False, title="Review by default")
-    default_variant: str = SettingsField("", title="Default Variant")
-
-
-class CreateReviewPlugin(BaseSettingsModel):
- enabled: bool = SettingsField(True, title="Enabled")
- active_on_create: bool = SettingsField(True, title="Active by default")
-    default_variant: str = SettingsField("", title="Default Variant")
-
-
-class CreateWorkfilelugin(BaseSettingsModel):
- enabled: bool = SettingsField(True, title="Enabled")
- active_on_create: bool = SettingsField(True, title="Active by default")
-    default_variant: str = SettingsField("", title="Default Variant")
-
-
-class PhotoshopCreatorPlugins(BaseSettingsModel):
- ImageCreator: CreateImagePluginModel = SettingsField(
- title="Create Image",
- default_factory=CreateImagePluginModel,
- )
- AutoImageCreator: AutoImageCreatorPluginModel = SettingsField(
- title="Create Flatten Image",
- default_factory=AutoImageCreatorPluginModel,
- )
- ReviewCreator: CreateReviewPlugin = SettingsField(
- title="Create Review",
- default_factory=CreateReviewPlugin,
- )
- WorkfileCreator: CreateWorkfilelugin = SettingsField(
- title="Create Workfile",
- default_factory=CreateWorkfilelugin,
- )
-
-
-DEFAULT_CREATE_SETTINGS = {
- "ImageCreator": {
- "enabled": True,
- "active_on_create": True,
- "mark_for_review": False,
- "default_variants": [
- "Main"
- ]
- },
- "AutoImageCreator": {
- "enabled": False,
- "active_on_create": True,
- "mark_for_review": False,
- "default_variant": ""
- },
- "ReviewCreator": {
- "enabled": True,
- "active_on_create": True,
- "default_variant": ""
- },
- "WorkfileCreator": {
- "enabled": True,
- "active_on_create": True,
- "default_variant": "Main"
- }
-}
diff --git a/server_addon/photoshop/server/settings/imageio.py b/server_addon/photoshop/server/settings/imageio.py
deleted file mode 100644
index c514f58173..0000000000
--- a/server_addon/photoshop/server/settings/imageio.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from pydantic import validator
-from ayon_server.settings import BaseSettingsModel, SettingsField
-from ayon_server.settings.validators import ensure_unique_names
-
-
-class ImageIOConfigModel(BaseSettingsModel):
- """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config
- path in the Core addon profiles here
- (ayon+settings://core/imageio/ocio_config_profiles).
- """
-
- override_global_config: bool = SettingsField(
- False,
- title="Override global OCIO config",
- description=(
- "DEPRECATED functionality. Please set the OCIO config path in the "
- "Core addon profiles here (ayon+settings://core/imageio/"
- "ocio_config_profiles)."
- ),
- )
- filepath: list[str] = SettingsField(
- default_factory=list,
- title="Config path",
- description=(
- "DEPRECATED functionality. Please set the OCIO config path in the "
- "Core addon profiles here (ayon+settings://core/imageio/"
- "ocio_config_profiles)."
- ),
- )
-
-
-class ImageIOFileRuleModel(BaseSettingsModel):
- name: str = SettingsField("", title="Rule name")
- pattern: str = SettingsField("", title="Regex pattern")
- colorspace: str = SettingsField("", title="Colorspace name")
- ext: str = SettingsField("", title="File extension")
-
-
-class ImageIOFileRulesModel(BaseSettingsModel):
- activate_host_rules: bool = SettingsField(False)
- rules: list[ImageIOFileRuleModel] = SettingsField(
- default_factory=list,
- title="Rules"
- )
-
- @validator("rules")
- def validate_unique_outputs(cls, value):
- ensure_unique_names(value)
- return value
-
-
-class ImageIORemappingRulesModel(BaseSettingsModel):
- host_native_name: str = SettingsField(
- title="Application native colorspace name"
- )
- ocio_name: str = SettingsField(title="OCIO colorspace name")
-
-
-class ImageIORemappingModel(BaseSettingsModel):
- rules: list[ImageIORemappingRulesModel] = SettingsField(
- default_factory=list)
-
-
-class PhotoshopImageIOModel(BaseSettingsModel):
- activate_host_color_management: bool = SettingsField(
- True, title="Enable Color Management"
- )
- remapping: ImageIORemappingModel = SettingsField(
- title="Remapping colorspace names",
- default_factory=ImageIORemappingModel
- )
- ocio_config: ImageIOConfigModel = SettingsField(
- default_factory=ImageIOConfigModel,
- title="OCIO config"
- )
- file_rules: ImageIOFileRulesModel = SettingsField(
- default_factory=ImageIOFileRulesModel,
- title="File Rules"
- )
diff --git a/server_addon/photoshop/server/settings/main.py b/server_addon/photoshop/server/settings/main.py
deleted file mode 100644
index b6474d6d29..0000000000
--- a/server_addon/photoshop/server/settings/main.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from ayon_server.settings import BaseSettingsModel, SettingsField
-
-from .imageio import PhotoshopImageIOModel
-from .creator_plugins import PhotoshopCreatorPlugins, DEFAULT_CREATE_SETTINGS
-from .publish_plugins import PhotoshopPublishPlugins, DEFAULT_PUBLISH_SETTINGS
-from .workfile_builder import WorkfileBuilderPlugin
-
-
-class PhotoshopSettings(BaseSettingsModel):
- """Photoshop Project Settings."""
-
- imageio: PhotoshopImageIOModel = SettingsField(
- default_factory=PhotoshopImageIOModel,
- title="OCIO config"
- )
-
- create: PhotoshopCreatorPlugins = SettingsField(
- default_factory=PhotoshopCreatorPlugins,
- title="Creator plugins"
- )
-
- publish: PhotoshopPublishPlugins = SettingsField(
- default_factory=PhotoshopPublishPlugins,
- title="Publish plugins"
- )
-
- workfile_builder: WorkfileBuilderPlugin = SettingsField(
- default_factory=WorkfileBuilderPlugin,
- title="Workfile Builder"
- )
-
-
-DEFAULT_PHOTOSHOP_SETTING = {
- "create": DEFAULT_CREATE_SETTINGS,
- "publish": DEFAULT_PUBLISH_SETTINGS,
- "workfile_builder": {
- "create_first_version": False,
- "custom_templates": []
- }
-}
diff --git a/server_addon/photoshop/server/settings/publish_plugins.py b/server_addon/photoshop/server/settings/publish_plugins.py
deleted file mode 100644
index 149b08beb4..0000000000
--- a/server_addon/photoshop/server/settings/publish_plugins.py
+++ /dev/null
@@ -1,203 +0,0 @@
-from ayon_server.settings import BaseSettingsModel, SettingsField
-
-
-create_flatten_image_enum = [
- {"value": "flatten_with_images", "label": "Flatten with images"},
- {"value": "flatten_only", "label": "Flatten only"},
- {"value": "no", "label": "No"},
-]
-
-
-color_code_enum = [
- {"value": "red", "label": "Red"},
- {"value": "orange", "label": "Orange"},
- {"value": "yellowColor", "label": "Yellow"},
- {"value": "grain", "label": "Green"},
- {"value": "blue", "label": "Blue"},
- {"value": "violet", "label": "Violet"},
- {"value": "gray", "label": "Gray"},
-]
-
-
-class ColorCodeMappings(BaseSettingsModel):
- color_code: list[str] = SettingsField(
- title="Color codes for layers",
- default_factory=list,
- enum_resolver=lambda: color_code_enum,
- )
-
- layer_name_regex: list[str] = SettingsField(
- default_factory=list,
- title="Layer name regex"
- )
-
- product_type: str = SettingsField(
- "",
- title="Resulting product type"
- )
-
- product_name_template: str = SettingsField(
- "",
- title="Product name template"
- )
-
-
-class ExtractedOptions(BaseSettingsModel):
- tags: list[str] = SettingsField(
- title="Tags",
- default_factory=list
- )
-
-
-class CollectColorCodedInstancesPlugin(BaseSettingsModel):
- """Set color for publishable layers, set its resulting product type
- and template for product name. \n Can create flatten image from published
- instances.
- (Applicable only for remote publishing!)"""
-
- enabled: bool = SettingsField(True, title="Enabled")
- create_flatten_image: str = SettingsField(
- "",
- title="Create flatten image",
- enum_resolver=lambda: create_flatten_image_enum,
- )
-
- flatten_product_name_template: str = SettingsField(
- "",
- title="Product name template for flatten image"
- )
-
- color_code_mapping: list[ColorCodeMappings] = SettingsField(
- title="Color code mappings",
- default_factory=ColorCodeMappings,
- )
-
-
-class CollectReviewPlugin(BaseSettingsModel):
- """Should review product be created"""
- enabled: bool = SettingsField(True, title="Enabled")
-
-
-class CollectVersionPlugin(BaseSettingsModel):
- """Synchronize version for image and review instances by workfile version""" # noqa
- enabled: bool = SettingsField(True, title="Enabled")
-
-
-class ValidateNamingPlugin(BaseSettingsModel):
- """Validate naming of products and layers""" # noqa
- invalid_chars: str = SettingsField(
- '',
- title="Regex pattern of invalid characters"
- )
-
- replace_char: str = SettingsField(
- '',
- title="Replacement character"
- )
-
-
-class ExtractImagePlugin(BaseSettingsModel):
- """Currently only jpg and png are supported"""
- formats: list[str] = SettingsField(
- title="Extract Formats",
- default_factory=list,
- )
-
-
-class ExtractReviewPlugin(BaseSettingsModel):
- make_image_sequence: bool = SettingsField(
- False,
- title="Make an image sequence instead of flatten image"
- )
-
- max_downscale_size: int = SettingsField(
- 8192,
- title="Maximum size of sources for review",
- description="FFMpeg can only handle limited resolution for creation of review and/or thumbnail", # noqa
- gt=300, # greater than
- le=16384, # less or equal
- )
-
- jpg_options: ExtractedOptions = SettingsField(
- title="Extracted jpg Options",
- default_factory=ExtractedOptions
- )
-
- mov_options: ExtractedOptions = SettingsField(
- title="Extracted mov Options",
- default_factory=ExtractedOptions
- )
-
-
-class PhotoshopPublishPlugins(BaseSettingsModel):
- CollectColorCodedInstances: CollectColorCodedInstancesPlugin = (
- SettingsField(
- title="Collect Color Coded Instances",
- default_factory=CollectColorCodedInstancesPlugin,
- )
- )
- CollectReview: CollectReviewPlugin = SettingsField(
- title="Collect Review",
- default_factory=CollectReviewPlugin,
- )
-
- CollectVersion: CollectVersionPlugin = SettingsField(
- title="Collect Version",
- default_factory=CollectVersionPlugin,
- )
-
- ValidateNaming: ValidateNamingPlugin = SettingsField(
- title="Validate naming of products and layers",
- default_factory=ValidateNamingPlugin,
- )
-
- ExtractImage: ExtractImagePlugin = SettingsField(
- title="Extract Image",
- default_factory=ExtractImagePlugin,
- )
-
- ExtractReview: ExtractReviewPlugin = SettingsField(
- title="Extract Review",
- default_factory=ExtractReviewPlugin,
- )
-
-
-DEFAULT_PUBLISH_SETTINGS = {
- "CollectColorCodedInstances": {
- "create_flatten_image": "no",
- "flatten_product_name_template": "",
- "color_code_mapping": []
- },
- "CollectReview": {
- "enabled": True
- },
- "CollectVersion": {
- "enabled": False
- },
- "ValidateNaming": {
- "invalid_chars": "[ \\\\/+\\*\\?\\(\\)\\[\\]\\{\\}:,;]",
- "replace_char": "_"
- },
- "ExtractImage": {
- "formats": [
- "png",
- "jpg"
- ]
- },
- "ExtractReview": {
- "make_image_sequence": False,
- "max_downscale_size": 8192,
- "jpg_options": {
- "tags": [
- "review",
- "ftrackreview"
- ]
- },
- "mov_options": {
- "tags": [
- "review",
- "ftrackreview"
- ]
- }
- }
-}
diff --git a/server_addon/photoshop/server/settings/workfile_builder.py b/server_addon/photoshop/server/settings/workfile_builder.py
deleted file mode 100644
index 4b00b99272..0000000000
--- a/server_addon/photoshop/server/settings/workfile_builder.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from ayon_server.settings import (
- BaseSettingsModel,
- SettingsField,
- MultiplatformPathModel,
-)
-
-
-class CustomBuilderTemplate(BaseSettingsModel):
- _layout = "expanded"
- task_types: list[str] = SettingsField(
- default_factory=list,
- title="Task types",
- )
-
- path: MultiplatformPathModel = SettingsField(
- default_factory=MultiplatformPathModel,
- title="Template path"
- )
-
-
-class WorkfileBuilderPlugin(BaseSettingsModel):
- _title = "Workfile Builder"
- create_first_version: bool = SettingsField(
- False,
- title="Create first workfile"
- )
-
- custom_templates: list[CustomBuilderTemplate] = SettingsField(
- default_factory=CustomBuilderTemplate,
- title="Template profiles"
- )
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/__init__.py b/server_addon/tvpaint/client/ayon_tvpaint/__init__.py
deleted file mode 100644
index 2c4a052234..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from .version import __version__
-from .addon import (
- get_launch_script_path,
- TVPaintAddon,
- TVPAINT_ROOT_DIR,
-)
-
-
-__all__ = (
- "__version__",
-
- "get_launch_script_path",
- "TVPaintAddon",
- "TVPAINT_ROOT_DIR",
-)
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/addon.py b/server_addon/tvpaint/client/ayon_tvpaint/addon.py
deleted file mode 100644
index c98c929a96..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/addon.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import os
-from ayon_core.addon import AYONAddon, IHostAddon
-
-from .version import __version__
-
-TVPAINT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
-
-
-def get_launch_script_path():
- return os.path.join(
- TVPAINT_ROOT_DIR,
- "api",
- "launch_script.py"
- )
-
-
-class TVPaintAddon(AYONAddon, IHostAddon):
- name = "tvpaint"
- version = __version__
- host_name = "tvpaint"
-
- def add_implementation_envs(self, env, _app):
- """Modify environments to contain all required for implementation."""
-
- defaults = {
- "AYON_LOG_NO_COLORS": "1"
- }
- for key, value in defaults.items():
- if not env.get(key):
- env[key] = value
-
- def get_launch_hook_paths(self, app):
- if app.host_name != self.host_name:
- return []
- return [
- os.path.join(TVPAINT_ROOT_DIR, "hooks")
- ]
-
- def get_workfile_extensions(self):
- return [".tvpp"]
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/__init__.py b/server_addon/tvpaint/client/ayon_tvpaint/api/__init__.py
deleted file mode 100644
index 7b53aad9a4..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/api/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from .communication_server import CommunicationWrapper
-from .pipeline import (
- TVPaintHost,
-)
-
-
-__all__ = (
- "CommunicationWrapper",
-
- "TVPaintHost",
-)
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/communication_server.py b/server_addon/tvpaint/client/ayon_tvpaint/api/communication_server.py
deleted file mode 100644
index 7ccb49f07e..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/api/communication_server.py
+++ /dev/null
@@ -1,925 +0,0 @@
-import os
-import json
-import time
-import subprocess
-import collections
-import asyncio
-import logging
-import socket
-import platform
-import filecmp
-import tempfile
-import threading
-import shutil
-
-from contextlib import closing
-
-from aiohttp import web
-from aiohttp_json_rpc import JsonRpc
-from aiohttp_json_rpc.protocol import (
- encode_request, encode_error, decode_msg, JsonRpcMsgTyp
-)
-from aiohttp_json_rpc.exceptions import RpcError
-
-from ayon_core.lib import emit_event
-from ayon_tvpaint.tvpaint_plugin import get_plugin_files_path
-
-log = logging.getLogger(__name__)
-log.setLevel(logging.DEBUG)
-
-
-class CommunicationWrapper:
- # TODO add logs and exceptions
- communicator = None
-
- log = logging.getLogger("CommunicationWrapper")
-
- @classmethod
- def create_qt_communicator(cls, *args, **kwargs):
- """Create communicator for Artist usage."""
- communicator = QtCommunicator(*args, **kwargs)
- cls.set_communicator(communicator)
- return communicator
-
- @classmethod
- def set_communicator(cls, communicator):
- if not cls.communicator:
- cls.communicator = communicator
- else:
- cls.log.warning("Communicator was set multiple times.")
-
- @classmethod
- def client(cls):
- if not cls.communicator:
- return None
- return cls.communicator.client()
-
- @classmethod
- def execute_george(cls, george_script):
- """Execute passed goerge script in TVPaint."""
- if not cls.communicator:
- return
- return cls.communicator.execute_george(george_script)
-
-
-class WebSocketServer:
- def __init__(self):
- self.client = None
-
- self.loop = asyncio.new_event_loop()
- self.app = web.Application(loop=self.loop)
- self.port = self.find_free_port()
- self.websocket_thread = WebsocketServerThread(
- self, self.port, loop=self.loop
- )
-
- @property
- def server_is_running(self):
- return self.websocket_thread.server_is_running
-
- def add_route(self, *args, **kwargs):
- self.app.router.add_route(*args, **kwargs)
-
- @staticmethod
- def find_free_port():
- with closing(
- socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- ) as sock:
- sock.bind(("", 0))
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- port = sock.getsockname()[1]
- return port
-
- def start(self):
- self.websocket_thread.start()
-
- def stop(self):
- try:
- if self.websocket_thread.is_running:
- log.debug("Stopping websocket server")
- self.websocket_thread.is_running = False
- self.websocket_thread.stop()
- except Exception:
- log.warning(
- "Error has happened during Killing websocket server",
- exc_info=True
- )
-
-
-class WebsocketServerThread(threading.Thread):
- """ Listener for websocket rpc requests.
-
-    It would probably be better to "attach" this to the main thread (as for
-    example Harmony needs to run something on the main thread), but currently
-    it creates a separate thread and a separate asyncio event loop.
- """
- def __init__(self, module, port, loop):
- super(WebsocketServerThread, self).__init__()
- self.is_running = False
- self.server_is_running = False
- self.port = port
- self.module = module
- self.loop = loop
- self.runner = None
- self.site = None
- self.tasks = []
-
- def run(self):
- self.is_running = True
-
- try:
- log.debug("Starting websocket server")
-
- self.loop.run_until_complete(self.start_server())
-
- log.info(
- "Running Websocket server on URL:"
- " \"ws://localhost:{}\"".format(self.port)
- )
-
- asyncio.ensure_future(self.check_shutdown(), loop=self.loop)
-
- self.server_is_running = True
- self.loop.run_forever()
-
- except Exception:
- log.warning(
- "Websocket Server service has failed", exc_info=True
- )
- finally:
- self.server_is_running = False
- # optional
- self.loop.close()
-
- self.is_running = False
- log.info("Websocket server stopped")
-
- async def start_server(self):
- """ Starts runner and TCPsite """
- self.runner = web.AppRunner(self.module.app)
- await self.runner.setup()
- self.site = web.TCPSite(self.runner, "localhost", self.port)
- await self.site.start()
-
- def stop(self):
- """Sets is_running flag to false, 'check_shutdown' shuts server down"""
- self.is_running = False
-
- async def check_shutdown(self):
- """ Future that is running and checks if server should be running
- periodically.
- """
- while self.is_running:
- while self.tasks:
- task = self.tasks.pop(0)
- log.debug("waiting for task {}".format(task))
- await task
- log.debug("returned value {}".format(task.result))
-
- await asyncio.sleep(0.5)
-
- log.debug("## Server shutdown started")
-
- await self.site.stop()
- log.debug("# Site stopped")
- await self.runner.cleanup()
- log.debug("# Server runner stopped")
- tasks = [
- task for task in asyncio.all_tasks()
- if task is not asyncio.current_task()
- ]
- list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks
- results = await asyncio.gather(*tasks, return_exceptions=True)
- log.debug(f"Finished awaiting cancelled tasks, results: {results}...")
- await self.loop.shutdown_asyncgens()
- # to really make sure everything else has time to stop
- await asyncio.sleep(0.07)
- self.loop.stop()
-
-
-class BaseTVPaintRpc(JsonRpc):
- def __init__(self, communication_obj, route_name="", **kwargs):
- super().__init__(**kwargs)
- self.requests_ids = collections.defaultdict(lambda: 0)
- self.waiting_requests = collections.defaultdict(list)
- self.responses = collections.defaultdict(list)
-
- self.route_name = route_name
- self.communication_obj = communication_obj
-
- async def _handle_rpc_msg(self, http_request, raw_msg):
-        # This duplicates code from super, but there is no other way to
-        # handle server->client requests
- host = http_request.host
- if host in self.waiting_requests:
- try:
- _raw_message = raw_msg.data
- msg = decode_msg(_raw_message)
-
- except RpcError as error:
- await self._ws_send_str(http_request, encode_error(error))
- return
-
- if msg.type in (JsonRpcMsgTyp.RESULT, JsonRpcMsgTyp.ERROR):
- msg_data = json.loads(_raw_message)
- if msg_data.get("id") in self.waiting_requests[host]:
- self.responses[host].append(msg_data)
- return
-
- return await super()._handle_rpc_msg(http_request, raw_msg)
-
- def client_connected(self):
-        # TODO This is a poor check. Verify the client really is TVPaint
- if self.clients:
- return True
- return False
-
- def send_notification(self, client, method, params=None):
- if params is None:
- params = []
- asyncio.run_coroutine_threadsafe(
- client.ws.send_str(encode_request(method, params=params)),
- loop=self.loop
- )
-
- def send_request(self, client, method, params=None, timeout=0):
- if params is None:
- params = []
-
- client_host = client.host
-
- request_id = self.requests_ids[client_host]
- self.requests_ids[client_host] += 1
-
- self.waiting_requests[client_host].append(request_id)
-
- log.debug("Sending request to client {} ({}, {}) id: {}".format(
- client_host, method, params, request_id
- ))
- future = asyncio.run_coroutine_threadsafe(
- client.ws.send_str(encode_request(method, request_id, params)),
- loop=self.loop
- )
- result = future.result()
-
- not_found = object()
- response = not_found
- start = time.time()
- while True:
- if client.ws.closed:
- return None
-
- for _response in self.responses[client_host]:
- _id = _response.get("id")
- if _id == request_id:
- response = _response
- break
-
- if response is not not_found:
- break
-
- if timeout > 0 and (time.time() - start) > timeout:
- raise Exception("Timeout passed")
- return
-
- time.sleep(0.1)
-
- if response is not_found:
- raise Exception("Connection closed")
-
- self.responses[client_host].remove(response)
-
- error = response.get("error")
- result = response.get("result")
- if error:
- raise Exception("Error happened: {}".format(error))
- return result
-
-
-class QtTVPaintRpc(BaseTVPaintRpc):
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
-
- from ayon_core.tools.utils import host_tools
- self.tools_helper = host_tools.HostToolsHelper()
-
- route_name = self.route_name
-
- # Register methods
- self.add_methods(
- (route_name, self.workfiles_tool),
- (route_name, self.loader_tool),
- (route_name, self.publish_tool),
- (route_name, self.scene_inventory_tool),
- (route_name, self.library_loader_tool),
- (route_name, self.experimental_tools)
- )
-
- # Panel routes for tools
- async def workfiles_tool(self):
- log.info("Triggering Workfile tool")
- item = MainThreadItem(self.tools_helper.show_workfiles)
- self._execute_in_main_thread(item, wait=False)
- return
-
- async def loader_tool(self):
- log.info("Triggering Loader tool")
- item = MainThreadItem(self.tools_helper.show_loader)
- self._execute_in_main_thread(item, wait=False)
- return
-
- async def publish_tool(self):
- log.info("Triggering Publish tool")
- item = MainThreadItem(self.tools_helper.show_publisher_tool)
- self._execute_in_main_thread(item, wait=False)
- return
-
- async def scene_inventory_tool(self):
- """Open Scene Inventory tool.
-
-        Function can't confirm if the tool was opened because part of the
-        SceneInventory initialization sends a websocket request to the host,
-        but the host can't respond while it is waiting for the response to
-        this call.
- """
- log.info("Triggering Scene inventory tool")
- item = MainThreadItem(self.tools_helper.show_scene_inventory)
- # Do not wait for result of callback
- self._execute_in_main_thread(item, wait=False)
- return
-
- async def library_loader_tool(self):
- log.info("Triggering Library loader tool")
- item = MainThreadItem(self.tools_helper.show_library_loader)
- self._execute_in_main_thread(item, wait=False)
- return
-
- async def experimental_tools(self):
- log.info("Triggering Library loader tool")
- item = MainThreadItem(self.tools_helper.show_experimental_tools_dialog)
- self._execute_in_main_thread(item, wait=False)
- return
-
- async def _async_execute_in_main_thread(self, item, **kwargs):
- await self.communication_obj.async_execute_in_main_thread(
- item, **kwargs
- )
-
- def _execute_in_main_thread(self, item, **kwargs):
- return self.communication_obj.execute_in_main_thread(item, **kwargs)
-
-
-class MainThreadItem:
- """Structure to store information about callback in main thread.
-
-    Item should be used to execute a callback in the main thread, which may be
-    needed for execution of Qt objects.
-
-    Item stores the callback (a callable), its arguments and keyword arguments.
-    Item also holds information about its processing state.
- """
- not_set = object()
- sleep_time = 0.1
-
- def __init__(self, callback, *args, **kwargs):
- self.done = False
- self.exception = self.not_set
- self.result = self.not_set
- self.callback = callback
- self.args = args
- self.kwargs = kwargs
-
- def execute(self):
- """Execute callback and store its result.
-
-        Method must be called from the main thread. Item is marked as `done`
-        when the callback execution finishes. Stores the output of the
-        callback, or the exception information when the callback raises one.
- """
- log.debug("Executing process in main thread")
- if self.done:
- log.warning("- item is already processed")
- return
-
- callback = self.callback
- args = self.args
- kwargs = self.kwargs
- log.info("Running callback: {}".format(str(callback)))
- try:
- result = callback(*args, **kwargs)
- self.result = result
-
- except Exception as exc:
- self.exception = exc
-
- finally:
- self.done = True
-
- def wait(self):
- """Wait for result from main thread.
-
-        This method blocks the current thread until the callback is executed.
-
- Returns:
- object: Output of callback. May be any type or object.
-
- Raises:
- Exception: Reraise any exception that happened during callback
- execution.
- """
- while not self.done:
- time.sleep(self.sleep_time)
-
- if self.exception is self.not_set:
- return self.result
- raise self.exception
-
- async def async_wait(self):
- """Wait for result from main thread.
-
- Returns:
- object: Output of callback. May be any type or object.
-
- Raises:
- Exception: Reraise any exception that happened during callback
- execution.
- """
- while not self.done:
- await asyncio.sleep(self.sleep_time)
-
- if self.exception is self.not_set:
- return self.result
- raise self.exception
-
-
-class BaseCommunicator:
- def __init__(self):
- self.process = None
- self.websocket_server = None
- self.websocket_rpc = None
- self.exit_code = None
- self._connected_client = None
-
- @property
- def server_is_running(self):
- if self.websocket_server is None:
- return False
- return self.websocket_server.server_is_running
-
- def _windows_file_process(self, src_dst_mapping, to_remove):
- """Windows specific file processing asking for admin permissions.
-
-        Administrator permissions are required to modify plugin
-        files in the TVPaint installation folder.
-
- Method requires `pywin32` python module.
-
- Args:
- src_dst_mapping (list, tuple, set): Mapping of source file to
- destination. Both must be full path. Each item must be iterable
- of size 2 `(C:/src/file.dll, C:/dst/file.dll)`.
- to_remove (list): Fullpath to files that should be removed.
- """
-
- import pythoncom
- from win32comext.shell import shell
-
- # Create temp folder where plugin files are temporary copied
-        # - reason is that copying to TVPaint requires administrator permissions
- # but admin may not have access to source folder
- tmp_dir = os.path.normpath(
- tempfile.mkdtemp(prefix="tvpaint_copy_")
- )
-
- # Copy source to temp folder and create new mapping
- dst_folders = collections.defaultdict(list)
- new_src_dst_mapping = []
- for old_src, dst in src_dst_mapping:
- new_src = os.path.join(tmp_dir, os.path.split(old_src)[1])
- shutil.copy(old_src, new_src)
- new_src_dst_mapping.append((new_src, dst))
-
- for src, dst in new_src_dst_mapping:
- src = os.path.normpath(src)
- dst = os.path.normpath(dst)
- dst_filename = os.path.basename(dst)
- dst_folder_path = os.path.dirname(dst)
- dst_folders[dst_folder_path].append((dst_filename, src))
-
- # create an instance of IFileOperation
- fo = pythoncom.CoCreateInstance(
- shell.CLSID_FileOperation,
- None,
- pythoncom.CLSCTX_ALL,
- shell.IID_IFileOperation
- )
- # Add delete command to file operation object
- for filepath in to_remove:
- item = shell.SHCreateItemFromParsingName(
- filepath, None, shell.IID_IShellItem
- )
- fo.DeleteItem(item)
-
- # here you can use SetOperationFlags, progress Sinks, etc.
- for folder_path, items in dst_folders.items():
- # create an instance of IShellItem for the target folder
- folder_item = shell.SHCreateItemFromParsingName(
- folder_path, None, shell.IID_IShellItem
- )
- for _dst_filename, source_file_path in items:
- # create an instance of IShellItem for the source item
- copy_item = shell.SHCreateItemFromParsingName(
- source_file_path, None, shell.IID_IShellItem
- )
- # queue the copy operation
- fo.CopyItem(copy_item, folder_item, _dst_filename, None)
-
- # commit
- fo.PerformOperations()
-
- # Remove temp folder
- shutil.rmtree(tmp_dir)
-
- def _prepare_windows_plugin(self, launch_args):
- """Copy plugin to TVPaint plugins and set PATH to dependencies.
-
-        Check whether the plugin already exists in TVPaint's plugins folder
-        and matches the current implementation version, using the 64-bit or
-        32-bit variant of the plugin. The path to libraries required by the
-        plugin is added to the PATH variable.
- """
-
- host_executable = launch_args[0]
- executable_file = os.path.basename(host_executable)
- if "64bit" in executable_file:
- subfolder = "windows_x64"
- elif "32bit" in executable_file:
- subfolder = "windows_x86"
- else:
- raise ValueError(
- "Can't determine if executable "
- "leads to 32-bit or 64-bit TVPaint!"
- )
-
- plugin_files_path = get_plugin_files_path()
- # Folder for right windows plugin files
- source_plugins_dir = os.path.join(plugin_files_path, subfolder)
-
- # Path to libraries (.dll) required for plugin library
- # - additional libraries can be copied to TVPaint installation folder
- # (next to executable) or added to PATH environment variable
- additional_libs_folder = os.path.join(
- source_plugins_dir,
- "additional_libraries"
- )
- additional_libs_folder = additional_libs_folder.replace("\\", "/")
- if (
- os.path.exists(additional_libs_folder)
- and additional_libs_folder not in os.environ["PATH"]
- ):
- os.environ["PATH"] += (os.pathsep + additional_libs_folder)
-
- # Path to TVPaint's plugins folder (where we want to add our plugin)
- host_plugins_path = os.path.join(
- os.path.dirname(host_executable),
- "plugins"
- )
-
- # Files that must be copied to TVPaint's plugin folder
- plugin_dir = os.path.join(source_plugins_dir, "plugin")
-
- to_copy = []
- to_remove = []
- # Remove old plugin name
- deprecated_filepath = os.path.join(
- host_plugins_path, "AvalonPlugin.dll"
- )
- if os.path.exists(deprecated_filepath):
- to_remove.append(deprecated_filepath)
-
- for filename in os.listdir(plugin_dir):
- src_full_path = os.path.join(plugin_dir, filename)
- dst_full_path = os.path.join(host_plugins_path, filename)
- if dst_full_path in to_remove:
- to_remove.remove(dst_full_path)
-
- if (
- not os.path.exists(dst_full_path)
- or not filecmp.cmp(src_full_path, dst_full_path)
- ):
- to_copy.append((src_full_path, dst_full_path))
-
- # Skip copy if everything is done
- if not to_copy and not to_remove:
- return
-
- # Try to copy
- try:
- self._windows_file_process(to_copy, to_remove)
- except Exception:
- log.error("Plugin copy failed", exc_info=True)
-
- # Validate copy was done
- invalid_copy = []
- for src, dst in to_copy:
- if not os.path.exists(dst) or not filecmp.cmp(src, dst):
- invalid_copy.append((src, dst))
-
- # Validate delete was done
- invalid_remove = []
- for filepath in to_remove:
- if os.path.exists(filepath):
- invalid_remove.append(filepath)
-
- if not invalid_remove and not invalid_copy:
- return
-
- msg_parts = []
- if invalid_remove:
- msg_parts.append(
- "Failed to remove files: {}".format(", ".join(invalid_remove))
- )
-
- if invalid_copy:
- _invalid = [
- "\"{}\" -> \"{}\"".format(src, dst)
- for src, dst in invalid_copy
- ]
- msg_parts.append(
- "Failed to copy files: {}".format(", ".join(_invalid))
- )
- raise RuntimeError(" & ".join(msg_parts))
-
- def _launch_tv_paint(self, launch_args):
- flags = (
- subprocess.DETACHED_PROCESS
- | subprocess.CREATE_NEW_PROCESS_GROUP
- )
- env = os.environ.copy()
- # Remove QuickTime from PATH on windows
- # - quicktime overrides TVPaint's ffmpeg encode/decode which may
- # cause issues on loading
- if platform.system().lower() == "windows":
- new_path = []
- for path in env["PATH"].split(os.pathsep):
- if path and "quicktime" not in path.lower():
- new_path.append(path)
- env["PATH"] = os.pathsep.join(new_path)
-
- kwargs = {
- "env": env,
- "creationflags": flags
- }
- self.process = subprocess.Popen(launch_args, **kwargs)
-
- def _create_routes(self):
- self.websocket_rpc = BaseTVPaintRpc(
- self, loop=self.websocket_server.loop
- )
- self.websocket_server.add_route(
- "*", "/", self.websocket_rpc.handle_request
- )
-
- def _start_webserver(self):
- self.websocket_server.start()
- # Make sure RPC is using same loop as websocket server
- while not self.websocket_server.server_is_running:
- time.sleep(0.1)
-
- def _stop_webserver(self):
- self.websocket_server.stop()
-
- def _exit(self, exit_code=None):
- self._stop_webserver()
- if exit_code is not None:
- self.exit_code = exit_code
-
- if self.exit_code is None:
- self.exit_code = 0
-
- def stop(self):
- """Stop communication and currently running python process."""
- log.info("Stopping communication")
- self._exit()
-
- def launch(self, launch_args):
- """Prepare all required data and launch host.
-
- First a websocket server is prepared as the communication point for the
- host. Once the server is ready to use, the host is launched as a
- subprocess.
- """
- if platform.system().lower() == "windows":
- self._prepare_windows_plugin(launch_args)
-
- # Launch TVPaint and the websocket server.
- log.info("Launching TVPaint")
- self.websocket_server = WebSocketServer()
-
- self._create_routes()
-
- os.environ["WEBSOCKET_URL"] = "ws://localhost:{}".format(
- self.websocket_server.port
- )
-
- log.info("Added request handler for url: {}".format(
- os.environ["WEBSOCKET_URL"]
- ))
-
- self._start_webserver()
-
- # Start TVPaint when server is running
- self._launch_tv_paint(launch_args)
-
- log.info("Waiting for client connection")
- while True:
- if self.process.poll() is not None:
- log.debug("Host process is not alive. Exiting")
- self._exit(1)
- return
-
- if self.websocket_rpc.client_connected():
- log.info("Client has connected")
- break
- time.sleep(0.5)
-
- self._on_client_connect()
-
- emit_event("application.launched")
-
- def _on_client_connect(self):
- self._initial_textfile_write()
-
- def _initial_textfile_write(self):
- """Show popup about Write to file at start of TVPaint."""
- tmp_file = tempfile.NamedTemporaryFile(
- mode="w", prefix="a_tvp_", suffix=".txt", delete=False
- )
- tmp_file.close()
- tmp_filepath = tmp_file.name.replace("\\", "/")
- george_script = (
- "tv_writetextfile \"strict\" \"append\" \"{}\" \"empty\""
- ).format(tmp_filepath)
-
- result = CommunicationWrapper.execute_george(george_script)
-
- # Remove the temp file
- os.remove(tmp_filepath)
-
- if result is None:
- log.warning(
- "Host was probably closed before plugin was initialized."
- )
- elif result.lower() == "forbidden":
- log.warning("User didn't confirm saving files.")
-
- def _client(self):
- if not self.websocket_rpc:
- log.warning("Communicator's server did not start yet.")
- return None
-
- for client in self.websocket_rpc.clients:
- if not client.ws.closed:
- return client
- log.warning("Client is not yet connected to Communicator.")
- return None
-
- def client(self):
- if not self._connected_client or self._connected_client.ws.closed:
- self._connected_client = self._client()
- return self._connected_client
-
- def send_request(self, method, params=None):
- client = self.client()
- if not client:
- return
-
- return self.websocket_rpc.send_request(
- client, method, params
- )
-
- def send_notification(self, method, params=None):
- client = self.client()
- if not client:
- return
-
- self.websocket_rpc.send_notification(
- client, method, params
- )
-
- def execute_george(self, george_script):
- """Execute passed goerge script in TVPaint."""
- return self.send_request(
- "execute_george", [george_script]
- )
-
- def execute_george_through_file(self, george_script):
- """Execute george script with temp file.
-
- Allows to execute multiline george script without stopping websocket
- client.
-
- On windows make sure script does not contain paths with backwards
- slashes in paths, TVPaint won't execute properly in that case.
-
- Args:
- george_script (str): George script to execute. May be multilined.
- """
- temporary_file = tempfile.NamedTemporaryFile(
- mode="w", prefix="a_tvp_", suffix=".grg", delete=False
- )
- temporary_file.write(george_script)
- temporary_file.close()
- temp_file_path = temporary_file.name.replace("\\", "/")
- self.execute_george("tv_runscript {}".format(temp_file_path))
- os.remove(temp_file_path)
-
-
-class QtCommunicator(BaseCommunicator):
- label = os.getenv("AYON_MENU_LABEL") or "AYON"
- title = "{} Tools".format(label)
- menu_definitions = {
- "title": title,
- "menu_items": [
- {
- "callback": "workfiles_tool",
- "label": "Workfiles",
- "help": "Open workfiles tool"
- }, {
- "callback": "loader_tool",
- "label": "Load",
- "help": "Open loader tool"
- }, {
- "callback": "scene_inventory_tool",
- "label": "Scene inventory",
- "help": "Open scene inventory tool"
- }, {
- "callback": "publish_tool",
- "label": "Publish",
- "help": "Open publisher"
- }, {
- "callback": "library_loader_tool",
- "label": "Library",
- "help": "Open library loader tool"
- }, {
- "callback": "experimental_tools",
- "label": "Experimental tools",
- "help": "Open experimental tools dialog"
- }
- ]
- }
-
- def __init__(self, qt_app):
- super().__init__()
- self.callback_queue = collections.deque()
- self.qt_app = qt_app
-
- def _create_routes(self):
- self.websocket_rpc = QtTVPaintRpc(
- self, loop=self.websocket_server.loop
- )
- self.websocket_server.add_route(
- "*", "/", self.websocket_rpc.handle_request
- )
-
- def execute_in_main_thread(self, main_thread_item, wait=True):
- """Add `MainThreadItem` to callback queue and wait for result."""
- self.callback_queue.append(main_thread_item)
- if wait:
- return main_thread_item.wait()
- return
-
- async def async_execute_in_main_thread(self, main_thread_item, wait=True):
- """Add `MainThreadItem` to callback queue and wait for result."""
- self.callback_queue.append(main_thread_item)
- if wait:
- return await main_thread_item.async_wait()
-
- def main_thread_listen(self):
- """Get last `MainThreadItem` from queue.
-
- Must be called from main thread.
-
- Method also checks if the host process is still running and triggers
- exit if it is not.
- """
- # check if host still running
- if self.process.poll() is not None:
- self._exit()
- return None
-
- if self.callback_queue:
- return self.callback_queue.popleft()
- return None
-
- def _on_client_connect(self):
- super()._on_client_connect()
- self._build_menu()
-
- def _build_menu(self):
- self.send_request(
- "define_menu", [self.menu_definitions]
- )
-
- def _exit(self, *args, **kwargs):
- super()._exit(*args, **kwargs)
- emit_event("application.exit")
- self.qt_app.exit(self.exit_code)
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/launch_script.py b/server_addon/tvpaint/client/ayon_tvpaint/api/launch_script.py
deleted file mode 100644
index 1e23e95572..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/api/launch_script.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import os
-import sys
-import signal
-import traceback
-import ctypes
-import platform
-import logging
-
-from qtpy import QtWidgets, QtCore, QtGui
-
-from ayon_core import style
-from ayon_core.pipeline import install_host
-from ayon_tvpaint.api import (
- TVPaintHost,
- CommunicationWrapper,
-)
-
-log = logging.getLogger(__name__)
-
-
-def safe_excepthook(*args):
- traceback.print_exception(*args)
-
-
-def main(launch_args):
- # Be sure server won't crash at any moment but just print traceback
- sys.excepthook = safe_excepthook
-
- # Create QtApplication for tools
- # - QApplication is also the main thread/event loop of the server
- qt_app = QtWidgets.QApplication([])
-
- tvpaint_host = TVPaintHost()
- # Execute pipeline installation
- install_host(tvpaint_host)
-
- # Create Communicator object and trigger launch
- # - this must be done before anything is processed
- communicator = CommunicationWrapper.create_qt_communicator(qt_app)
- communicator.launch(launch_args)
-
- def process_in_main_thread():
- """Execution of `MainThreadItem`."""
- item = communicator.main_thread_listen()
- if item:
- item.execute()
-
- timer = QtCore.QTimer()
- timer.setInterval(100)
- timer.timeout.connect(process_in_main_thread)
- timer.start()
-
- # Register terminal signal handler
- def signal_handler(*_args):
- print("You pressed Ctrl+C. Process ended.")
- communicator.stop()
-
- signal.signal(signal.SIGINT, signal_handler)
- signal.signal(signal.SIGTERM, signal_handler)
-
- qt_app.setQuitOnLastWindowClosed(False)
- qt_app.setStyleSheet(style.load_stylesheet())
-
- # Load avalon icon
- icon_path = style.app_icon_path()
- if icon_path:
- icon = QtGui.QIcon(icon_path)
- qt_app.setWindowIcon(icon)
-
- # Set application name to be able to show the application icon in the task bar
- if platform.system().lower() == "windows":
- ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(
- u"WebsocketServer"
- )
-
- # Run Qt application event processing
- sys.exit(qt_app.exec_())
-
-
-if __name__ == "__main__":
- args = list(sys.argv)
- if os.path.abspath(__file__) == os.path.normpath(args[0]):
- # Pop path to script
- args.pop(0)
- main(args)
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/lib.py b/server_addon/tvpaint/client/ayon_tvpaint/api/lib.py
deleted file mode 100644
index f8b8c29cdb..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/api/lib.py
+++ /dev/null
@@ -1,542 +0,0 @@
-import os
-import logging
-import tempfile
-
-from .communication_server import CommunicationWrapper
-
-log = logging.getLogger(__name__)
-
-
-def execute_george(george_script, communicator=None):
- if not communicator:
- communicator = CommunicationWrapper.communicator
- return communicator.execute_george(george_script)
-
-
-def execute_george_through_file(george_script, communicator=None):
- """Execute george script with temp file.
-
- Allows to execute multiline george script without stopping websocket
- client.
-
- On windows make sure script does not contain paths with backwards
- slashes in paths, TVPaint won't execute properly in that case.
-
- Args:
- george_script (str): George script to execute. May be multilined.
- """
- if not communicator:
- communicator = CommunicationWrapper.communicator
-
- return communicator.execute_george_through_file(george_script)
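A hedged usage sketch for context (assuming the addon is installed and a communicator is connected behind this module): the helper writes the multiline script to a temp `.grg` file and runs it with `tv_runscript`, so only a single request goes through the websocket client.

```python
# Hypothetical usage sketch; assumes a running TVPaint session with an
# active CommunicationWrapper.communicator behind this module.
from ayon_tvpaint.api.lib import execute_george_through_file

multiline_script = "\n".join([
    "tv_markin 0 set",    # set Mark In to frame 0
    "tv_markout 24 set",  # set Mark Out to frame 24
])
execute_george_through_file(multiline_script)
```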
-
-
-def parse_layers_data(data):
- """Parse layers data loaded in 'get_layers_data'."""
- layers = []
- layers_raw = data.split("\n")
- for layer_raw in layers_raw:
- layer_raw = layer_raw.strip()
- if not layer_raw:
- continue
- (
- layer_id, group_id, visible, position, opacity, name,
- layer_type,
- frame_start, frame_end, prelighttable, postlighttable,
- selected, editable, sencil_state, is_current
- ) = layer_raw.split("|")
- layer = {
- "layer_id": int(layer_id),
- "group_id": int(group_id),
- "visible": visible == "ON",
- "position": int(position),
- # Opacity from 'tv_layerinfo' is always set to '0' so it's unusable
- # "opacity": int(opacity),
- "name": name,
- "type": layer_type,
- "frame_start": int(frame_start),
- "frame_end": int(frame_end),
- "prelighttable": prelighttable == "1",
- "postlighttable": postlighttable == "1",
- "selected": selected == "1",
- "editable": editable == "1",
- "sencil_state": sencil_state,
- "is_current": is_current == "1"
- }
- layers.append(layer)
- return layers
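As a self-contained illustration of the pipe-delimited line format this parser expects (the field values below are made up; real lines come from the george script built in `get_layers_data_george_script`):

```python
from ayon_tvpaint.api.lib import parse_layers_data

# One hypothetical raw line with the 15 expected fields:
# layer_id|group_id|visible|position|opacity|name|type|frame_start|
# frame_end|prelighttable|postlighttable|selected|editable|
# sencil_state|is_current
sample_data = "12|0|ON|1|100|Background|Bitmap|0|24|0|0|1|1|off|1\n"

layers = parse_layers_data(sample_data)
print(layers[0]["name"])       # "Background"
print(layers[0]["visible"])    # True (raw value "ON")
print(layers[0]["frame_end"])  # 24
```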
-
-
-def get_layers_data_george_script(output_filepath, layer_ids=None):
- """Prepare george script which will collect all layers from workfile."""
- output_filepath = output_filepath.replace("\\", "/")
- george_script_lines = [
- # Variable containing full path to output file
- "output_path = \"{}\"".format(output_filepath),
- # Get Current Layer ID
- "tv_LayerCurrentID",
- "current_layer_id = result"
- ]
- # Script part for getting and storing layer information to temp
- layer_data_getter = (
- # Get information about layer's group
- "tv_layercolor \"get\" layer_id",
- "group_id = result",
- "tv_LayerInfo layer_id",
- (
- "PARSE result visible position opacity name"
- " type startFrame endFrame prelighttable postlighttable"
- " selected editable sencilState"
- ),
- # Check if layer ID match `tv_LayerCurrentID`
- "is_current=0",
- "IF CMP(current_layer_id, layer_id)==1",
- # - mark layer as selected if layer id match to current layer id
- "is_current=1",
- "selected=1",
- "END",
- # Prepare line with data separated by "|"
- (
- "line = layer_id'|'group_id'|'visible'|'position'|'opacity'|'"
- "name'|'type'|'startFrame'|'endFrame'|'prelighttable'|'"
- "postlighttable'|'selected'|'editable'|'sencilState'|'is_current"
- ),
- # Write data to output file
- "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line",
- )
-
- # Collect data for all layers if layers are not specified
- if layer_ids is None:
- george_script_lines.extend((
- # Layer loop variables
- "loop = 1",
- "idx = 0",
- # Layers loop
- "WHILE loop",
- "tv_LayerGetID idx",
- "layer_id = result",
- "idx = idx + 1",
- # Stop loop if layer_id is "NONE"
- "IF CMP(layer_id, \"NONE\")==1",
- "loop = 0",
- "ELSE",
- *layer_data_getter,
- "END",
- "END"
- ))
- else:
- for layer_id in layer_ids:
- george_script_lines.append("layer_id = {}".format(layer_id))
- george_script_lines.extend(layer_data_getter)
-
- return "\n".join(george_script_lines)
-
-
-def layers_data(layer_ids=None, communicator=None):
- """Backwards compatible function of 'get_layers_data'."""
- return get_layers_data(layer_ids, communicator)
-
-
-def get_layers_data(layer_ids=None, communicator=None):
- """Collect all layers information from currently opened workfile."""
- output_file = tempfile.NamedTemporaryFile(
- mode="w", prefix="a_tvp_", suffix=".txt", delete=False
- )
- output_file.close()
- if layer_ids is not None and isinstance(layer_ids, int):
- layer_ids = [layer_ids]
-
- output_filepath = output_file.name
-
- george_script = get_layers_data_george_script(output_filepath, layer_ids)
-
- execute_george_through_file(george_script, communicator)
-
- with open(output_filepath, "r") as stream:
- data = stream.read()
-
- output = parse_layers_data(data)
- os.remove(output_filepath)
- return output
-
-
-def parse_group_data(data):
- """Parse group data collected in 'get_groups_data'."""
- output = []
- groups_raw = data.split("\n")
- for group_raw in groups_raw:
- group_raw = group_raw.strip()
- if not group_raw:
- continue
-
- parts = group_raw.split("|")
- # Check the length and concatenate the 2 last items until the length
- # matches - this happens if the name contains spaces
- while len(parts) > 6:
- last_item = parts.pop(-1)
- parts[-1] = "|".join([parts[-1], last_item])
- clip_id, group_id, red, green, blue, name = parts
-
- group = {
- "group_id": int(group_id),
- "name": name,
- "clip_id": int(clip_id),
- "red": int(red),
- "green": int(green),
- "blue": int(blue),
- }
- output.append(group)
- return output
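A similar self-contained sketch for the group line format (the sample values are hypothetical):

```python
from ayon_tvpaint.api.lib import parse_group_data

# Hypothetical raw line: clip_id|group_id|red|green|blue|name
sample_data = "0|3|255|128|0|Characters\n"

groups = parse_group_data(sample_data)
print(groups[0])
# {'group_id': 3, 'name': 'Characters', 'clip_id': 0,
#  'red': 255, 'green': 128, 'blue': 0}
```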
-
-
-def groups_data(communicator=None):
- """Backwards compatible function of 'get_groups_data'."""
- return get_groups_data(communicator)
-
-
-def get_groups_data(communicator=None):
- """Information about groups from current workfile."""
- output_file = tempfile.NamedTemporaryFile(
- mode="w", prefix="a_tvp_", suffix=".txt", delete=False
- )
- output_file.close()
-
- output_filepath = output_file.name.replace("\\", "/")
- george_script_lines = (
- # Variable containing full path to output file
- "output_path = \"{}\"".format(output_filepath),
- "empty = 0",
- # Loop over 26 groups which is ATM maximum possible (in 11.7)
- # - ref: https://www.tvpaint.com/forum/viewtopic.php?t=13880
- "FOR idx = 1 TO 26",
- # Receive information about groups
- "tv_layercolor \"getcolor\" 0 idx",
- "PARSE result clip_id group_index c_red c_green c_blue group_name",
- # Create and add line to output file
- "line = clip_id'|'group_index'|'c_red'|'c_green'|'c_blue'|'group_name",
- "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line",
- "END",
- )
- george_script = "\n".join(george_script_lines)
- execute_george_through_file(george_script, communicator)
-
- with open(output_filepath, "r") as stream:
- data = stream.read()
-
- output = parse_group_data(data)
- os.remove(output_filepath)
- return output
-
-
-def get_layers_pre_post_behavior(layer_ids, communicator=None):
- """Collect data about pre and post behavior of layer ids.
-
- Pre and Post behaviors is enumerator of possible values:
- - "none"
- - "repeat"
- - "pingpong"
- - "hold"
-
- Example output:
- ```json
- {
- 0: {
- "pre": "none",
- "post": "repeat"
- }
- }
- ```
-
- Returns:
- dict: Key is layer id value is dictionary with "pre" and "post" keys.
- """
- # Skip if is empty
- if not layer_ids:
- return {}
-
- # Auto convert to list
- if not isinstance(layer_ids, (list, set, tuple)):
- layer_ids = [layer_ids]
-
- # Prepare temp file
- output_file = tempfile.NamedTemporaryFile(
- mode="w", prefix="a_tvp_", suffix=".txt", delete=False
- )
- output_file.close()
-
- output_filepath = output_file.name.replace("\\", "/")
- george_script_lines = [
- # Variable containing full path to output file
- "output_path = \"{}\"".format(output_filepath),
- ]
- for layer_id in layer_ids:
- george_script_lines.extend([
- "layer_id = {}".format(layer_id),
- "tv_layerprebehavior layer_id",
- "pre_beh = result",
- "tv_layerpostbehavior layer_id",
- "post_beh = result",
- "line = layer_id'|'pre_beh'|'post_beh",
- "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line"
- ])
-
- george_script = "\n".join(george_script_lines)
- execute_george_through_file(george_script, communicator)
-
- # Read data
- with open(output_filepath, "r") as stream:
- data = stream.read()
-
- # Remove temp file
- os.remove(output_filepath)
-
- # Parse data
- output = {}
- raw_lines = data.split("\n")
- for raw_line in raw_lines:
- line = raw_line.strip()
- if not line:
- continue
- parts = line.split("|")
- if len(parts) != 3:
- continue
- layer_id, pre_beh, post_beh = parts
- output[int(layer_id)] = {
- "pre": pre_beh.lower(),
- "post": post_beh.lower()
- }
- return output
-
-
-def get_layers_exposure_frames(layer_ids, layers_data=None, communicator=None):
- """Get exposure frames.
-
- Simply put, returns the frames where keyframes are. Recognized with the
- george function `tv_exposureinfo` returning "Head".
-
- Args:
- layer_ids (list): Ids of layers for which exposure frames should
- be collected.
- layers_data (list): Pre-collected layers data. If not passed,
- 'get_layers_data' is used.
- communicator (BaseCommunicator): Communicator used for communication
- with TVPaint.
-
- Returns:
- dict: Frames where exposure is set to "Head" by layer id.
- """
-
- if layers_data is None:
- layers_data = get_layers_data(layer_ids)
- _layers_by_id = {
- layer["layer_id"]: layer
- for layer in layers_data
- }
- layers_by_id = {
- layer_id: _layers_by_id.get(layer_id)
- for layer_id in layer_ids
- }
- tmp_file = tempfile.NamedTemporaryFile(
- mode="w", prefix="a_tvp_", suffix=".txt", delete=False
- )
- tmp_file.close()
- tmp_output_path = tmp_file.name.replace("\\", "/")
- george_script_lines = [
- "output_path = \"{}\"".format(tmp_output_path)
- ]
-
- output = {}
- layer_id_mapping = {}
- for layer_id, layer_data in layers_by_id.items():
- layer_id_mapping[str(layer_id)] = layer_id
- output[layer_id] = []
- if not layer_data:
- continue
- first_frame = layer_data["frame_start"]
- last_frame = layer_data["frame_end"]
- george_script_lines.extend([
- "line = \"\"",
- "layer_id = {}".format(layer_id),
- "line = line''layer_id",
- "tv_layerset layer_id",
- "frame = {}".format(first_frame),
- "WHILE (frame <= {})".format(last_frame),
- "tv_exposureinfo frame",
- "exposure = result",
- "IF (CMP(exposure, \"Head\") == 1)",
- "line = line'|'frame",
- "END",
- "frame = frame + 1",
- "END",
- "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line"
- ])
-
- execute_george_through_file("\n".join(george_script_lines), communicator)
-
- with open(tmp_output_path, "r") as stream:
- data = stream.read()
-
- os.remove(tmp_output_path)
-
- lines = []
- for line in data.split("\n"):
- line = line.strip()
- if line:
- lines.append(line)
-
- for line in lines:
- line_items = list(line.split("|"))
- layer_id = line_items.pop(0)
- _layer_id = layer_id_mapping[layer_id]
- output[_layer_id] = [int(frame) for frame in line_items]
-
- return output
-
-
-def get_exposure_frames(
- layer_id, first_frame=None, last_frame=None, communicator=None
-):
- """Get exposure frames.
-
- Simply put, returns the frames where keyframes are. Recognized with the
- george function `tv_exposureinfo` returning "Head".
-
- Args:
- layer_id (int): Id of a layer for which exposure frames should
- be collected.
- first_frame (int): Frame from which to look for exposure frames.
- Layer's first frame is used if not entered.
- last_frame (int): Last frame where to look for exposure frames.
- Layer's last frame is used if not entered.
-
- Returns:
- list: Frames where exposure is set to "Head".
- """
- if first_frame is None or last_frame is None:
- layer = layers_data(layer_id)[0]
- if first_frame is None:
- first_frame = layer["frame_start"]
- if last_frame is None:
- last_frame = layer["frame_end"]
-
- tmp_file = tempfile.NamedTemporaryFile(
- mode="w", prefix="a_tvp_", suffix=".txt", delete=False
- )
- tmp_file.close()
- tmp_output_path = tmp_file.name.replace("\\", "/")
- george_script_lines = [
- "tv_layerset {}".format(layer_id),
- "output_path = \"{}\"".format(tmp_output_path),
- "output = \"\"",
- "frame = {}".format(first_frame),
- "WHILE (frame <= {})".format(last_frame),
- "tv_exposureinfo frame",
- "exposure = result",
- "IF (CMP(exposure, \"Head\") == 1)",
- "IF (CMP(output, \"\") == 1)",
- "output = output''frame",
- "ELSE",
- "output = output'|'frame",
- "END",
- "END",
- "frame = frame + 1",
- "END",
- "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' output"
- ]
-
- execute_george_through_file("\n".join(george_script_lines), communicator)
-
- with open(tmp_output_path, "r") as stream:
- data = stream.read()
-
- os.remove(tmp_output_path)
-
- lines = []
- for line in data.split("\n"):
- line = line.strip()
- if line:
- lines.append(line)
-
- exposure_frames = []
- for line in lines:
- for frame in line.split("|"):
- exposure_frames.append(int(frame))
- return exposure_frames
-
-
-def get_scene_data(communicator=None):
- """Scene data of currently opened scene.
-
- Result contains resolution, pixel aspect, fps mark in/out with states,
- frame start and background color.
-
- Returns:
- dict: Scene data collected in many ways.
- """
- workfile_info = execute_george("tv_projectinfo", communicator)
- workfile_info_parts = workfile_info.split(" ")
-
- # Project frame start - not used
- workfile_info_parts.pop(-1)
- field_order = workfile_info_parts.pop(-1)
- frame_rate = float(workfile_info_parts.pop(-1))
- pixel_aspect = float(workfile_info_parts.pop(-1))
- height = int(workfile_info_parts.pop(-1))
- width = int(workfile_info_parts.pop(-1))
-
- # Marks return as "{frame - 1} {state} ", example "0 set".
- result = execute_george("tv_markin", communicator)
- mark_in_frame, mark_in_state, _ = result.split(" ")
-
- result = execute_george("tv_markout", communicator)
- mark_out_frame, mark_out_state, _ = result.split(" ")
-
- start_frame = execute_george("tv_startframe", communicator)
- return {
- "width": width,
- "height": height,
- "pixel_aspect": pixel_apsect,
- "fps": frame_rate,
- "field_order": field_order,
- "mark_in": int(mark_in_frame),
- "mark_in_state": mark_in_state,
- "mark_in_set": mark_in_state == "set",
- "mark_out": int(mark_out_frame),
- "mark_out_state": mark_out_state,
- "mark_out_set": mark_out_state == "set",
- "start_frame": int(start_frame),
- "bg_color": get_scene_bg_color(communicator)
- }
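A standalone sketch of the pop-from-the-end parsing used above; the sample `tv_projectinfo` result string is hypothetical, and the leading tokens (presumably the workfile path, which may contain spaces) are simply ignored.

```python
# Standalone sketch; the result string below is hypothetical.
sample_result = "C:/work/sh010 v001.tvpp 1920 1080 1.0 25.0 0 0"
parts = sample_result.split(" ")

parts.pop(-1)                        # project frame start - unused
field_order = parts.pop(-1)
frame_rate = float(parts.pop(-1))    # 25.0
pixel_aspect = float(parts.pop(-1))  # 1.0
height = int(parts.pop(-1))          # 1080
width = int(parts.pop(-1))           # 1920
# Whatever remains belongs to the leading path, spaces included.
print(width, height, frame_rate)     # 1920 1080 25.0
```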
-
-
-def get_scene_bg_color(communicator=None):
- """Background color set on scene.
-
- Is important for review exporting where scene bg color is used as
- background.
- """
- output_file = tempfile.NamedTemporaryFile(
- mode="w", prefix="a_tvp_", suffix=".txt", delete=False
- )
- output_file.close()
- output_filepath = output_file.name.replace("\\", "/")
- george_script_lines = [
- # Variable containing full path to output file
- "output_path = \"{}\"".format(output_filepath),
- "tv_background",
- "bg_color = result",
- # Write data to output file
- "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' bg_color"
- ]
-
- george_script = "\n".join(george_script_lines)
- execute_george_through_file(george_script, communicator)
-
- with open(output_filepath, "r") as stream:
- data = stream.read()
-
- os.remove(output_filepath)
- data = data.strip()
- if not data:
- return None
- return data.split(" ")
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/pipeline.py b/server_addon/tvpaint/client/ayon_tvpaint/api/pipeline.py
deleted file mode 100644
index 5ec6355138..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/api/pipeline.py
+++ /dev/null
@@ -1,518 +0,0 @@
-import os
-import json
-import tempfile
-import logging
-
-import requests
-import ayon_api
-import pyblish.api
-
-from ayon_tvpaint import TVPAINT_ROOT_DIR
-
-from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
-from ayon_core.settings import get_current_project_settings
-from ayon_core.lib import register_event_callback
-from ayon_core.pipeline import (
- register_loader_plugin_path,
- register_creator_plugin_path,
- AVALON_CONTAINER_ID,
-)
-from ayon_core.pipeline.context_tools import get_global_context
-
-from .lib import (
- execute_george,
- execute_george_through_file
-)
-
-log = logging.getLogger(__name__)
-
-
-METADATA_SECTION = "avalon"
-SECTION_NAME_CONTEXT = "context"
-SECTION_NAME_CREATE_CONTEXT = "create_context"
-SECTION_NAME_INSTANCES = "instances"
-SECTION_NAME_CONTAINERS = "containers"
-# Maximum length of metadata chunk string
-# TODO find out the max (500 is safe enough)
-TVPAINT_CHUNK_LENGTH = 500
-
-"""TVPaint's Metadata
-
-Metadata are stored to TVPaint's workfile.
-
-The workfile works similarly to an .ini file but has a few limitations. The
-most important limitation is that the value under a key has a limited length.
-Due to this limitation each metadata key stores the number of "subkeys" that
-are related to the section.
-
-Example:
-Metadata key `"instances"` may have stored value "2". In that case it is
-expected that there are also keys `["instances0", "instances1"]`.
-
-Workfile data looks like:
-```
-[avalon]
-instances0=[{{__dq__}id{__dq__}: {__dq__}ayon.create.instance{__dq__...
-instances1=...more data...
-instances=2
-```
-"""
-
-
-class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
- name = "tvpaint"
-
- def install(self):
- """Install TVPaint-specific functionality."""
-
- log.info("AYON - Installing TVPaint integration")
-
- # Create workdir folder if does not exist yet
- workdir = os.getenv("AYON_WORKDIR")
- if not os.path.exists(workdir):
- os.makedirs(workdir)
-
- plugins_dir = os.path.join(TVPAINT_ROOT_DIR, "plugins")
- publish_dir = os.path.join(plugins_dir, "publish")
- load_dir = os.path.join(plugins_dir, "load")
- create_dir = os.path.join(plugins_dir, "create")
-
- pyblish.api.register_host("tvpaint")
- pyblish.api.register_plugin_path(publish_dir)
- register_loader_plugin_path(load_dir)
- register_creator_plugin_path(create_dir)
-
- register_event_callback("application.launched", self.initial_launch)
- register_event_callback("application.exit", self.application_exit)
-
- def get_current_project_name(self):
- """
- Returns:
- Union[str, None]: Current project name.
- """
-
- return self.get_current_context().get("project_name")
-
- def get_current_folder_path(self):
- """
- Returns:
- Union[str, None]: Current folder path.
- """
-
- return self.get_current_context().get("folder_path")
-
- def get_current_task_name(self):
- """
- Returns:
- Union[str, None]: Current task name.
- """
-
- return self.get_current_context().get("task_name")
-
- def get_current_context(self):
- context = get_current_workfile_context()
- if not context:
- return get_global_context()
-
- if "project_name" in context:
- if "asset_name" in context:
- context["folder_path"] = context["asset_name"]
- return context
- # This is legacy way how context was stored
- return {
- "project_name": context.get("project"),
- "folder_path": context.get("asset"),
- "task_name": context.get("task")
- }
-
- # --- Create ---
- def get_context_data(self):
- return get_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, {})
-
- def update_context_data(self, data, changes):
- return write_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, data)
-
- def list_instances(self):
- """List all created instances from current workfile."""
- return list_instances()
-
- def write_instances(self, data):
- return write_instances(data)
-
- # --- Workfile ---
- def open_workfile(self, filepath):
- george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
- filepath.replace("\\", "/")
- )
- return execute_george_through_file(george_script)
-
- def save_workfile(self, filepath=None):
- if not filepath:
- filepath = self.get_current_workfile()
- context = get_global_context()
- save_current_workfile_context(context)
-
- # Execute george script to save workfile.
- george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/"))
- return execute_george(george_script)
-
- def work_root(self, session):
- return session["AYON_WORKDIR"]
-
- def get_current_workfile(self):
- return execute_george("tv_GetProjectName")
-
- def workfile_has_unsaved_changes(self):
- return None
-
- def get_workfile_extensions(self):
- return [".tvpp"]
-
- # --- Load ---
- def get_containers(self):
- return get_containers()
-
- def initial_launch(self):
- # Set up project settings if it's the template that's launched.
- # TODO also check for template creation when it's possible to define
- # templates
- last_workfile = os.environ.get("AYON_LAST_WORKFILE")
- if not last_workfile or os.path.exists(last_workfile):
- return
-
- log.info("Setting up project...")
- global_context = get_global_context()
- project_name = global_context.get("project_name")
- folder_path = global_context.get("folder_path")
- if not project_name or not folder_path:
- return
-
- folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
-
- set_context_settings(project_name, folder_entity)
-
- def application_exit(self):
- """Logic related to TimerManager.
-
- Todo:
- This should be handled out of TVPaint integration logic.
- """
-
- data = get_current_project_settings()
- stop_timer = data["tvpaint"]["stop_timer_on_application_exit"]
-
- if not stop_timer:
- return
-
- # Stop application timer.
- webserver_url = os.environ.get("AYON_WEBSERVER_URL")
- rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
- requests.post(rest_api_url)
-
-
-def containerise(
- name, namespace, members, context, loader, current_containers=None
-):
- """Add new container to metadata.
-
- Args:
- name (str): Container name.
- namespace (str): Container namespace.
- members (list): List of members that were loaded and belong
- to the container (layer names).
- context (dict): Representation context used to load the container.
- loader (LoaderPlugin): Loader which created the container.
- current_containers (list): Preloaded containers. Should be used only
- on update/switch when containers were modified during the process.
-
- Returns:
- dict: Container data stored to workfile metadata.
- """
-
- container_data = {
- "schema": "openpype:container-2.0",
- "id": AVALON_CONTAINER_ID,
- "members": members,
- "name": name,
- "namespace": namespace,
- "loader": str(loader),
- "representation": context["representation"]["id"]
- }
- if current_containers is None:
- current_containers = get_containers()
-
- # Add container to containers list
- current_containers.append(container_data)
-
- # Store data to metadata
- write_workfile_metadata(SECTION_NAME_CONTAINERS, current_containers)
-
- return container_data
-
-
-def split_metadata_string(text, chunk_length=None):
- """Split string by length.
-
- Split text to chunks by entered length.
- Example:
- ```python
- text = "ABCDEFGHIJKLM"
- result = split_metadata_string(text, 3)
- print(result)
- >>> ['ABC', 'DEF', 'GHI', 'JKL', 'M']
- ```
-
- Args:
- text (str): Text that will be split into chunks.
- chunk_length (int): Single chunk size. Default chunk_length is
- set to global variable `TVPAINT_CHUNK_LENGTH`.
-
- Returns:
- list: List of strings with at least one item.
- """
- if chunk_length is None:
- chunk_length = TVPAINT_CHUNK_LENGTH
- chunks = []
- for idx in range(chunk_length, len(text) + chunk_length, chunk_length):
- start_idx = idx - chunk_length
- chunks.append(text[start_idx:idx])
- return chunks
-
-
-def get_workfile_metadata_string_for_keys(metadata_keys):
- """Read metadata for specific keys from current project workfile.
-
- All values from the entered keys are joined into a single string without
- a separator.
-
- The function is designed to help get all values for one metadata key at
- once, so the order of the passed keys matters.
-
- Args:
- metadata_keys (list, str): Metadata keys for which data should be
- retrieved. Order of keys matters! It is possible to enter only
- single key as string.
- """
- # Add ability to pass only single key
- if isinstance(metadata_keys, str):
- metadata_keys = [metadata_keys]
-
- output_file = tempfile.NamedTemporaryFile(
- mode="w", prefix="a_tvp_", suffix=".txt", delete=False
- )
- output_file.close()
- output_filepath = output_file.name.replace("\\", "/")
-
- george_script_parts = []
- george_script_parts.append(
- "output_path = \"{}\"".format(output_filepath)
- )
- # Store data for each index of metadata key
- for metadata_key in metadata_keys:
- george_script_parts.append(
- "tv_readprojectstring \"{}\" \"{}\" \"\"".format(
- METADATA_SECTION, metadata_key
- )
- )
- george_script_parts.append(
- "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' result"
- )
-
- # Execute the script
- george_script = "\n".join(george_script_parts)
- execute_george_through_file(george_script)
-
- # Load data from temp file
- with open(output_filepath, "r") as stream:
- file_content = stream.read()
-
- # Remove `\n` from content
- output_string = file_content.replace("\n", "")
-
- # Delete temp file
- os.remove(output_filepath)
-
- return output_string
-
-
-def get_workfile_metadata_string(metadata_key):
- """Read metadata for specific key from current project workfile."""
- result = get_workfile_metadata_string_for_keys([metadata_key])
- if not result:
- return None
-
- stripped_result = result.strip()
- if not stripped_result:
- return None
-
- # NOTE Backwards compatibility for workfiles where the metadata key did
- # not store the count of chunk keys but the value itself
- # NOTE We don't have to care about negative values with `isdecimal` check
- if not stripped_result.isdecimal():
- metadata_string = result
- else:
- keys = []
- for idx in range(int(stripped_result)):
- keys.append("{}{}".format(metadata_key, idx))
- metadata_string = get_workfile_metadata_string_for_keys(keys)
-
- # Replace quote placeholders with their values
- metadata_string = (
- metadata_string
- .replace("{__sq__}", "'")
- .replace("{__dq__}", "\"")
- )
- return metadata_string
-
-
-def get_workfile_metadata(metadata_key, default=None):
- """Read and parse metadata for specific key from current project workfile.
-
- The pipeline uses this function to read loaded and created instances
- stored under keys in the `SECTION_NAME_INSTANCES` and
- `SECTION_NAME_CONTAINERS` constants.
-
- Args:
- metadata_key (str): Key defining which metadata should be read. It is
- expected the value contains a JSON serializable string.
- """
- if default is None:
- default = []
-
- json_string = get_workfile_metadata_string(metadata_key)
- if json_string:
- try:
- return json.loads(json_string)
- except json.decoder.JSONDecodeError:
- # TODO remove when backwards compatibility of storing metadata
- # will be removed
- print((
- "Fixed invalid metadata in workfile."
- " Not serializable string was: {}"
- ).format(json_string))
- write_workfile_metadata(metadata_key, default)
- return default
-
-
-def write_workfile_metadata(metadata_key, value):
- """Write metadata for specific key into current project workfile.
-
- George scripts have a specific way of working with quotes, which this
- function handles automatically.
-
- Args:
- metadata_key (str): Key defining under which key the value will be
- stored.
- value (dict, list, str): Data to store; must be JSON serializable.
- """
- if isinstance(value, (dict, list)):
- value = json.dumps(value)
-
- if not value:
- value = ""
-
- # Handle quotes in dumped json string
- # - replace single and double quotes with placeholders
- value = (
- value
- .replace("'", "{__sq__}")
- .replace("\"", "{__dq__}")
- )
- chunks = split_metadata_string(value)
- chunks_len = len(chunks)
-
- write_template = "tv_writeprojectstring \"{}\" \"{}\" \"{}\""
- george_script_parts = []
- # Add information about chunks length to metadata key itself
- george_script_parts.append(
- write_template.format(METADATA_SECTION, metadata_key, chunks_len)
- )
- # Add chunk values to indexed metadata keys
- for idx, chunk_value in enumerate(chunks):
- sub_key = "{}{}".format(metadata_key, idx)
- george_script_parts.append(
- write_template.format(METADATA_SECTION, sub_key, chunk_value)
- )
-
- george_script = "\n".join(george_script_parts)
-
- return execute_george_through_file(george_script)
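The quote placeholders written here are decoded again in `get_workfile_metadata_string`; a standalone round-trip of that encoding:

```python
import json

# Standalone round-trip of the quote placeholders used above.
value = json.dumps({"name": "L010", "note": "it's \"final\""})
encoded = value.replace("'", "{__sq__}").replace("\"", "{__dq__}")
decoded = encoded.replace("{__sq__}", "'").replace("{__dq__}", "\"")
assert json.loads(decoded) == {"name": "L010", "note": "it's \"final\""}
```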
-
-
-def get_current_workfile_context():
- """Return context in which was workfile saved."""
- return get_workfile_metadata(SECTION_NAME_CONTEXT, {})
-
-
-def save_current_workfile_context(context):
- """Save context which was used to create a workfile."""
- return write_workfile_metadata(SECTION_NAME_CONTEXT, context)
-
-
-def list_instances():
- """List all created instances from current workfile."""
- return get_workfile_metadata(SECTION_NAME_INSTANCES)
-
-
-def write_instances(data):
- return write_workfile_metadata(SECTION_NAME_INSTANCES, data)
-
-
-def get_containers():
- output = get_workfile_metadata(SECTION_NAME_CONTAINERS)
- if output:
- for item in output:
- if "objectName" not in item and "members" in item:
- members = item["members"]
- if isinstance(members, list):
- members = "|".join([str(member) for member in members])
- item["objectName"] = members
- return output
-
-
-def set_context_settings(project_name, folder_entity):
- """Set workfile settings by folder entity attributes.
-
- Change fps, resolution and frame start/end.
-
- Args:
- project_name (str): Project name.
- folder_entity (dict[str, Any]): Folder entity.
-
- """
-
- if not folder_entity:
- return
-
- folder_attributes = folder_entity["attrib"]
-
- width = folder_attributes.get("resolutionWidth")
- height = folder_attributes.get("resolutionHeight")
- if width is None or height is None:
- print("Resolution was not found!")
- else:
- execute_george(
- "tv_resizepage {} {} 0".format(width, height)
- )
-
- framerate = folder_attributes.get("fps")
-
- if framerate is not None:
- execute_george(
- "tv_framerate {} \"timestretch\"".format(framerate)
- )
- else:
- print("Framerate was not found!")
-
- frame_start = folder_attributes.get("frameStart")
- frame_end = folder_attributes.get("frameEnd")
-
- if frame_start is None or frame_end is None:
- print("Frame range was not found!")
- return
-
- handle_start = folder_attributes.get("handleStart")
- handle_end = folder_attributes.get("handleEnd")
-
- # Always start from 0 Mark In and set only Mark Out
- mark_in = 0
- mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end
-
- execute_george("tv_markin {} set".format(mark_in))
- execute_george("tv_markout {} set".format(mark_out))
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/plugin.py b/server_addon/tvpaint/client/ayon_tvpaint/api/plugin.py
deleted file mode 100644
index 9dd6ae530a..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/api/plugin.py
+++ /dev/null
@@ -1,205 +0,0 @@
-import re
-
-from ayon_core.pipeline import LoaderPlugin
-from ayon_core.pipeline.create import (
- CreatedInstance,
- get_product_name,
- AutoCreator,
- Creator,
-)
-from ayon_core.pipeline.create.creator_plugins import cache_and_get_instances
-
-from .lib import get_layers_data
-
-
-SHARED_DATA_KEY = "ayon.tvpaint.instances"
-
-
-class TVPaintCreatorCommon:
- @property
- def product_template_product_type(self):
- return self.product_type
-
- def _cache_and_get_instances(self):
- return cache_and_get_instances(
- self, SHARED_DATA_KEY, self.host.list_instances
- )
-
- def _collect_create_instances(self):
- instances_by_identifier = self._cache_and_get_instances()
- for instance_data in instances_by_identifier[self.identifier]:
- instance = CreatedInstance.from_existing(instance_data, self)
- self._add_instance_to_context(instance)
-
- def _update_create_instances(self, update_list):
- if not update_list:
- return
-
- cur_instances = self.host.list_instances()
- cur_instances_by_id = {}
- for instance_data in cur_instances:
- instance_id = instance_data.get("instance_id")
- if instance_id:
- cur_instances_by_id[instance_id] = instance_data
-
- for instance, changes in update_list:
- instance_data = changes.new_value
- cur_instance_data = cur_instances_by_id.get(instance.id)
- if cur_instance_data is None:
- cur_instances.append(instance_data)
- continue
- for key in set(cur_instance_data) - set(instance_data):
- cur_instance_data.pop(key)
- cur_instance_data.update(instance_data)
- self.host.write_instances(cur_instances)
-
- def _custom_get_product_name(
- self,
- project_name,
- folder_entity,
- task_entity,
- variant,
- host_name=None,
- instance=None
- ):
- dynamic_data = self.get_dynamic_data(
- project_name,
- folder_entity,
- task_entity,
- variant,
- host_name,
- instance
- )
- task_name = task_type = None
- if task_entity:
- task_name = task_entity["name"]
- task_type = task_entity["taskType"]
-
- return get_product_name(
- project_name,
- task_name,
- task_type,
- host_name,
- self.product_type,
- variant,
- dynamic_data=dynamic_data,
- project_settings=self.project_settings,
- product_type_filter=self.product_template_product_type
- )
-
-
-class TVPaintCreator(Creator, TVPaintCreatorCommon):
- settings_category = "tvpaint"
-
- def collect_instances(self):
- self._collect_create_instances()
-
- def update_instances(self, update_list):
- self._update_create_instances(update_list)
-
- def remove_instances(self, instances):
- ids_to_remove = {
- instance.id
- for instance in instances
- }
- cur_instances = self.host.list_instances()
- changed = False
- new_instances = []
- for instance_data in cur_instances:
- if instance_data.get("instance_id") in ids_to_remove:
- changed = True
- else:
- new_instances.append(instance_data)
-
- if changed:
- self.host.write_instances(new_instances)
-
- for instance in instances:
- self._remove_instance_from_context(instance)
-
- def get_dynamic_data(self, *args, **kwargs):
- # Change folder and name by current workfile context
- create_context = self.create_context
- folder_path = create_context.get_current_folder_path()
- task_name = create_context.get_current_task_name()
- output = {}
- if folder_path:
- folder_name = folder_path.rsplit("/")[-1]
- output["asset"] = folder_name
- output["folder"] = {"name": folder_name}
- if task_name:
- output["task"] = task_name
- return output
-
- def get_product_name(self, *args, **kwargs):
- return self._custom_get_product_name(*args, **kwargs)
-
- def _store_new_instance(self, new_instance):
- instances_data = self.host.list_instances()
- instances_data.append(new_instance.data_to_store())
- self.host.write_instances(instances_data)
- self._add_instance_to_context(new_instance)
-
-
-class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon):
- settings_category = "tvpaint"
-
- def collect_instances(self):
- self._collect_create_instances()
-
- def update_instances(self, update_list):
- self._update_create_instances(update_list)
-
- def get_product_name(self, *args, **kwargs):
- return self._custom_get_product_name(*args, **kwargs)
-
-
-class Loader(LoaderPlugin):
- hosts = ["tvpaint"]
- settings_category = "tvpaint"
-
- @staticmethod
- def get_members_from_container(container):
- if "members" not in container and "objectName" in container:
- # Backwards compatibility
- layer_ids_str = container.get("objectName")
- return [
- int(layer_id) for layer_id in layer_ids_str.split("|")
- ]
- return container["members"]
-
- def get_unique_layer_name(self, namespace, name):
- """Layer name with counter as suffix.
-
- Find the highest 3 digit suffix from all layer names in the scene
- matching regex `{namespace}_{name}_{suffix}`. The highest 3 digit suffix
- is used as base for the next number; if the scene does not contain a
- layer matching the regex, `0` is used as base.
-
- Args:
- namespace (str): Usually folder name.
- name (str): Name of loaded product.
-
- Returns:
- str: `{namespace}_{name}_{higher suffix + 1}`
- """
- layer_name_base = "{}_{}".format(namespace, name)
-
- counter_regex = re.compile(r"_(\d{3})$")
-
- higher_counter = 0
- for layer in get_layers_data():
- layer_name = layer["name"]
- if not layer_name.startswith(layer_name_base):
- continue
- number_subpart = layer_name[len(layer_name_base):]
- groups = counter_regex.findall(number_subpart)
- if len(groups) != 1:
- continue
-
- counter = int(groups[0])
- if counter > higher_counter:
- higher_counter = counter
- continue
-
- return "{}_{:0>3d}".format(layer_name_base, higher_counter + 1)
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/hooks/pre_launch_args.py b/server_addon/tvpaint/client/ayon_tvpaint/hooks/pre_launch_args.py
deleted file mode 100644
index 8ee91aa0e7..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/hooks/pre_launch_args.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from ayon_core.lib import get_ayon_launcher_args
-from ayon_applications import PreLaunchHook, LaunchTypes
-
-
-class TvpaintPrelaunchHook(PreLaunchHook):
- """Launch arguments preparation.
-
- Hook adds the python executable and the script path of the tvpaint
- implementation before the tvpaint executable and adds the last workfile
- path to launch arguments.
-
- Existence of the last workfile is checked. If the workfile does not
- exist, the hook tries to copy a templated workfile from a predefined
- path.
- """
- app_groups = {"tvpaint"}
- launch_types = {LaunchTypes.local}
-
- def execute(self):
- # Pop tvpaint executable
- executable_path = self.launch_context.launch_args.pop(0)
-
- # Pop rest of launch arguments - There should not be other arguments!
- remainders = []
- while self.launch_context.launch_args:
- remainders.append(self.launch_context.launch_args.pop(0))
-
- new_launch_args = get_ayon_launcher_args(
- "run", self.launch_script_path(), executable_path
- )
-
- # Append as a whole list as these arguments should not be separated
- self.launch_context.launch_args.append(new_launch_args)
-
- if remainders:
- self.log.warning((
- "There are unexpected launch arguments in TVPaint launch. {}"
- ).format(str(remainders)))
- self.launch_context.launch_args.extend(remainders)
-
- def launch_script_path(self):
- from ayon_tvpaint import get_launch_script_path
-
- return get_launch_script_path()
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/lib.py b/server_addon/tvpaint/client/ayon_tvpaint/lib.py
deleted file mode 100644
index 97cf8d3633..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/lib.py
+++ /dev/null
@@ -1,684 +0,0 @@
-import os
-import shutil
-import collections
-from PIL import Image, ImageDraw
-
-
-def backwards_id_conversion(data_by_layer_id):
- """Convert layer ids to strings from integers."""
- for key in tuple(data_by_layer_id.keys()):
- if not isinstance(key, str):
- data_by_layer_id[str(key)] = data_by_layer_id.pop(key)
-
-
-def get_frame_filename_template(frame_end, filename_prefix=None, ext=None):
- """Get file template with frame key for rendered files.
-
- This is a simple template containing `{frame}{ext}` for sequential
- outputs and `single_file{ext}` for single file output. Output is
- rendered to a temporary folder so the filename should not matter as the
- integrator changes it.
- """
- frame_padding = 4
- frame_end_str_len = len(str(frame_end))
- if frame_end_str_len > frame_padding:
- frame_padding = frame_end_str_len
-
- ext = ext or ".png"
- filename_prefix = filename_prefix or ""
-
- return "{}{{frame:0>{}}}{}".format(filename_prefix, frame_padding, ext)
-
-
-def get_layer_pos_filename_template(range_end, filename_prefix=None, ext=None):
- filename_prefix = filename_prefix or ""
- new_filename_prefix = filename_prefix + "pos_{pos}."
- return get_frame_filename_template(range_end, new_filename_prefix, ext)
-
-
-def _calculate_pre_behavior_copy(
- range_start, exposure_frames, pre_beh,
- layer_frame_start, layer_frame_end,
- output_idx_by_frame_idx
-):
- """Calculate frames before first exposure frame based on pre behavior.
-
- Function may skip whole processing if the layer's first frame or the
- first exposure frame is before the range start. In that case pre
- behavior does not have to be calculated.
-
- Args:
- range_start(int): First frame of range which should be rendered.
- exposure_frames(list): List of all exposure frames on layer.
- pre_beh(str): Pre behavior of layer (enum of 4 strings).
- layer_frame_start(int): First frame of layer.
- layer_frame_end(int): Last frame of layer.
- output_idx_by_frame_idx(dict): References to already prepared frames
- and where result will be stored.
- """
- # Skip if the layer's first frame is before the range start
- if layer_frame_start < range_start:
- return
-
- first_exposure_frame = min(exposure_frames)
- # Skip if the first exposure frame is before the range start
- if first_exposure_frame < range_start:
- return
-
- # Calculate frame count of layer
- frame_count = layer_frame_end - layer_frame_start + 1
-
- if pre_beh == "none":
- # Just fill all frames from range start to the layer's first frame with None
- for frame_idx in range(range_start, layer_frame_start):
- output_idx_by_frame_idx[frame_idx] = None
-
- elif pre_beh == "hold":
- # Keep first frame for whole time
- for frame_idx in range(range_start, layer_frame_start):
- output_idx_by_frame_idx[frame_idx] = first_exposure_frame
-
- elif pre_beh == "repeat":
- # Loop backwards from the frame before the layer's first frame
- for frame_idx in reversed(range(range_start, layer_frame_start)):
- eq_frame_idx_offset = (
- (layer_frame_end - frame_idx) % frame_count
- )
- eq_frame_idx = layer_frame_start + (
- layer_frame_end - eq_frame_idx_offset
- )
- output_idx_by_frame_idx[frame_idx] = eq_frame_idx
-
- elif pre_beh == "pingpong":
- half_seq_len = frame_count - 1
- seq_len = half_seq_len * 2
- for frame_idx in reversed(range(range_start, layer_frame_start)):
- eq_frame_idx_offset = (layer_frame_start - frame_idx) % seq_len
- if eq_frame_idx_offset > half_seq_len:
- eq_frame_idx_offset = (seq_len - eq_frame_idx_offset)
- eq_frame_idx = layer_frame_start + eq_frame_idx_offset
- output_idx_by_frame_idx[frame_idx] = eq_frame_idx
-
-
-def _calculate_post_behavior_copy(
- range_end, exposure_frames, post_beh,
- layer_frame_start, layer_frame_end,
- output_idx_by_frame_idx
-):
- """Calculate frames after last frame of layer based on post behavior.
-
- Function may skip whole processing if last layer frame is after range_end.
- In that case post behavior does not make sense.
-
- Args:
- range_end(int): Last frame of range which should be rendered.
- exposure_frames(list): List of all exposure frames on layer.
- post_beh(str): Post behavior of layer (enum of 4 strings).
- layer_frame_start(int): First frame of layer.
- layer_frame_end(int): Last frame of layer.
- output_idx_by_frame_idx(dict): References to already prepared frames
- and where result will be stored.
- """
- # Check if last layer frame is after range end
- if layer_frame_end >= range_end:
- return
-
- last_exposure_frame = max(exposure_frames)
- # Skip if last exposure frame is after range end
- # - this is probably irrelevant with layer frame end check?
- if last_exposure_frame >= range_end:
- return
-
- # Calculate frame count of layer
- frame_count = layer_frame_end - layer_frame_start + 1
-
- if post_beh == "none":
- # Just fill all frames after the layer's last frame to range end with None
- for frame_idx in range(layer_frame_end + 1, range_end + 1):
- output_idx_by_frame_idx[frame_idx] = None
-
- elif post_beh == "hold":
- # Keep last exposure frame to the end
- for frame_idx in range(layer_frame_end + 1, range_end + 1):
- output_idx_by_frame_idx[frame_idx] = last_exposure_frame
-
- elif post_beh == "repeat":
- # Loop forward from the frame after the layer's last frame
- for frame_idx in range(layer_frame_end + 1, range_end + 1):
- eq_frame_idx = layer_frame_start + (frame_idx % frame_count)
- output_idx_by_frame_idx[frame_idx] = eq_frame_idx
-
- elif post_beh == "pingpong":
- half_seq_len = frame_count - 1
- seq_len = half_seq_len * 2
- for frame_idx in range(layer_frame_end + 1, range_end + 1):
- eq_frame_idx_offset = (frame_idx - layer_frame_end) % seq_len
- if eq_frame_idx_offset > half_seq_len:
- eq_frame_idx_offset = seq_len - eq_frame_idx_offset
- eq_frame_idx = layer_frame_end - eq_frame_idx_offset
- output_idx_by_frame_idx[frame_idx] = eq_frame_idx
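A standalone walk-through of the "pingpong" index math above, for a hypothetical layer spanning frames 5-7 rendered up to frame 12:

```python
# Standalone sketch of the post "pingpong" mapping for a tiny layer.
layer_frame_start, layer_frame_end, range_end = 5, 7, 12
frame_count = layer_frame_end - layer_frame_start + 1   # 3
half_seq_len = frame_count - 1                          # 2
seq_len = half_seq_len * 2                              # 4

mapping = {}
for frame_idx in range(layer_frame_end + 1, range_end + 1):
    offset = (frame_idx - layer_frame_end) % seq_len
    if offset > half_seq_len:
        offset = seq_len - offset
    mapping[frame_idx] = layer_frame_end - offset

print(mapping)   # {8: 6, 9: 5, 10: 6, 11: 7, 12: 6}
```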
-
-
-def _calculate_in_range_frames(
- range_start, range_end,
- exposure_frames, layer_frame_end,
- output_idx_by_frame_idx
-):
- """Calculate frame references in defined range.
-
- Function fills references for exposure frames that fall into the range
- and for the held frames between them, up to the layer's last frame.
- Frames before the first in-range exposure reference the closest
- exposure frame before the range start.
-
- Args:
- range_start(int): First frame of range which should be rendered.
- range_end(int): Last frame of range which should be rendered.
- exposure_frames(list): List of all exposure frames on layer.
- layer_frame_end(int): Last frame of layer.
- output_idx_by_frame_idx(dict): References to already prepared frames
- and where result will be stored.
- """
- # Calculate in range frames
- in_range_frames = []
- for frame_idx in exposure_frames:
- if range_start <= frame_idx <= range_end:
- output_idx_by_frame_idx[frame_idx] = frame_idx
- in_range_frames.append(frame_idx)
-
- if in_range_frames:
- first_in_range_frame = min(in_range_frames)
- # Calculate frames from first exposure frames to range end or last
- # frame of layer (post behavior should be calculated since that time)
- previous_exposure = first_in_range_frame
- for frame_idx in range(first_in_range_frame, range_end + 1):
- if frame_idx > layer_frame_end:
- break
-
- if frame_idx in exposure_frames:
- previous_exposure = frame_idx
- else:
- output_idx_by_frame_idx[frame_idx] = previous_exposure
-
-    # There can be frames before the first exposure frame in range
-    # First check if the first range frame is not already filled
- if range_start in output_idx_by_frame_idx:
- return
-
-    first_exposure_frame = min(exposure_frames)
- last_exposure_frame = max(exposure_frames)
-    # Check if the first exposure frame is before the defined range
-    # - if not then skip
- if first_exposure_frame >= range_start:
- return
-
-    # Check if the last exposure frame is also before range start
-    # - in that case there is nothing to hold into the range
- if last_exposure_frame < range_start:
- return
-
- closest_exposure_frame = first_exposure_frame
- for frame_idx in exposure_frames:
- if frame_idx >= range_start:
- break
- if frame_idx > closest_exposure_frame:
- closest_exposure_frame = frame_idx
-
- output_idx_by_frame_idx[closest_exposure_frame] = closest_exposure_frame
- for frame_idx in range(range_start, range_end + 1):
- if frame_idx in output_idx_by_frame_idx:
- break
- output_idx_by_frame_idx[frame_idx] = closest_exposure_frame
-
-
-def _cleanup_frame_references(output_idx_by_frame_idx):
- """Cleanup frame references to frame reference.
-
- Cleanup not direct references to rendered frame.
- ```
- // Example input
- {
- 1: 1,
- 2: 1,
- 3: 2
- }
- // Result
- {
- 1: 1,
- 2: 1,
- 3: 1 // Changed reference to final rendered frame
- }
- ```
-    Result is a dictionary where each key leads to the frame that is rendered.
- """
- for frame_idx in tuple(output_idx_by_frame_idx.keys()):
- reference_idx = output_idx_by_frame_idx[frame_idx]
- # Skip transparent frames
- if reference_idx is None or reference_idx == frame_idx:
- continue
-
- real_reference_idx = reference_idx
- _tmp_reference_idx = reference_idx
- while True:
- _temp = output_idx_by_frame_idx[_tmp_reference_idx]
- if _temp == _tmp_reference_idx:
- real_reference_idx = _tmp_reference_idx
- break
- _tmp_reference_idx = _temp
-
- if real_reference_idx != reference_idx:
- output_idx_by_frame_idx[frame_idx] = real_reference_idx
-
-
-def _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end):
- """Cleanup frame references to frames out of passed range.
-
- First available frame in range is used
- ```
- // Example input. Range 2-3
- {
- 1: 1,
- 2: 1,
- 3: 1
- }
- // Result
- {
-        2: 2, // Redirected to self as the first frame referencing out of range
-        3: 2 // Redirected to the first redirected frame
- }
- ```
-    Result is a dictionary where each key leads to the frame that is rendered.
- """
- in_range_frames_by_out_frames = collections.defaultdict(set)
- out_range_frames = set()
- for frame_idx in tuple(output_idx_by_frame_idx.keys()):
-        # Collect frames out of range for removal at the end
- if frame_idx < range_start or frame_idx > range_end:
- out_range_frames.add(frame_idx)
- continue
-
- reference_idx = output_idx_by_frame_idx[frame_idx]
- # Skip transparent frames
- if reference_idx is None:
- continue
-
-        # Collect frames whose reference points out of the range
- if reference_idx < range_start or reference_idx > range_end:
- in_range_frames_by_out_frames[reference_idx].add(frame_idx)
-
-    for reference_idx in tuple(in_range_frames_by_out_frames.keys()):
-        frame_indexes = in_range_frames_by_out_frames.pop(reference_idx)
-        new_reference = None
-        # Iterate in sorted order so the lowest frame becomes the reference
-        for frame_idx in sorted(frame_indexes):
-            if new_reference is None:
-                new_reference = frame_idx
-            output_idx_by_frame_idx[frame_idx] = new_reference
-
- # Finally remove out of range frames
- for frame_idx in out_range_frames:
- output_idx_by_frame_idx.pop(frame_idx)
-
-
-def calculate_layer_frame_references(
- range_start, range_end,
- layer_frame_start,
- layer_frame_end,
- exposure_frames,
- pre_beh, post_beh
-):
- """Calculate frame references for one layer based on it's data.
-
- Output is dictionary where key is frame index referencing to rendered frame
- index. If frame index should be rendered then is referencing to self.
-
- ```
- // Example output
- {
- 1: 1, // Reference to self - will be rendered
- 2: 1, // Reference to frame 1 - will be copied
- 3: 1, // Reference to frame 1 - will be copied
- 4: 4, // Reference to self - will be rendered
- ...
- 20: 4 // Reference to frame 4 - will be copied
- 21: None // Has reference to None - transparent image
- }
- ```
-
- Args:
- range_start(int): First frame of range which should be rendered.
- range_end(int): Last frame of range which should be rendered.
-        layer_frame_start(int): First frame of layer.
- layer_frame_end(int): Last frame of layer.
- exposure_frames(list): List of all exposure frames on layer.
- pre_beh(str): Pre behavior of layer (enum of 4 strings).
- post_beh(str): Post behavior of layer (enum of 4 strings).
- """
- # Output variable
- output_idx_by_frame_idx = {}
- # Skip if layer does not have any exposure frames
- if not exposure_frames:
- return output_idx_by_frame_idx
-
- # First calculate in range frames
- _calculate_in_range_frames(
- range_start, range_end,
- exposure_frames, layer_frame_end,
- output_idx_by_frame_idx
- )
- # Calculate frames by pre behavior of layer
- _calculate_pre_behavior_copy(
- range_start, exposure_frames, pre_beh,
- layer_frame_start, layer_frame_end,
- output_idx_by_frame_idx
- )
- # Calculate frames by post behavior of layer
- _calculate_post_behavior_copy(
- range_end, exposure_frames, post_beh,
- layer_frame_start, layer_frame_end,
- output_idx_by_frame_idx
- )
- # Cleanup of referenced frames
- _cleanup_frame_references(output_idx_by_frame_idx)
-
- # Remove frames out of range
- _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end)
-
- return output_idx_by_frame_idx
-
-
-def calculate_layers_extraction_data(
- layers_data,
- exposure_frames_by_layer_id,
- behavior_by_layer_id,
- range_start,
- range_end,
- skip_not_visible=True,
- filename_prefix=None,
- ext=None
-):
- """Calculate extraction data for passed layers data.
-
- ```
- {
-        <layer_id>: {
- "frame_references": {...},
- "filenames_by_frame_index": {...}
- },
- ...
- }
- ```
-
-    Frame references map each frame index to the frame index that is rendered.
-
-    Filenames by frame index represent the filename under which each frame
-    should be stored. The directory is not handled here because each usage may
-    need a different approach.
-
- Args:
- layers_data(list): Layers data loaded from TVPaint.
- exposure_frames_by_layer_id(dict): Exposure frames of layers stored by
- layer id.
- behavior_by_layer_id(dict): Pre and Post behavior of layers stored by
- layer id.
- range_start(int): First frame of rendered range.
- range_end(int): Last frame of rendered range.
- skip_not_visible(bool): Skip calculations for hidden layers (Skipped
- by default).
- filename_prefix(str): Prefix before filename.
- ext(str): Extension which filenames will have ('.png' is default).
-
- Returns:
-        dict: Prepared data for rendering stored by layer id.
- """
- # Make sure layer ids are strings
- # backwards compatibility when layer ids were integers
- backwards_id_conversion(exposure_frames_by_layer_id)
- backwards_id_conversion(behavior_by_layer_id)
-
- layer_template = get_layer_pos_filename_template(
- range_end, filename_prefix, ext
- )
- output = {}
- for layer_data in layers_data:
- if skip_not_visible and not layer_data["visible"]:
- continue
-
- orig_layer_id = layer_data["layer_id"]
- layer_id = str(orig_layer_id)
-
-        # Skip layer if it does not have any exposure frames (empty layer)
- exposure_frames = exposure_frames_by_layer_id[layer_id]
- if not exposure_frames:
- continue
-
- layer_position = layer_data["position"]
- layer_frame_start = layer_data["frame_start"]
- layer_frame_end = layer_data["frame_end"]
-
- layer_behavior = behavior_by_layer_id[layer_id]
-
- pre_behavior = layer_behavior["pre"]
- post_behavior = layer_behavior["post"]
-
- frame_references = calculate_layer_frame_references(
- range_start, range_end,
- layer_frame_start,
- layer_frame_end,
- exposure_frames,
- pre_behavior, post_behavior
- )
-        # All values in 'frame_references' reference a frame that must be
- # rendered out
- frames_to_render = set(frame_references.values())
- # Remove 'None' reference (transparent image)
- if None in frames_to_render:
- frames_to_render.remove(None)
-
- # Skip layer if has nothing to render
- if not frames_to_render:
- continue
-
-        # All frames that need a filename (not necessarily the final output)
- filename_frames = (
- set(range(range_start, range_end + 1))
- | frames_to_render
- )
- filenames_by_frame_index = {}
- for frame_idx in filename_frames:
- filenames_by_frame_index[frame_idx] = layer_template.format(
- pos=layer_position,
- frame=frame_idx
- )
-
- # Store objects under the layer id
- output[orig_layer_id] = {
- "frame_references": frame_references,
- "filenames_by_frame_index": filenames_by_frame_index
- }
- return output
-
-
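
For orientation, this is roughly how `calculate_layers_extraction_data` is driven. The import path and all concrete values are assumptions for illustration; only the function signature and the dictionary keys come from the code above.

```
# Hypothetical usage sketch - the module path and values are assumed.
from ayon_tvpaint.lib import calculate_layers_extraction_data

layers_data = [{
    "layer_id": 1,
    "position": 0,
    "visible": True,
    "frame_start": 0,
    "frame_end": 9,
}]
exposure_frames_by_layer_id = {"1": [0, 4, 7]}
behavior_by_layer_id = {"1": {"pre": "hold", "post": "hold"}}

extraction_data = calculate_layers_extraction_data(
    layers_data,
    exposure_frames_by_layer_id,
    behavior_by_layer_id,
    range_start=0,
    range_end=9,
)
# Result is keyed by layer id; each value holds "frame_references"
# and "filenames_by_frame_index" for that layer.
print(extraction_data[1]["frame_references"])
```
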
-def create_transparent_image_from_source(src_filepath, dst_filepath):
- """Create transparent image of same type and size as source image."""
- img_obj = Image.open(src_filepath)
- painter = ImageDraw.Draw(img_obj)
- painter.rectangle((0, 0, *img_obj.size), fill=(0, 0, 0, 0))
- img_obj.save(dst_filepath)
-
-
-def fill_reference_frames(frame_references, filepaths_by_frame):
-    """Fill referencing frame indexes with a copy of their source frame."""
-    for frame_idx, ref_idx in frame_references.items():
-        # Frames referencing themselves are rendered and used as source,
-        # and reference indexes with None can't be filled
-        if ref_idx is None or frame_idx == ref_idx:
-            continue
-
-        # Get source and destination filepaths
-        src_filepath = filepaths_by_frame[ref_idx]
-        dst_filepath = filepaths_by_frame[frame_idx]
-
-        # Hardlink when possible, otherwise copy
-        copy_render_file(src_filepath, dst_filepath)
-
-
-def copy_render_file(src_path, dst_path):
-    """Duplicate a rendered image, using a hardlink when possible."""
-    if hasattr(os, "link"):
-        try:
-            os.link(src_path, dst_path)
-            return
-        except OSError:
-            # Hardlink can fail e.g. across drives, fall back to a copy
-            pass
-    shutil.copy(src_path, dst_path)
-
-
-def cleanup_rendered_layers(filepaths_by_layer_id):
- """Delete all files for each individual layer files after compositing."""
- # Collect all filepaths from data
- all_filepaths = []
- for filepaths_by_frame in filepaths_by_layer_id.values():
- all_filepaths.extend(filepaths_by_frame.values())
-
-    # Remove each filepath only once
- for filepath in set(all_filepaths):
- if filepath is not None and os.path.exists(filepath):
- os.remove(filepath)
-
-
-def composite_rendered_layers(
- layers_data, filepaths_by_layer_id,
- range_start, range_end,
- dst_filepaths_by_frame, cleanup=True
-):
- """Composite multiple rendered layers by their position.
-
- Result is single frame sequence with transparency matching content
- created in TVPaint. Missing source filepaths are replaced with transparent
- images but at least one image must be rendered and exist.
-
- Function can be used even if single layer was created to fill transparent
- filepaths.
-
- Args:
- layers_data(list): Layers data loaded from TVPaint.
- filepaths_by_layer_id(dict): Rendered filepaths stored by frame index
- per layer id. Used as source for compositing.
- range_start(int): First frame of rendered range.
- range_end(int): Last frame of rendered range.
- dst_filepaths_by_frame(dict): Output filepaths by frame where final
- image after compositing will be stored. Path must not clash with
- source filepaths.
- cleanup(bool): Remove all source filepaths when done with compositing.
- """
- # Prepare layers by their position
- # - position tells in which order will compositing happen
- layer_ids_by_position = {}
- for layer in layers_data:
- layer_position = layer["position"]
- layer_ids_by_position[layer_position] = layer["layer_id"]
-
- # Sort layer positions
- sorted_positions = tuple(reversed(sorted(layer_ids_by_position.keys())))
-    # Prepare variable for filepaths without any rendered content
-    # - a transparent image will be created for them
- transparent_filepaths = set()
- # Store first final filepath
- first_dst_filepath = None
- for frame_idx in range(range_start, range_end + 1):
- dst_filepath = dst_filepaths_by_frame[frame_idx]
- src_filepaths = []
- for layer_position in sorted_positions:
- layer_id = layer_ids_by_position[layer_position]
- filepaths_by_frame = filepaths_by_layer_id[layer_id]
- src_filepath = filepaths_by_frame.get(frame_idx)
- if src_filepath is not None:
- src_filepaths.append(src_filepath)
-
- if not src_filepaths:
- transparent_filepaths.add(dst_filepath)
- continue
-
- # Store first destination filepath to be used for transparent images
- if first_dst_filepath is None:
- first_dst_filepath = dst_filepath
-
- if len(src_filepaths) == 1:
- src_filepath = src_filepaths[0]
- if cleanup:
- os.rename(src_filepath, dst_filepath)
- else:
- copy_render_file(src_filepath, dst_filepath)
-
- else:
- composite_images(src_filepaths, dst_filepath)
-
-    # Store first transparent filepath to be able to copy it
- transparent_filepath = None
- for dst_filepath in transparent_filepaths:
- if transparent_filepath is None:
- create_transparent_image_from_source(
- first_dst_filepath, dst_filepath
- )
- transparent_filepath = dst_filepath
- else:
- copy_render_file(transparent_filepath, dst_filepath)
-
- # Remove all files that were used as source for compositing
- if cleanup:
- cleanup_rendered_layers(filepaths_by_layer_id)
-
-
-def composite_images(input_image_paths, output_filepath):
- """Composite images in order from passed list.
-
- Raises:
- ValueError: When entered list is empty.
- """
- if not input_image_paths:
- raise ValueError("Nothing to composite.")
-
- img_obj = None
- for image_filepath in input_image_paths:
- _img_obj = Image.open(image_filepath)
- if img_obj is None:
- img_obj = _img_obj
- else:
- img_obj.alpha_composite(_img_obj)
- img_obj.save(output_filepath)
-
-
-def rename_filepaths_by_frame_start(
- filepaths_by_frame, range_start, range_end, new_frame_start
-):
- """Change frames in filenames of finished images to new frame start."""
-
- # Calculate frame end
- new_frame_end = range_end + (new_frame_start - range_start)
- # Create filename template
- filename_template = get_frame_filename_template(
- max(range_end, new_frame_end)
- )
-
- # Use different ranges based on Mark In and output Frame Start values
- # - this is to make sure that filename renaming won't affect files that
- # are not renamed yet
- if range_start < new_frame_start:
- source_range = range(range_end, range_start - 1, -1)
- output_range = range(new_frame_end, new_frame_start - 1, -1)
- else:
-        # This is a less likely situation as the frame start will in most
-        # cases be higher than Mark In.
- source_range = range(range_start, range_end + 1)
- output_range = range(new_frame_start, new_frame_end + 1)
-
-    # Rename only files whose name actually changes
- new_dst_filepaths = {}
- for src_frame, dst_frame in zip(source_range, output_range):
- src_filepath = os.path.normpath(filepaths_by_frame[src_frame])
- dirpath, src_filename = os.path.split(src_filepath)
- dst_filename = filename_template.format(frame=dst_frame)
- dst_filepath = os.path.join(dirpath, dst_filename)
-
- if src_filename != dst_filename:
- os.rename(src_filepath, dst_filepath)
-
- new_dst_filepaths[dst_frame] = dst_filepath
-
- return new_dst_filepaths
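
Both `composite_images` and `create_transparent_image_from_source` above boil down to a few Pillow calls. A minimal sketch of those calls (the file names are placeholders and the explicit `convert("RGBA")` is an assumption; the original code expects RGBA sources already):

```
from PIL import Image, ImageDraw

# Stack two RGBA frames the way composite_images does:
# the first image is the base, later images are composited over it.
bottom = Image.open("layer_pos_010.00001.png").convert("RGBA")
top = Image.open("layer_pos_020.00001.png").convert("RGBA")
bottom.alpha_composite(top)
bottom.save("composited.00001.png")

# Create a fully transparent frame of the same size, like
# create_transparent_image_from_source does for missing frames.
blank = Image.open("composited.00001.png")
ImageDraw.Draw(blank).rectangle((0, 0, *blank.size), fill=(0, 0, 0, 0))
blank.save("transparent.00001.png")
```
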
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/convert_legacy.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/convert_legacy.py
deleted file mode 100644
index e79a6565e8..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/convert_legacy.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import collections
-
-from ayon_core.pipeline.create.creator_plugins import (
- ProductConvertorPlugin,
- cache_and_get_instances,
-)
-from ayon_tvpaint.api.plugin import SHARED_DATA_KEY
-from ayon_tvpaint.api.lib import get_groups_data
-
-
-class TVPaintLegacyConverted(ProductConvertorPlugin):
- """Conversion of legacy instances in scene to new creators.
-
- This convertor handles only instances created by core creators.
-
-    All instances that would be created by auto-creators are removed, because
-    by the time this convertor finds them the auto-created instances already
-    exist.
- """
-
- identifier = "tvpaint.legacy.converter"
-
- def find_instances(self):
- instances_by_identifier = cache_and_get_instances(
- self, SHARED_DATA_KEY, self.host.list_instances
- )
- if instances_by_identifier[None]:
- self.add_convertor_item("Convert legacy instances")
-
- def convert(self):
- current_instances = self.host.list_instances()
- to_convert = collections.defaultdict(list)
- converted = False
- for instance in current_instances:
- if instance.get("creator_identifier") is not None:
- continue
- converted = True
-
- family = instance.get("family")
- if family in (
- "renderLayer",
- "renderPass",
- "renderScene",
- "review",
- "workfile",
- ):
- to_convert[family].append(instance)
- else:
- instance["keep"] = False
-
- # Skip if nothing was changed
- if not converted:
- self.remove_convertor_item()
- return
-
- self._convert_render_layers(
- to_convert["renderLayer"], current_instances)
- self._convert_render_passes(
- to_convert["renderPass"], current_instances)
- self._convert_render_scenes(
- to_convert["renderScene"], current_instances)
- self._convert_workfiles(
- to_convert["workfile"], current_instances)
- self._convert_reviews(
- to_convert["review"], current_instances)
-
- new_instances = [
- instance
- for instance in current_instances
- if instance.get("keep") is not False
- ]
- self.host.write_instances(new_instances)
- # remove legacy item if all is fine
- self.remove_convertor_item()
-
- def _convert_render_layers(self, render_layers, current_instances):
- if not render_layers:
- return
-
- # Look for possible existing render layers in scene
- render_layers_by_group_id = {}
- for instance in current_instances:
- if instance.get("creator_identifier") == "render.layer":
- group_id = instance["creator_identifier"]["group_id"]
- render_layers_by_group_id[group_id] = instance
-
- groups_by_id = {
- group["group_id"]: group
- for group in get_groups_data()
- }
- for render_layer in render_layers:
- group_id = render_layer.pop("group_id")
- # Just remove legacy instance if group is already occupied
- if group_id in render_layers_by_group_id:
- render_layer["keep"] = False
- continue
- # Add identifier
- render_layer["creator_identifier"] = "render.layer"
- # Change 'uuid' to 'instance_id'
- render_layer["instance_id"] = render_layer.pop("uuid")
- # Fill creator attributes
- render_layer["creator_attributes"] = {
- "group_id": group_id
- }
- render_layer["productType"] = "render"
- group = groups_by_id[group_id]
- # Use group name for variant
- group["variant"] = group["name"]
-
- def _convert_render_passes(self, render_passes, current_instances):
- if not render_passes:
- return
-
- # Render passes must have available render layers so we look for render
- # layers first
- # - '_convert_render_layers' must be called before this method
- render_layers_by_group_id = {}
- for instance in current_instances:
- if instance.get("creator_identifier") == "render.layer":
- group_id = instance["creator_attributes"]["group_id"]
- render_layers_by_group_id[group_id] = instance
-
- for render_pass in render_passes:
- group_id = render_pass.pop("group_id")
- render_layer = render_layers_by_group_id.get(group_id)
- if not render_layer:
- render_pass["keep"] = False
- continue
-
- render_pass["creator_identifier"] = "render.pass"
- render_pass["instance_id"] = render_pass.pop("uuid")
- render_pass["productType"] = "render"
-
- render_pass["creator_attributes"] = {
- "render_layer_instance_id": render_layer["instance_id"]
- }
- render_pass["variant"] = render_pass.pop("pass")
- render_pass.pop("renderlayer")
-
- # Rest of instances are just marked for deletion
- def _convert_render_scenes(self, render_scenes, current_instances):
- for render_scene in render_scenes:
- render_scene["keep"] = False
-
-    def _convert_workfiles(self, workfiles, current_instances):
-        for workfile in workfiles:
-            workfile["keep"] = False
-
-    def _convert_reviews(self, reviews, current_instances):
-        for review in reviews:
-            review["keep"] = False
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_render.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_render.py
deleted file mode 100644
index 2286a4417a..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_render.py
+++ /dev/null
@@ -1,1208 +0,0 @@
-"""Render Layer and Passes creators.
-
-Render layer is the main part and is represented by a color group in TVPaint.
-All TVPaint layers marked with that group color are part of the render layer.
-To describe specific parts of the layer it is possible to create sub-sets of
-the layer which are named passes. A render pass consists of layers in the same
-color group as the render layer but defines a more specific part.
-
-For example a render layer could be 'Bob' which consists of 5 TVPaint layers.
-- Bob has a 'head' which consists of 2 TVPaint layers -> Render pass 'head'
-- Bob has a 'body' which consists of 1 TVPaint layer -> Render pass 'body'
-- Bob has an 'arm' which consists of 1 TVPaint layer -> Render pass 'arm'
-- The last layer does not belong to any render pass at all
-
-Bob will be rendered as the 'beauty' of Bob (all visible layers in the group).
-His head will be rendered too but without any other parts. The same goes for
-the body and arm.
-
-What is this good for? Compositing has more control over how the renders are
-used. It can transform each render pass without the need to modify and
-re-render them in TVPaint.
-
-The workflow may hit issues when blending modes other than the default 'color'
-blend mode are used. In that case it is not recommended to use this workflow
-at all, as other blend modes may affect all layers in the clip, which cannot
-be reproduced per pass.
-
-There is a special case for simple publishing of the scene which is called
-'render.scene'. That will use all visible layers and render them as one big
-sequence.
-
-Todos:
- Add option to extract marked layers and passes as json output format for
- AfterEffects.
-"""
-
-import collections
-from typing import Any, Optional, Union
-
-import ayon_api
-
-from ayon_core.lib import (
- prepare_template_data,
- AbstractAttrDef,
- UILabelDef,
- UISeparatorDef,
- EnumDef,
- TextDef,
- BoolDef,
-)
-from ayon_core.pipeline.create import (
- CreatedInstance,
- CreatorError,
-)
-from ayon_tvpaint.api.plugin import (
- TVPaintCreator,
- TVPaintAutoCreator,
-)
-from ayon_tvpaint.api.lib import (
- get_layers_data,
- get_groups_data,
- execute_george_through_file,
-)
-
-RENDER_LAYER_DETAILED_DESCRIPTIONS = (
- """Render Layer is "a group of TVPaint layers"
-
-Be aware that a Render Layer is not a TVPaint layer.
-
-All TVPaint layers in the scene with the color group id are rendered in the
-beauty pass. To create sub passes use the Render Pass creator, which depends
-on the existence of a render layer instance.
-
-The group can represent an asset (tree) or a different part of the scene that
-consists of one or more TVPaint layers that can be used as a single item
-during compositing (for example).
-
-In some cases it may be needed to have sub parts of the layer. For example
-'Bob' could be a Render Layer which has 'Arm', 'Head' and 'Body' as Render
-Passes.
-"""
-)
-
-
-RENDER_PASS_DETAILED_DESCRIPTIONS = (
- """Render Pass is sub part of Render Layer.
-
-Render Pass can consist of one or more TVPaint layers. Render Pass must
-belong to a Render Layer. Marked TVPaint layers will change it's group color
-to match group color of Render Layer.
-"""
-)
-
-
-AUTODETECT_RENDER_DETAILED_DESCRIPTION = (
- """Semi-automated Render Layer and Render Pass creation.
-
-Render Layers and Render Passes are created based on information in the
-TVPaint scene. All color groups used in the scene will be used for Render
-Layer creation. The name of the group is used as the variant.
-
-All TVPaint layers under the color group will be created as Render Passes
-where the layer name is used as the variant.
-
-The plugin will use all used color groups and layers, or can skip those that
-are not visible.
-
-There is an option to auto-rename color groups before Render Layer creation.
-It is based on a settings template that is filled with the index of the used
-group, counted from bottom to top.
-"""
-)
-
-
-class CreateRenderlayer(TVPaintCreator):
- """Mark layer group as Render layer instance.
-
- All TVPaint layers in the scene with the color group id are rendered in the
-    beauty pass. To create sub passes use the Render Pass creator, which
-    depends on the existence of a render layer instance.
- """
-
- label = "Render Layer"
- product_type = "render"
- product_template_product_type = "renderLayer"
- identifier = "render.layer"
- icon = "fa5.images"
-
- # George script to change color group
- rename_script_template = (
- "tv_layercolor \"setcolor\""
- " {clip_id} {group_id} {r} {g} {b} \"{name}\""
- )
- # Order to be executed before Render Pass creator
- order = 90
- description = "Mark TVPaint color group as one Render Layer."
- detailed_description = RENDER_LAYER_DETAILED_DESCRIPTIONS
-
- # Settings
- # - Default render pass name for beauty
- default_pass_name = "beauty"
- # - Mark by default instance for review
- mark_for_review = True
-
- def apply_settings(self, project_settings):
- plugin_settings = (
- project_settings["tvpaint"]["create"]["create_render_layer"]
- )
- self.default_variant = plugin_settings["default_variant"]
- self.default_variants = plugin_settings["default_variants"]
- self.default_pass_name = plugin_settings["default_pass_name"]
- self.mark_for_review = plugin_settings["mark_for_review"]
-
- def get_dynamic_data(
- self,
- project_name,
- folder_entity,
- task_entity,
- variant,
- host_name,
- instance
- ):
- dynamic_data = super().get_dynamic_data(
- project_name,
- folder_entity,
- task_entity,
- variant,
- host_name,
- instance
- )
- dynamic_data["renderpass"] = self.default_pass_name
- dynamic_data["renderlayer"] = variant
- return dynamic_data
-
- def _get_selected_group_ids(self):
- return {
- layer["group_id"]
- for layer in get_layers_data()
- if layer["selected"]
- }
-
- def create(self, product_name, instance_data, pre_create_data):
- self.log.debug("Query data from workfile.")
-
- group_name = instance_data["variant"]
- group_id = pre_create_data.get("group_id")
- # This creator should run only on one group
- if group_id is None or group_id == -1:
- selected_groups = self._get_selected_group_ids()
- selected_groups.discard(0)
- if len(selected_groups) > 1:
- raise CreatorError("You have selected more than one group")
-
- if len(selected_groups) == 0:
- raise CreatorError("You don't have selected any group")
- group_id = tuple(selected_groups)[0]
-
- self.log.debug("Querying groups data from workfile.")
- groups_data = get_groups_data()
- group_item = None
- for group_data in groups_data:
- if group_data["group_id"] == group_id:
- group_item = group_data
-
- for instance in self.create_context.instances:
- if (
- instance.creator_identifier == self.identifier
- and instance["creator_attributes"]["group_id"] == group_id
- ):
- raise CreatorError((
- f"Group \"{group_item.get('name')}\" is already used"
- f" by another render layer \"{instance['productName']}\""
- ))
-
- self.log.debug(f"Selected group id is \"{group_id}\".")
- if "creator_attributes" not in instance_data:
- instance_data["creator_attributes"] = {}
- creator_attributes = instance_data["creator_attributes"]
- mark_for_review = pre_create_data.get("mark_for_review")
- if mark_for_review is None:
- mark_for_review = self.mark_for_review
- creator_attributes["group_id"] = group_id
- creator_attributes["mark_for_review"] = mark_for_review
-
- self.log.info(f"Product name is {product_name}")
- new_instance = CreatedInstance(
- self.product_type,
- product_name,
- instance_data,
- self
- )
- self._store_new_instance(new_instance)
-
- if not group_id or group_item["name"] == group_name:
- return new_instance
-
- self.log.debug("Changing name of the group.")
- # Rename TVPaint group (keep color same)
- # - groups can't contain spaces
- rename_script = self.rename_script_template.format(
- clip_id=group_item["clip_id"],
- group_id=group_item["group_id"],
- r=group_item["red"],
- g=group_item["green"],
- b=group_item["blue"],
- name=group_name
- )
- execute_george_through_file(rename_script)
-
- self.log.info((
- f"Name of group with index {group_id}"
- f" was changed to \"{group_name}\"."
- ))
- return new_instance
-
- def _get_groups_enum(self):
- groups_enum = []
- empty_groups = []
- for group in get_groups_data():
- group_name = group["name"]
- item = {
- "label": group_name,
- "value": group["group_id"]
- }
-            # TVPaint has a defined number of available color groups, but
-            # the count is not consistent across versions. It is not possible
-            # to know how many groups there are.
- #
- if group_name and group_name != "0":
- if empty_groups:
- groups_enum.extend(empty_groups)
- empty_groups = []
- groups_enum.append(item)
- else:
- empty_groups.append(item)
- return groups_enum
-
- def get_pre_create_attr_defs(self):
- groups_enum = self._get_groups_enum()
- groups_enum.insert(0, {"label": "