diff --git a/client/ayon_core/addon/base.py b/client/ayon_core/addon/base.py
index 3d028dba07..21b1193b07 100644
--- a/client/ayon_core/addon/base.py
+++ b/client/ayon_core/addon/base.py
@@ -50,7 +50,7 @@ IGNORED_MODULES_IN_AYON = set()
# When addon was moved from ayon-core codebase
# - this is used to log the missing addon
MOVED_ADDON_MILESTONE_VERSIONS = {
- "applications": VersionInfo(2, 0, 0),
+ "applications": VersionInfo(0, 2, 0),
}
# Inherit from `object` for Python 2 hosts
diff --git a/client/ayon_core/hosts/aftereffects/api/pipeline.py b/client/ayon_core/hosts/aftereffects/api/pipeline.py
index 105fee64b9..2239040f09 100644
--- a/client/ayon_core/hosts/aftereffects/api/pipeline.py
+++ b/client/ayon_core/hosts/aftereffects/api/pipeline.py
@@ -8,14 +8,11 @@ from ayon_core.lib import Logger, register_event_callback
from ayon_core.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
+ register_workfile_build_plugin_path,
AVALON_CONTAINER_ID,
AVALON_INSTANCE_ID,
AYON_INSTANCE_ID,
)
-from ayon_core.hosts.aftereffects.api.workfile_template_builder import (
- AEPlaceholderLoadPlugin,
- AEPlaceholderCreatePlugin
-)
from ayon_core.pipeline.load import any_outdated_containers
import ayon_core.hosts.aftereffects
@@ -40,6 +37,7 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
+WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
@@ -76,6 +74,7 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
+ register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
register_event_callback("application.launched", application_launch)
@@ -118,12 +117,6 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
item["id"] = "publish_context"
self.stub.imprint(item["id"], item)
- def get_workfile_build_placeholder_plugins(self):
- return [
- AEPlaceholderLoadPlugin,
- AEPlaceholderCreatePlugin
- ]
-
# created instances section
def list_instances(self):
"""List all created instances from current workfile which
diff --git a/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py b/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py
index aa2f36e8aa..99d5bbb938 100644
--- a/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py
+++ b/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py
@@ -1,6 +1,7 @@
import os.path
import uuid
import shutil
+from abc import abstractmethod
from ayon_core.pipeline import registered_host
from ayon_core.tools.workfile_template_build import (
@@ -9,13 +10,9 @@ from ayon_core.tools.workfile_template_build import (
from ayon_core.pipeline.workfile.workfile_template_builder import (
AbstractTemplateBuilder,
PlaceholderPlugin,
- LoadPlaceholderItem,
- CreatePlaceholderItem,
- PlaceholderLoadMixin,
- PlaceholderCreateMixin
+ PlaceholderItem
)
from ayon_core.hosts.aftereffects.api import get_stub
-from ayon_core.hosts.aftereffects.api.lib import set_settings
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
PLACEHOLDER_ID = "openpype.placeholder"
@@ -51,6 +48,10 @@ class AETemplateBuilder(AbstractTemplateBuilder):
class AEPlaceholderPlugin(PlaceholderPlugin):
"""Contains generic methods for all PlaceholderPlugins."""
+ @abstractmethod
+ def _create_placeholder_item(self, item_data: dict) -> PlaceholderItem:
+        """Create plugin-specific placeholder item from collected metadata."""
+
def collect_placeholders(self):
"""Collect info from file metadata about created placeholders.
@@ -63,17 +64,7 @@ class AEPlaceholderPlugin(PlaceholderPlugin):
if item.get("plugin_identifier") != self.identifier:
continue
- if isinstance(self, AEPlaceholderLoadPlugin):
- item = LoadPlaceholderItem(item["uuid"],
- item["data"],
- self)
- elif isinstance(self, AEPlaceholderCreatePlugin):
- item = CreatePlaceholderItem(item["uuid"],
- item["data"],
- self)
- else:
- raise NotImplementedError(f"Not implemented for {type(self)}")
-
+ item = self._create_placeholder_item(item)
output.append(item)
return output
@@ -135,87 +126,6 @@ class AEPlaceholderPlugin(PlaceholderPlugin):
stub.imprint(item_id, container_data)
-class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin):
- """Adds Create placeholder.
-
- This adds composition and runs Create
- """
- identifier = "aftereffects.create"
- label = "AfterEffects create"
-
- def create_placeholder(self, placeholder_data):
- stub = get_stub()
- name = "CREATEPLACEHOLDER"
- item_id = stub.add_item(name, "COMP")
-
- self._imprint_item(item_id, name, placeholder_data, stub)
-
- def populate_placeholder(self, placeholder):
- """Replace 'placeholder' with publishable instance.
-
- Renames prepared composition name, creates publishable instance, sets
- frame/duration settings according to DB.
- """
- pre_create_data = {"use_selection": True}
- item_id, item = self._get_item(placeholder)
- get_stub().select_items([item_id])
- self.populate_create_placeholder(placeholder, pre_create_data)
-
- # apply settings for populated composition
- item_id, metadata_item = self._get_item(placeholder)
- set_settings(True, True, [item_id])
-
- def get_placeholder_options(self, options=None):
- return self.get_create_plugin_options(options)
-
-
-class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin):
- identifier = "aftereffects.load"
- label = "AfterEffects load"
-
- def create_placeholder(self, placeholder_data):
- """Creates AE's Placeholder item in Project items list.
-
- Sets dummy resolution/duration/fps settings, will be replaced when
- populated.
- """
- stub = get_stub()
- name = "LOADERPLACEHOLDER"
- item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
-
- self._imprint_item(item_id, name, placeholder_data, stub)
-
- def populate_placeholder(self, placeholder):
- """Use Openpype Loader from `placeholder` to create new FootageItems
-
- New FootageItems are created, files are imported.
- """
- self.populate_load_placeholder(placeholder)
- errors = placeholder.get_errors()
- stub = get_stub()
- if errors:
- stub.print_msg("\n".join(errors))
- else:
- if not placeholder.data["keep_placeholder"]:
- metadata = stub.get_metadata()
- for item in metadata:
- if not item.get("is_placeholder"):
- continue
- scene_identifier = item.get("uuid")
- if (scene_identifier and
- scene_identifier == placeholder.scene_identifier):
- stub.delete_item(item["members"][0])
- stub.remove_instance(placeholder.scene_identifier, metadata)
-
- def get_placeholder_options(self, options=None):
- return self.get_load_plugin_options(options)
-
- def load_succeed(self, placeholder, container):
- placeholder_item_id, _ = self._get_item(placeholder)
- item_id = container.id
- get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)
-
-
def build_workfile_template(*args, **kwargs):
builder = AETemplateBuilder(registered_host())
builder.build_template(*args, **kwargs)
diff --git a/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py
new file mode 100644
index 0000000000..c7927f176f
--- /dev/null
+++ b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py
@@ -0,0 +1,49 @@
+from ayon_core.pipeline.workfile.workfile_template_builder import (
+ CreatePlaceholderItem,
+ PlaceholderCreateMixin
+)
+from ayon_core.hosts.aftereffects.api import get_stub
+from ayon_core.hosts.aftereffects.api.lib import set_settings
+import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb
+
+
+class AEPlaceholderCreatePlugin(wtb.AEPlaceholderPlugin,
+ PlaceholderCreateMixin):
+ """Adds Create placeholder.
+
+    This adds a composition and runs Create.
+ """
+ identifier = "aftereffects.create"
+ label = "AfterEffects create"
+
+ def _create_placeholder_item(self, item_data) -> CreatePlaceholderItem:
+ return CreatePlaceholderItem(
+ scene_identifier=item_data["uuid"],
+ data=item_data["data"],
+ plugin=self
+ )
+
+ def create_placeholder(self, placeholder_data):
+ stub = get_stub()
+ name = "CREATEPLACEHOLDER"
+ item_id = stub.add_item(name, "COMP")
+
+ self._imprint_item(item_id, name, placeholder_data, stub)
+
+ def populate_placeholder(self, placeholder):
+ """Replace 'placeholder' with publishable instance.
+
+ Renames prepared composition name, creates publishable instance, sets
+ frame/duration settings according to DB.
+ """
+ pre_create_data = {"use_selection": True}
+ item_id, item = self._get_item(placeholder)
+ get_stub().select_items([item_id])
+ self.populate_create_placeholder(placeholder, pre_create_data)
+
+ # apply settings for populated composition
+ item_id, metadata_item = self._get_item(placeholder)
+ set_settings(True, True, [item_id])
+
+ def get_placeholder_options(self, options=None):
+ return self.get_create_plugin_options(options)
diff --git a/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py
new file mode 100644
index 0000000000..7f7e4f49ce
--- /dev/null
+++ b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py
@@ -0,0 +1,60 @@
+from ayon_core.pipeline.workfile.workfile_template_builder import (
+ LoadPlaceholderItem,
+ PlaceholderLoadMixin
+)
+from ayon_core.hosts.aftereffects.api import get_stub
+import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb
+
+
+class AEPlaceholderLoadPlugin(wtb.AEPlaceholderPlugin, PlaceholderLoadMixin):
+ identifier = "aftereffects.load"
+ label = "AfterEffects load"
+
+ def _create_placeholder_item(self, item_data) -> LoadPlaceholderItem:
+ return LoadPlaceholderItem(
+ scene_identifier=item_data["uuid"],
+ data=item_data["data"],
+ plugin=self
+ )
+
+ def create_placeholder(self, placeholder_data):
+ """Creates AE's Placeholder item in Project items list.
+
+ Sets dummy resolution/duration/fps settings, will be replaced when
+ populated.
+ """
+ stub = get_stub()
+ name = "LOADERPLACEHOLDER"
+ item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
+
+ self._imprint_item(item_id, name, placeholder_data, stub)
+
+ def populate_placeholder(self, placeholder):
+ """Use Openpype Loader from `placeholder` to create new FootageItems
+
+ New FootageItems are created, files are imported.
+ """
+ self.populate_load_placeholder(placeholder)
+ errors = placeholder.get_errors()
+ stub = get_stub()
+ if errors:
+ stub.print_msg("\n".join(errors))
+ else:
+ if not placeholder.data["keep_placeholder"]:
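+                # Remove the placeholder item and its metadata once the
+                # loaded items have replaced it.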
+ metadata = stub.get_metadata()
+ for item in metadata:
+ if not item.get("is_placeholder"):
+ continue
+ scene_identifier = item.get("uuid")
+ if (scene_identifier and
+ scene_identifier == placeholder.scene_identifier):
+ stub.delete_item(item["members"][0])
+ stub.remove_instance(placeholder.scene_identifier, metadata)
+
+ def get_placeholder_options(self, options=None):
+ return self.get_load_plugin_options(options)
+
+ def load_succeed(self, placeholder, container):
+ placeholder_item_id, _ = self._get_item(placeholder)
+ item_id = container.id
+ get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)
diff --git a/client/ayon_core/hosts/blender/plugins/publish/extract_abc.py b/client/ayon_core/hosts/blender/plugins/publish/extract_abc.py
index 094f88fd8c..6590be515c 100644
--- a/client/ayon_core/hosts/blender/plugins/publish/extract_abc.py
+++ b/client/ayon_core/hosts/blender/plugins/publish/extract_abc.py
@@ -2,6 +2,7 @@ import os
import bpy
+from ayon_core.lib import BoolDef
from ayon_core.pipeline import publish
from ayon_core.hosts.blender.api import plugin
@@ -17,6 +18,8 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
if not self.is_active(instance.data):
return
+ attr_values = self.get_attr_values_from_data(instance.data)
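+        # Attribute values the artist set in the publisher UI
+        # (defined in get_attribute_defs below).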
+
# Define extract output file path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["folderEntity"]["name"]
@@ -46,7 +49,8 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
bpy.ops.wm.alembic_export(
filepath=filepath,
selected=True,
- flatten=False
+ flatten=False,
+ subdiv_schema=attr_values.get("subdiv_schema", False)
)
plugin.deselect_all()
@@ -65,6 +69,21 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
self.log.debug("Extracted instance '%s' to: %s",
instance.name, representation)
+ @classmethod
+ def get_attribute_defs(cls):
+ return [
+ BoolDef(
+ "subdiv_schema",
+ label="Alembic Mesh Subdiv Schema",
+ tooltip="Export Meshes using Alembic's subdivision schema.\n"
+ "Enabling this includes creases with the export but "
+ "excludes the mesh's normals.\n"
+ "Enabling this usually result in smaller file size "
+ "due to lack of normals.",
+ default=False
+ )
+ ]
+
class ExtractModelABC(ExtractABC):
"""Extract model as ABC."""
diff --git a/client/ayon_core/hosts/fusion/hooks/pre_fusion_launch_menu_hook.py b/client/ayon_core/hosts/fusion/hooks/pre_fusion_launch_menu_hook.py
index e70d4b844e..113a1ffe59 100644
--- a/client/ayon_core/hosts/fusion/hooks/pre_fusion_launch_menu_hook.py
+++ b/client/ayon_core/hosts/fusion/hooks/pre_fusion_launch_menu_hook.py
@@ -1,5 +1,5 @@
import os
-from ayon_core.lib import PreLaunchHook
+from ayon_applications import PreLaunchHook
from ayon_core.hosts.fusion import FUSION_HOST_DIR
diff --git a/client/ayon_core/hosts/hiero/api/events.py b/client/ayon_core/hosts/hiero/api/events.py
index 304605e24e..663004abd2 100644
--- a/client/ayon_core/hosts/hiero/api/events.py
+++ b/client/ayon_core/hosts/hiero/api/events.py
@@ -8,6 +8,7 @@ from .lib import (
sync_avalon_data_to_workfile,
launch_workfiles_app,
before_project_save,
+ apply_colorspace_project
)
from .tags import add_tags_to_workfile
from .menu import update_menu_task_label
@@ -44,6 +45,8 @@ def afterNewProjectCreated(event):
# reset workfiles startup not to open any more in session
os.environ["WORKFILES_STARTUP"] = "0"
+ apply_colorspace_project()
+
def beforeProjectLoad(event):
log.info("before project load event...")
@@ -122,6 +125,7 @@ def register_hiero_events():
except RuntimeError:
pass
+
def register_events():
"""
Adding all callbacks.
diff --git a/client/ayon_core/hosts/hiero/api/lib.py b/client/ayon_core/hosts/hiero/api/lib.py
index 8682ff7780..aaf99546c7 100644
--- a/client/ayon_core/hosts/hiero/api/lib.py
+++ b/client/ayon_core/hosts/hiero/api/lib.py
@@ -11,7 +11,6 @@ import warnings
import json
import ast
import secrets
-import shutil
import hiero
from qtpy import QtWidgets, QtCore
@@ -36,9 +35,6 @@ from .constants import (
DEFAULT_SEQUENCE_NAME,
DEFAULT_BIN_NAME
)
-from ayon_core.pipeline.colorspace import (
- get_imageio_config
-)
class _CTX:
@@ -105,9 +101,9 @@ def flatten(list_):
def get_current_project(remove_untitled=False):
- projects = flatten(hiero.core.projects())
+ projects = hiero.core.projects()
if not remove_untitled:
- return next(iter(projects))
+ return projects[0]
# if remove_untitled
for proj in projects:
@@ -1050,18 +1046,68 @@ def _set_hrox_project_knobs(doc, **knobs):
def apply_colorspace_project():
- project_name = get_current_project_name()
- # get path the the active projects
- project = get_current_project(remove_untitled=True)
- current_file = project.path()
-
- # close the active project
- project.close()
+ """Apply colorspaces from settings.
+ Due to not being able to set the project settings through the Python API,
+ we need to do use some dubious code to find the widgets and set them. It is
+ possible to set the project settings without traversing through the widgets
+ but it involves reading the hrox files from disk with XML, so no in-memory
+ support. See https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python # noqa
+ for more details.
+ """
# get presets for hiero
+ project_name = get_current_project_name()
imageio = get_project_settings(project_name)["hiero"]["imageio"]
presets = imageio.get("workfile")
+ # Open Project Settings UI.
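+    # Triggering the action instantiates the settings dialog widgets so
+    # they can be found and modified below.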
+ for act in hiero.ui.registeredActions():
+ if act.objectName() == "foundry.project.settings":
+ act.trigger()
+
+ # Find widgets from their sibling label.
+ labels = {
+ "Working Space:": "workingSpace",
+ "Viewer:": "viewerLut",
+ "Thumbnails:": "thumbnailLut",
+ "Monitor Out:": "monitorOutLut",
+ "8 Bit Files:": "eightBitLut",
+ "16 Bit Files:": "sixteenBitLut",
+ "Log Files:": "logLut",
+ "Floating Point Files:": "floatLut"
+ }
+ widgets = {x: None for x in labels.values()}
+
+ def _recursive_children(widget, labels, widgets):
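+        # Assumes each value widget (a combo box) is the child that comes
+        # immediately after its QLabel, hence `children[count + 1]`.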
+ children = widget.children()
+ for count, child in enumerate(children):
+ if isinstance(child, QtWidgets.QLabel):
+ if child.text() in labels.keys():
+ widgets[labels[child.text()]] = children[count + 1]
+ _recursive_children(child, labels, widgets)
+
+ app = QtWidgets.QApplication.instance()
+ title = "Project Settings"
+ for widget in app.topLevelWidgets():
+ if isinstance(widget, QtWidgets.QMainWindow):
+ if widget.windowTitle() != title:
+ continue
+ _recursive_children(widget, labels, widgets)
+ widget.close()
+
+ msg = "Setting value \"{}\" is not a valid option for \"{}\""
+ for key, widget in widgets.items():
+ options = [widget.itemText(i) for i in range(widget.count())]
+ setting_value = presets[key]
+ assert setting_value in options, msg.format(setting_value, key)
+ widget.setCurrentText(presets[key])
+
+ # This code block is for setting up project colorspaces for files on disk.
+ # Due to not having Python API access to set the project settings, the
+ # Foundry recommended way is to modify the hrox files on disk with XML. See
+ # this forum thread for more details;
+ # https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python # noqa
+ '''
# backward compatibility layer
# TODO: remove this after some time
config_data = get_imageio_config(
@@ -1074,6 +1120,13 @@ def apply_colorspace_project():
"ocioConfigName": "custom"
})
+ # get path the the active projects
+ project = get_current_project()
+ current_file = project.path()
+
+ msg = "The project needs to be saved to disk to apply colorspace settings."
+ assert current_file, msg
+
# save the workfile as subversion "comment:_colorspaceChange"
split_current_file = os.path.splitext(current_file)
copy_current_file = current_file
@@ -1116,6 +1169,7 @@ def apply_colorspace_project():
# open the file as current project
hiero.core.openProject(copy_current_file)
+ '''
def apply_colorspace_clips():
@@ -1125,10 +1179,8 @@ def apply_colorspace_clips():
# get presets for hiero
imageio = get_project_settings(project_name)["hiero"]["imageio"]
- from pprint import pprint
presets = imageio.get("regexInputs", {}).get("inputs", {})
- pprint(presets)
for clip in clips:
clip_media_source_path = clip.mediaSource().firstpath()
clip_name = clip.name()
diff --git a/client/ayon_core/hosts/hiero/api/tags.py b/client/ayon_core/hosts/hiero/api/tags.py
index 5abfee75d0..d4acb23493 100644
--- a/client/ayon_core/hosts/hiero/api/tags.py
+++ b/client/ayon_core/hosts/hiero/api/tags.py
@@ -144,7 +144,7 @@ def add_tags_to_workfile():
# Get project task types.
project_name = get_current_project_name()
project_entity = ayon_api.get_project(project_name)
- task_types = project_entity["taskType"]
+ task_types = project_entity["taskTypes"]
nks_pres_tags["[Tasks]"] = {}
log.debug("__ tasks: {}".format(task_types))
for task_type in task_types:
diff --git a/client/ayon_core/hosts/houdini/api/lib.py b/client/ayon_core/hosts/houdini/api/lib.py
index da1b21ad95..7ca8f7f8f0 100644
--- a/client/ayon_core/hosts/houdini/api/lib.py
+++ b/client/ayon_core/hosts/houdini/api/lib.py
@@ -811,6 +811,43 @@ def get_current_context_template_data_with_folder_attrs():
return template_data
+def set_review_color_space(opengl_node, review_color_space="", log=None):
+ """Set ociocolorspace parameter for the given OpenGL node.
+
+    Set the `ociocolorspace` parameter of the given OpenGL node
+    to the given review_color_space value.
+ If review_color_space is empty, a default colorspace corresponding to
+ the display & view of the current Houdini session will be used.
+
+ Args:
+ opengl_node (hou.Node): ROP node to set its ociocolorspace parm.
+ review_color_space (str): Colorspace value for ociocolorspace parm.
+ log (logging.Logger): Logger to log to.
+ """
+
+    if log is None:
+        # `self` is not available in a module-level function; fall back
+        # to a module logger instead.
+        log = logging.getLogger(__name__)
+
+ # Set Color Correction parameter to OpenColorIO
+ colorcorrect_parm = opengl_node.parm("colorcorrect")
+ if colorcorrect_parm.eval() != 2:
+ colorcorrect_parm.set(2)
+ log.debug(
+ "'Color Correction' parm on '{}' has been set to"
+ " 'OpenColorIO'".format(opengl_node.path())
+ )
+
+ opengl_node.setParms(
+ {"ociocolorspace": review_color_space}
+ )
+
+ log.debug(
+ "'OCIO Colorspace' parm on '{}' has been set to "
+ "the view color space '{}'"
+        .format(opengl_node.path(), review_color_space)
+ )
+
+
def get_context_var_changes():
"""get context var changes."""
diff --git a/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py b/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py
new file mode 100644
index 0000000000..7d41979600
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py
@@ -0,0 +1,64 @@
+from ayon_applications import PreLaunchHook, LaunchTypes
+
+
+class SetDefaultDisplayView(PreLaunchHook):
+ """Set default view and default display for houdini via OpenColorIO.
+
+ Houdini's defaultDisplay and defaultView are set by
+ setting 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS'
+ environment variables respectively.
+
+ More info: https://www.sidefx.com/docs/houdini/io/ocio.html#set-up
+ """
+
+ app_groups = {"houdini"}
+ launch_types = {LaunchTypes.local}
+
+ def execute(self):
+
+ OCIO = self.launch_context.env.get("OCIO")
+
+ # This is a cheap way to skip this hook if either global color
+ # management or houdini color management was disabled because the
+ # OCIO var would be set by the global OCIOEnvHook
+ if not OCIO:
+ return
+
+ # workfile settings added in '0.2.13'
+ houdini_color_settings = \
+ self.data["project_settings"]["houdini"]["imageio"].get("workfile")
+
+ if not houdini_color_settings:
+ self.log.info("Hook 'SetDefaultDisplayView' requires Houdini "
+ "addon version >= '0.2.13'")
+ return
+
+ if not houdini_color_settings["enabled"]:
+ self.log.info(
+ "Houdini workfile color management is disabled."
+ )
+ return
+
+ # 'OCIO_ACTIVE_DISPLAYS', 'OCIO_ACTIVE_VIEWS' are checked
+ # as Admins can add them in Ayon env vars or Ayon tools.
+
+ default_display = houdini_color_settings["default_display"]
+ if default_display:
+ # get 'OCIO_ACTIVE_DISPLAYS' value if exists.
+ self._set_context_env("OCIO_ACTIVE_DISPLAYS", default_display)
+
+ default_view = houdini_color_settings["default_view"]
+ if default_view:
+ # get 'OCIO_ACTIVE_VIEWS' value if exists.
+ self._set_context_env("OCIO_ACTIVE_VIEWS", default_view)
+
+ def _set_context_env(self, env_var, default_value):
+ env_value = self.launch_context.env.get(env_var, "")
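+        # Prepend the settings value so it becomes the OCIO default while
+        # keeping any displays/views that admins already defined.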
+ new_value = ":".join(
+ key for key in [default_value, env_value] if key
+ )
+ self.log.info(
+ "Setting {} environment to: {}"
+ .format(env_var, new_value)
+ )
+ self.launch_context.env[env_var] = new_value
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_review.py b/client/ayon_core/hosts/houdini/plugins/create/create_review.py
index 18f7ce498d..f5e4d4ce64 100644
--- a/client/ayon_core/hosts/houdini/plugins/create/create_review.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_review.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating openGL reviews."""
-from ayon_core.hosts.houdini.api import plugin
+from ayon_core.hosts.houdini.api import lib, plugin
from ayon_core.lib import EnumDef, BoolDef, NumberDef
import os
@@ -14,6 +14,16 @@ class CreateReview(plugin.HoudiniCreator):
label = "Review"
product_type = "review"
icon = "video-camera"
+ review_color_space = ""
+
+ def apply_settings(self, project_settings):
+ super(CreateReview, self).apply_settings(project_settings)
+ # workfile settings added in '0.2.13'
+ color_settings = project_settings["houdini"]["imageio"].get(
+ "workfile", {}
+ )
+ if color_settings.get("enabled"):
+ self.review_color_space = color_settings.get("review_color_space")
def create(self, product_name, instance_data, pre_create_data):
@@ -85,10 +95,20 @@ class CreateReview(plugin.HoudiniCreator):
instance_node.setParms(parms)
- # Set OCIO Colorspace to the default output colorspace
+ # Set OCIO Colorspace to the default colorspace
# if there's OCIO
if os.getenv("OCIO"):
- self.set_colorcorrect_to_default_view_space(instance_node)
+            # Fall back to the default value if review_color_space is empty.
+            if not self.review_color_space:
+                # self.review_color_space is an empty string
+ # when the imageio/workfile setting is disabled or
+ # when the Review colorspace setting is empty.
+ from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
+ self.review_color_space = get_default_display_view_colorspace()
+
+ lib.set_review_color_space(instance_node,
+ self.review_color_space,
+ self.log)
to_lock = ["id", "productType"]
@@ -131,23 +151,3 @@ class CreateReview(plugin.HoudiniCreator):
minimum=0.0001,
decimals=3)
]
-
- def set_colorcorrect_to_default_view_space(self,
- instance_node):
- """Set ociocolorspace to the default output space."""
- from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
-
- # set Color Correction parameter to OpenColorIO
- instance_node.setParms({"colorcorrect": 2})
-
- # Get default view space for ociocolorspace parm.
- default_view_space = get_default_display_view_colorspace()
- instance_node.setParms(
- {"ociocolorspace": default_view_space}
- )
-
- self.log.debug(
- "'OCIO Colorspace' parm on '{}' has been set to "
- "the default view color space '{}'"
- .format(instance_node, default_view_space)
- )
diff --git a/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py b/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py
index 1bb9043cd0..5f04781501 100644
--- a/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py
+++ b/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py
@@ -45,33 +45,11 @@ class AbcLoader(load.LoaderPlugin):
alembic = container.createNode("alembic", node_name=node_name)
alembic.setParms({"fileName": file_path})
- # Add unpack node
- unpack_name = "unpack_{}".format(name)
- unpack = container.createNode("unpack", node_name=unpack_name)
- unpack.setInput(0, alembic)
- unpack.setParms({"transfer_attributes": "path"})
+ # Position nodes nicely
+ container.moveToGoodPosition()
+ container.layoutChildren()
- # Add normal to points
- # Order of menu ['point', 'vertex', 'prim', 'detail']
- normal_name = "normal_{}".format(name)
- normal_node = container.createNode("normal", node_name=normal_name)
- normal_node.setParms({"type": 0})
-
- normal_node.setInput(0, unpack)
-
- null = container.createNode("null", node_name="OUT")
- null.setInput(0, normal_node)
-
- # Ensure display flag is on the Alembic input node and not on the OUT
- # node to optimize "debug" displaying in the viewport.
- alembic.setDisplayFlag(True)
-
- # Set new position for unpack node else it gets cluttered
- nodes = [container, alembic, unpack, normal_node, null]
- for nr, node in enumerate(nodes):
- node.setPosition([0, (0 - nr)])
-
- self[:] = nodes
+ nodes = [container, alembic]
return pipeline.containerise(
node_name,
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py
index c6dfb4332d..0fab69ef4a 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py
@@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop, splitext
import hou
-class ExtractComposite(publish.Extractor):
+class ExtractComposite(publish.Extractor,
+ publish.ColormanagedPyblishPluginMixin):
order = pyblish.api.ExtractorOrder
label = "Extract Composite (Image Sequence)"
@@ -45,8 +46,14 @@ class ExtractComposite(publish.Extractor):
"frameEnd": instance.data["frameEndHandle"],
}
- from pprint import pformat
-
- self.log.info(pformat(representation))
+ if ext.lower() == "exr":
+ # Inject colorspace with 'scene_linear' as that's the
+ # default Houdini working colorspace and all extracted
+ # OpenEXR images should be in that colorspace.
+ # https://www.sidefx.com/docs/houdini/render/linear.html#image-formats
+ self.set_representation_colorspace(
+ representation, instance.context,
+ colorspace="scene_linear"
+ )
instance.data["representations"].append(representation)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
index fabdfd9a9d..57bb8b881a 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
@@ -8,7 +8,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop
import hou
-class ExtractOpenGL(publish.Extractor):
+class ExtractOpenGL(publish.Extractor,
+ publish.ColormanagedPyblishPluginMixin):
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract OpenGL"
@@ -46,6 +47,14 @@ class ExtractOpenGL(publish.Extractor):
"camera_name": instance.data.get("review_camera")
}
+ if ropnode.evalParm("colorcorrect") == 2: # OpenColorIO enabled
+ colorspace = ropnode.evalParm("ociocolorspace")
+ # inject colorspace data
+ self.set_representation_colorspace(
+ representation, instance.context,
+ colorspace=colorspace
+ )
+
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(representation)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py
index fdf03d5cba..91bd36018a 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
-import sys
+import hou
import pyblish.api
-import six
from ayon_core.pipeline import PublishValidationError
@@ -26,28 +25,21 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
invalid = self.get_invalid(instance)
if invalid:
raise PublishValidationError(
- ("Output node(s) `{}` are incorrect. "
- "See plug-in log for details.").format(invalid),
- title=self.label
+ "Output node '{}' is incorrect. "
+ "See plug-in log for details.".format(invalid),
+ title=self.label,
+ description=(
+ "### Invalid COP output node\n\n"
+ "The output node path for the instance must be set to a "
+ "valid COP node path.\n\nSee the log for more details."
+ )
)
@classmethod
def get_invalid(cls, instance):
+ output_node = instance.data.get("output_node")
- import hou
-
- try:
- output_node = instance.data["output_node"]
- except KeyError:
- six.reraise(
- PublishValidationError,
- PublishValidationError(
- "Can't determine COP output node.",
- title=cls.__name__),
- sys.exc_info()[2]
- )
-
- if output_node is None:
+ if not output_node:
node = hou.node(instance.data.get("instance_node"))
cls.log.error(
"COP Output node in '%s' does not exist. "
@@ -61,8 +53,8 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
cls.log.error(
"Output node %s is not a COP node. "
"COP Path must point to a COP node, "
- "instead found category type: %s"
- % (output_node.path(), output_node.type().category().name())
+ "instead found category type: %s",
+ output_node.path(), output_node.type().category().name()
)
return [output_node.path()]
@@ -70,9 +62,7 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
# is Cop2 to avoid potential edge case scenarios even though
# the isinstance check above should be stricter than this category
if output_node.type().category().name() != "Cop2":
- raise PublishValidationError(
- (
- "Output node {} is not of category Cop2."
- " This is a bug..."
- ).format(output_node.path()),
- title=cls.label)
+ cls.log.error(
+ "Output node %s is not of category Cop2.", output_node.path()
+ )
+ return [output_node.path()]
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
index 031138e21d..e7f528ba57 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
@@ -4,15 +4,19 @@ from ayon_core.pipeline import (
PublishValidationError,
OptionalPyblishPluginMixin
)
-from ayon_core.pipeline.publish import RepairAction
+from ayon_core.pipeline.publish import (
+ RepairAction,
+ get_plugin_settings,
+ apply_plugin_settings_automatically
+)
from ayon_core.hosts.houdini.api.action import SelectROPAction
import os
import hou
-class SetDefaultViewSpaceAction(RepairAction):
- label = "Set default view colorspace"
+class ResetViewSpaceAction(RepairAction):
+ label = "Reset OCIO colorspace parm"
icon = "mdi.monitor"
@@ -27,9 +31,28 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
families = ["review"]
hosts = ["houdini"]
label = "Validate Review Colorspace"
- actions = [SetDefaultViewSpaceAction, SelectROPAction]
+ actions = [ResetViewSpaceAction, SelectROPAction]
optional = True
+ review_color_space = ""
+
+ @classmethod
+ def apply_settings(cls, project_settings):
+ # Preserve automatic settings applying logic
+ settings = get_plugin_settings(plugin=cls,
+ project_settings=project_settings,
+ log=cls.log,
+ category="houdini")
+ apply_plugin_settings_automatically(cls, settings, logger=cls.log)
+
+ # workfile settings added in '0.2.13'
+ color_settings = project_settings["houdini"]["imageio"].get(
+ "workfile", {}
+ )
+ # Add review color settings
+ if color_settings.get("enabled"):
+ cls.review_color_space = color_settings.get("review_color_space")
+
def process(self, instance):
@@ -52,39 +75,54 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
" 'OpenColorIO'".format(rop_node.path())
)
- if rop_node.evalParm("ociocolorspace") not in \
- hou.Color.ocio_spaces():
-
+ current_color_space = rop_node.evalParm("ociocolorspace")
+ if current_color_space not in hou.Color.ocio_spaces():
raise PublishValidationError(
"Invalid value: Colorspace name doesn't exist.\n"
"Check 'OCIO Colorspace' parameter on '{}' ROP"
.format(rop_node.path())
)
- @classmethod
- def repair(cls, instance):
- """Set Default View Space Action.
+        # If houdini/imageio/workfile is enabled and the Review colorspace
+        # setting is empty, this check should ideally verify that the
+        # current_color_space equals the default colorspace value.
+        # However, that makes the black cmd screen show up more often,
+        # which is very annoying.
+ if self.review_color_space and \
+ self.review_color_space != current_color_space:
- It is a helper action more than a repair action,
- used to set colorspace on opengl node to the default view.
- """
- from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
-
- rop_node = hou.node(instance.data["instance_node"])
-
- if rop_node.evalParm("colorcorrect") != 2:
- rop_node.setParms({"colorcorrect": 2})
- cls.log.debug(
- "'Color Correction' parm on '{}' has been set to"
- " 'OpenColorIO'".format(rop_node.path())
+ raise PublishValidationError(
+ "Invalid value: Colorspace name doesn't match"
+ "the Colorspace specified in settings."
)
- # Get default view colorspace name
- default_view_space = get_default_display_view_colorspace()
+ @classmethod
+ def repair(cls, instance):
+ """Reset view colorspace.
- rop_node.setParms({"ociocolorspace": default_view_space})
- cls.log.info(
- "'OCIO Colorspace' parm on '{}' has been set to "
- "the default view color space '{}'"
- .format(rop_node, default_view_space)
- )
+ It is used to set colorspace on opengl node.
+
+ It uses the colorspace value specified in the Houdini addon settings.
+ If the value in the Houdini addon settings is empty,
+    it falls back to the default colorspace.
+
+ Note:
+ This repair action assumes that OCIO is enabled.
+ As if OCIO is disabled the whole validation is skipped
+ and this repair action won't show up.
+ """
+ from ayon_core.hosts.houdini.api.lib import set_review_color_space
+
+        # Fall back to the default value if cls.review_color_space is empty.
+ if not cls.review_color_space:
+ # cls.review_color_space is an empty string
+ # when the imageio/workfile setting is disabled or
+ # when the Review colorspace setting is empty.
+ from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
+ cls.review_color_space = get_default_display_view_colorspace()
+
+ rop_node = hou.node(instance.data["instance_node"])
+ set_review_color_space(rop_node,
+ cls.review_color_space,
+ cls.log)
diff --git a/client/ayon_core/hosts/max/api/lib.py b/client/ayon_core/hosts/max/api/lib.py
index 48bb15f538..d9a3af3336 100644
--- a/client/ayon_core/hosts/max/api/lib.py
+++ b/client/ayon_core/hosts/max/api/lib.py
@@ -8,10 +8,15 @@ from typing import Any, Dict, Union
import six
import ayon_api
-from ayon_core.pipeline import get_current_project_name, colorspace
+from ayon_core.pipeline import (
+ get_current_project_name,
+ get_current_folder_path,
+ get_current_task_name,
+ colorspace
+)
from ayon_core.settings import get_project_settings
from ayon_core.pipeline.context_tools import (
- get_current_folder_entity,
+ get_current_task_entity
)
from ayon_core.style import load_stylesheet
from pymxs import runtime as rt
@@ -221,41 +226,30 @@ def reset_scene_resolution():
scene resolution can be overwritten by a folder if the folder.attrib
contains any information regarding scene resolution.
"""
-
- folder_entity = get_current_folder_entity(
- fields={"attrib.resolutionWidth", "attrib.resolutionHeight"}
- )
- folder_attributes = folder_entity["attrib"]
- width = int(folder_attributes["resolutionWidth"])
- height = int(folder_attributes["resolutionHeight"])
+ task_attributes = get_current_task_entity(fields={"attrib"})["attrib"]
+ width = int(task_attributes["resolutionWidth"])
+ height = int(task_attributes["resolutionHeight"])
set_scene_resolution(width, height)
-def get_frame_range(folder_entiy=None) -> Union[Dict[str, Any], None]:
- """Get the current folder frame range and handles.
+def get_frame_range(task_entity=None) -> Union[Dict[str, Any], None]:
+ """Get the current task frame range and handles
Args:
- folder_entiy (dict): Folder eneity.
+ task_entity (dict): Task Entity.
Returns:
dict: with frame start, frame end, handle start, handle end.
"""
# Set frame start/end
- if folder_entiy is None:
- folder_entiy = get_current_folder_entity()
-
- folder_attributes = folder_entiy["attrib"]
- frame_start = folder_attributes.get("frameStart")
- frame_end = folder_attributes.get("frameEnd")
-
- if frame_start is None or frame_end is None:
- return {}
-
- frame_start = int(frame_start)
- frame_end = int(frame_end)
- handle_start = int(folder_attributes.get("handleStart", 0))
- handle_end = int(folder_attributes.get("handleEnd", 0))
+ if task_entity is None:
+ task_entity = get_current_task_entity(fields={"attrib"})
+ task_attributes = task_entity["attrib"]
+ frame_start = int(task_attributes["frameStart"])
+ frame_end = int(task_attributes["frameEnd"])
+ handle_start = int(task_attributes["handleStart"])
+ handle_end = int(task_attributes["handleEnd"])
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_end + handle_end
@@ -281,9 +275,9 @@ def reset_frame_range(fps: bool = True):
scene frame rate in frames-per-second.
"""
if fps:
- project_name = get_current_project_name()
- project_entity = ayon_api.get_project(project_name)
- fps_number = float(project_entity["attrib"].get("fps"))
+ task_entity = get_current_task_entity()
+ task_attributes = task_entity["attrib"]
+ fps_number = float(task_attributes["fps"])
rt.frameRate = fps_number
frame_range = get_frame_range()
@@ -502,9 +496,9 @@ def object_transform_set(container_children):
"""
transform_set = {}
for node in container_children:
- name = f"{node.name}.transform"
+ name = f"{node}.transform"
transform_set[name] = node.pos
- name = f"{node.name}.scale"
+ name = f"{node}.scale"
transform_set[name] = node.scale
return transform_set
@@ -525,6 +519,36 @@ def get_plugins() -> list:
return plugin_info_list
+def update_modifier_node_names(event, node):
+ """Update the name of the nodes after renaming
+
+ Args:
+ event (pymxs.MXSWrapperBase): Event Name (
+ Mandatory argument for rt.NodeEventCallback)
+ node (list): Event Number (
+ Mandatory argument for rt.NodeEventCallback)
+
+ """
+ containers = [
+ obj
+ for obj in rt.Objects
+ if (
+ rt.ClassOf(obj) == rt.Container
+ and rt.getUserProp(obj, "id") == "pyblish.avalon.instance"
+ and rt.getUserProp(obj, "productType") not in {
+ "workfile", "tyflow"
+ }
+ )
+ ]
+ if not containers:
+ return
+ for container in containers:
+ ayon_data = container.modifiers[0].openPypeData
+ updated_node_names = [str(node.node) for node
+ in ayon_data.all_handles]
+ rt.setProperty(ayon_data, "sel_list", updated_node_names)
+
+
@contextlib.contextmanager
def render_resolution(width, height):
"""Set render resolution option during context
diff --git a/client/ayon_core/hosts/max/api/pipeline.py b/client/ayon_core/hosts/max/api/pipeline.py
index 675f36c24f..dc13f47795 100644
--- a/client/ayon_core/hosts/max/api/pipeline.py
+++ b/client/ayon_core/hosts/max/api/pipeline.py
@@ -63,6 +63,8 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
rt.callbacks.addScript(rt.Name('postWorkspaceChange'),
self._deferred_menu_creation)
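+        # Keep node names stored on instance containers in sync when
+        # scene nodes are renamed.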
+ rt.NodeEventCallback(
+ nameChanged=lib.update_modifier_node_names)
def workfile_has_unsaved_changes(self):
return rt.getSaveRequired()
diff --git a/client/ayon_core/hosts/max/plugins/load/load_max_scene.py b/client/ayon_core/hosts/max/plugins/load/load_max_scene.py
index 4f982dd5ba..97b8c6cd52 100644
--- a/client/ayon_core/hosts/max/plugins/load/load_max_scene.py
+++ b/client/ayon_core/hosts/max/plugins/load/load_max_scene.py
@@ -117,7 +117,7 @@ class MaxSceneLoader(load.LoaderPlugin):
)
for max_obj, obj_name in zip(max_objects, max_object_names):
max_obj.name = f"{namespace}:{obj_name}"
- max_container.append(rt.getNodeByName(max_obj.name))
+ max_container.append(max_obj)
return containerise(
name, max_container, context,
namespace, loader=self.__class__.__name__)
@@ -158,11 +158,11 @@ class MaxSceneLoader(load.LoaderPlugin):
current_max_object_names):
max_obj.name = f"{namespace}:{obj_name}"
max_objects.append(max_obj)
- max_transform = f"{max_obj.name}.transform"
+ max_transform = f"{max_obj}.transform"
if max_transform in transform_data.keys():
max_obj.pos = transform_data[max_transform] or 0
max_obj.scale = transform_data[
- f"{max_obj.name}.scale"] or 0
+ f"{max_obj}.scale"] or 0
update_custom_attribute_data(node, max_objects)
lib.imprint(container["instance_node"], {
diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py b/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py
index 82cad71c3e..6f5de20ae0 100644
--- a/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py
+++ b/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py
@@ -76,11 +76,11 @@ class FbxModelLoader(load.LoaderPlugin):
for fbx_object in current_fbx_objects:
fbx_object.name = f"{namespace}:{fbx_object.name}"
fbx_objects.append(fbx_object)
- fbx_transform = f"{fbx_object.name}.transform"
+ fbx_transform = f"{fbx_object}.transform"
if fbx_transform in transform_data.keys():
fbx_object.pos = transform_data[fbx_transform] or 0
fbx_object.scale = transform_data[
- f"{fbx_object.name}.scale"] or 0
+ f"{fbx_object}.scale"] or 0
with maintained_selection():
rt.Select(node)
diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_obj.py b/client/ayon_core/hosts/max/plugins/load/load_model_obj.py
index 38f2cdf43c..a9119259df 100644
--- a/client/ayon_core/hosts/max/plugins/load/load_model_obj.py
+++ b/client/ayon_core/hosts/max/plugins/load/load_model_obj.py
@@ -67,11 +67,11 @@ class ObjLoader(load.LoaderPlugin):
selections = rt.GetCurrentSelection()
for selection in selections:
selection.name = f"{namespace}:{selection.name}"
- selection_transform = f"{selection.name}.transform"
+ selection_transform = f"{selection}.transform"
if selection_transform in transform_data.keys():
selection.pos = transform_data[selection_transform] or 0
selection.scale = transform_data[
- f"{selection.name}.scale"] or 0
+ f"{selection}.scale"] or 0
update_custom_attribute_data(node, selections)
with maintained_selection():
rt.Select(node)
diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_usd.py b/client/ayon_core/hosts/max/plugins/load/load_model_usd.py
index 2b946eb2aa..2ed5d64a18 100644
--- a/client/ayon_core/hosts/max/plugins/load/load_model_usd.py
+++ b/client/ayon_core/hosts/max/plugins/load/load_model_usd.py
@@ -95,11 +95,11 @@ class ModelUSDLoader(load.LoaderPlugin):
for children in asset.Children:
children.name = f"{namespace}:{children.name}"
usd_objects.append(children)
- children_transform = f"{children.name}.transform"
+ children_transform = f"{children}.transform"
if children_transform in transform_data.keys():
children.pos = transform_data[children_transform] or 0
children.scale = transform_data[
- f"{children.name}.scale"] or 0
+ f"{children}.scale"] or 0
asset.name = f"{namespace}:{asset.name}"
usd_objects.append(asset)
diff --git a/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py b/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py
index 2efb7c7f62..47690f84e9 100644
--- a/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py
+++ b/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py
@@ -92,10 +92,10 @@ class OxAbcLoader(load.LoaderPlugin):
abc.Parent = container
abc.name = f"{namespace}:{abc.name}"
ox_abc_objects.append(abc)
- ox_transform = f"{abc.name}.transform"
+ ox_transform = f"{abc}.transform"
if ox_transform in transform_data.keys():
abc.pos = transform_data[ox_transform] or 0
- abc.scale = transform_data[f"{abc.name}.scale"] or 0
+ abc.scale = transform_data[f"{abc}.scale"] or 0
update_custom_attribute_data(node, ox_abc_objects)
lib.imprint(
container["instance_node"],
diff --git a/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py b/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py
index 67b5174200..67cec23ecc 100644
--- a/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py
+++ b/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py
@@ -53,6 +53,7 @@ class ExtractAlembic(publish.Extractor,
hosts = ["max"]
families = ["pointcache"]
optional = True
+ active = True
def process(self, instance):
if not self.is_active(instance.data):
@@ -102,24 +103,27 @@ class ExtractAlembic(publish.Extractor,
@classmethod
def get_attribute_defs(cls):
- return [
+ defs = super(ExtractAlembic, cls).get_attribute_defs()
+ defs.extend([
BoolDef("custom_attrs",
label="Custom Attributes",
default=False),
- ]
+ ])
+ return defs
class ExtractCameraAlembic(ExtractAlembic):
"""Extract Camera with AlembicExport."""
-
label = "Extract Alembic Camera"
families = ["camera"]
+ optional = True
-class ExtractModel(ExtractAlembic):
+class ExtractModelAlembic(ExtractAlembic):
"""Extract Geometry in Alembic Format"""
label = "Extract Geometry (Alembic)"
families = ["model"]
+ optional = True
def _set_abc_attributes(self, instance):
attr_values = self.get_attr_values_from_data(instance.data)
diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py b/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py
index 2f4ec5f86c..11b55232d5 100644
--- a/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py
+++ b/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py
@@ -42,7 +42,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin,
return
frame_range = get_frame_range(
- instance.data["folderEntity"])
+ instance.data["taskEntity"])
inst_frame_start = instance.data.get("frameStartHandle")
inst_frame_end = instance.data.get("frameEndHandle")
diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_instance_in_context.py b/client/ayon_core/hosts/max/plugins/publish/validate_instance_in_context.py
index cecfd5fd12..5107665235 100644
--- a/client/ayon_core/hosts/max/plugins/publish/validate_instance_in_context.py
+++ b/client/ayon_core/hosts/max/plugins/publish/validate_instance_in_context.py
@@ -38,7 +38,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin,
context_label = "{} > {}".format(*context)
instance_label = "{} > {}".format(folderPath, task)
message = (
- "Instance '{}' publishes to different folder or task "
+ "Instance '{}' publishes to different context(folder or task) "
"than current context: {}. Current context: {}".format(
instance.name, instance_label, context_label
)
@@ -46,7 +46,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin,
raise PublishValidationError(
message=message,
description=(
- "## Publishing to a different context folder or task\n"
+ "## Publishing to a different context data(folder or task)\n"
"There are publish instances present which are publishing "
"into a different folder path or task than your current context.\n\n"
"Usually this is not what you want but there can be cases "
diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py b/client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py
index f499f851f1..5f6cd0a21d 100644
--- a/client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py
+++ b/client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py
@@ -7,7 +7,10 @@ from ayon_core.pipeline.publish import (
RepairAction,
PublishValidationError
)
-from ayon_core.hosts.max.api.lib import reset_scene_resolution
+from ayon_core.hosts.max.api.lib import (
+ reset_scene_resolution,
+ imprint
+)
class ValidateResolutionSetting(pyblish.api.InstancePlugin,
@@ -25,8 +28,10 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
if not self.is_active(instance.data):
return
width, height = self.get_folder_resolution(instance)
- current_width = rt.renderWidth
- current_height = rt.renderHeight
+ current_width, current_height = (
+ self.get_current_resolution(instance)
+ )
+
if current_width != width and current_height != height:
raise PublishValidationError("Resolution Setting "
"not matching resolution "
@@ -41,12 +46,16 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
"not matching resolution set "
"on asset or shot.")
- def get_folder_resolution(self, instance):
- folder_entity = instance.data["folderEntity"]
- if folder_entity:
- folder_attributes = folder_entity["attrib"]
- width = folder_attributes["resolutionWidth"]
- height = folder_attributes["resolutionHeight"]
+ def get_current_resolution(self, instance):
+ return rt.renderWidth, rt.renderHeight
+
+ @classmethod
+ def get_folder_resolution(cls, instance):
+ task_entity = instance.data.get("taskEntity")
+ if task_entity:
+ task_attributes = task_entity["attrib"]
+ width = task_attributes["resolutionWidth"]
+ height = task_attributes["resolutionHeight"]
return int(width), int(height)
# Defaults if not found in folder entity
@@ -55,3 +64,29 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
@classmethod
def repair(cls, instance):
reset_scene_resolution()
+
+
+class ValidateReviewResolutionSetting(ValidateResolutionSetting):
+ families = ["review"]
+ optional = True
+ actions = [RepairAction]
+
+ def get_current_resolution(self, instance):
+ current_width = instance.data["review_width"]
+ current_height = instance.data["review_height"]
+ return current_width, current_height
+
+ @classmethod
+ def repair(cls, instance):
+ context_width, context_height = (
+ cls.get_folder_resolution(instance)
+ )
+ creator_attrs = instance.data["creator_attributes"]
+ creator_attrs["review_width"] = context_width
+ creator_attrs["review_height"] = context_height
+ creator_attrs_data = {
+ "creator_attributes": creator_attrs
+ }
+ # update the width and height of review
+ # data in creator_attributes
+ imprint(instance.data["instance_node"], creator_attrs_data)
diff --git a/client/ayon_core/hosts/max/startup/startup.ms b/client/ayon_core/hosts/max/startup/startup.ms
index 2dfe53a6a5..c5b4f0e526 100644
--- a/client/ayon_core/hosts/max/startup/startup.ms
+++ b/client/ayon_core/hosts/max/startup/startup.ms
@@ -12,4 +12,4 @@
max create mode
python.ExecuteFile startup
-)
\ No newline at end of file
+)
diff --git a/client/ayon_core/hosts/maya/api/alembic.py b/client/ayon_core/hosts/maya/api/alembic.py
new file mode 100644
index 0000000000..bf887df4c7
--- /dev/null
+++ b/client/ayon_core/hosts/maya/api/alembic.py
@@ -0,0 +1,305 @@
+import json
+import logging
+import os
+
+from maya import cmds # noqa
+
+from ayon_core.hosts.maya.api.lib import evaluation
+
+log = logging.getLogger(__name__)
+
+# The Maya AbcExport arguments mapped to their expected value types
+ALEMBIC_ARGS = {
+ "attr": (list, tuple),
+ "attrPrefix": (list, tuple),
+ "autoSubd": bool,
+ "dataFormat": str,
+ "endFrame": float,
+ "eulerFilter": bool,
+ "frameRange": str, # "start end"; overrides startFrame & endFrame
+ "frameRelativeSample": float,
+ "melPerFrameCallback": str,
+ "melPostJobCallback": str,
+ "noNormals": bool,
+ "preRoll": bool,
+ "preRollStartFrame": int,
+ "pythonPerFrameCallback": str,
+ "pythonPostJobCallback": str,
+ "renderableOnly": bool,
+ "root": (list, tuple),
+ "selection": bool,
+ "startFrame": float,
+ "step": float,
+ "stripNamespaces": bool,
+ "userAttr": (list, tuple),
+ "userAttrPrefix": (list, tuple),
+ "uvWrite": bool,
+ "uvsOnly": bool,
+ "verbose": bool,
+ "wholeFrameGeo": bool,
+ "worldSpace": bool,
+ "writeColorSets": bool,
+ "writeCreases": bool, # Maya 2015 Ext1+
+ "writeFaceSets": bool,
+ "writeUVSets": bool, # Maya 2017+
+ "writeVisibility": bool,
+}
+
+
+def extract_alembic(
+ file,
+ attr=None,
+ attrPrefix=None,
+ dataFormat="ogawa",
+ endFrame=None,
+ eulerFilter=True,
+ frameRange="",
+ noNormals=False,
+ preRoll=False,
+ preRollStartFrame=0,
+ renderableOnly=False,
+ root=None,
+ selection=True,
+ startFrame=None,
+ step=1.0,
+ stripNamespaces=True,
+ uvWrite=True,
+ verbose=False,
+ wholeFrameGeo=False,
+ worldSpace=False,
+ writeColorSets=False,
+ writeCreases=False,
+ writeFaceSets=False,
+ writeUVSets=False,
+ writeVisibility=False
+):
+ """Extract a single Alembic Cache.
+
+ This extracts an Alembic cache using the `-selection` flag to minimize
+ the extracted content to solely what was Collected into the instance.
+
+ Arguments:
+ file (str): The filepath to write the alembic file to.
+
+ attr (list of str, optional): A specific geometric attribute to write
+ out. Defaults to [].
+
+ attrPrefix (list of str, optional): Prefix filter for determining which
+ geometric attributes to write out. Defaults to ["ABC_"].
+
+ dataFormat (str): The data format to use for the cache,
+ defaults to "ogawa"
+
+ endFrame (float): End frame of output. Ignored if `frameRange`
+ provided.
+
+ eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with
+ an Euler filter. Euler filtering helps resolve irregularities in
+ rotations especially if X, Y, and Z rotations exceed 360 degrees.
+ Defaults to True.
+
+ frameRange (tuple or str): Two-tuple with start and end frame or a
+ string formatted as: "startFrame endFrame". This argument
+ overrides `startFrame` and `endFrame` arguments.
+
+ noNormals (bool): When on, normal data from the original polygon
+ objects is not included in the exported Alembic cache file.
+
+ preRoll (bool): This frame range will not be sampled.
+ Defaults to False.
+
+ preRollStartFrame (float): The frame to start scene
+ evaluation at. This is used to set the starting frame for time
+ dependent translations and can be used to evaluate run-up that
+ isn't actually translated. Defaults to 0.
+
+ renderableOnly (bool): When on, any non-renderable nodes or hierarchy,
+ such as hidden objects, are not included in the Alembic file.
+ Defaults to False.
+
+        root (list of str): Maya dag paths which will be parented to
+ the root of the Alembic file. Defaults to [], which means the
+ entire scene will be written out.
+
+        selection (bool): Write out all selected nodes from the
+            active selection list that are descendants of the roots
+            specified with -root. Defaults to True.
+
+ startFrame (float): Start frame of output. Ignored if `frameRange`
+ provided.
+
+ step (float): The time interval (expressed in frames) at
+ which the frame range is sampled. Additional samples around each
+ frame can be specified with -frs. Defaults to 1.0.
+
+ stripNamespaces (bool): When on, any namespaces associated with the
+ exported objects are removed from the Alembic file. For example, an
+ object with the namespace taco:foo:bar appears as bar in the
+ Alembic file.
+
+ uvWrite (bool): When on, UV data from polygon meshes and subdivision
+ objects is written to the Alembic file. Only the current UV map is
+ included.
+
+ verbose (bool): When on, outputs frame number information to the
+ Script Editor or output window during extraction.
+
+ wholeFrameGeo (bool): Data for geometry will only be written
+ out on whole frames. Defaults to False.
+
+ worldSpace (bool): When on, the top node in the node hierarchy is
+ stored as world space. By default, these nodes are stored as local
+ space. Defaults to False.
+
+ writeColorSets (bool): Write all color sets on MFnMeshes as
+ color 3 or color 4 indexed geometry parameters with face varying
+ scope. Defaults to False.
+
+ writeCreases (bool): If the mesh has crease edges or crease
+ vertices, the mesh (OPolyMesh) is written out as an OSubD and the
+ crease info is stored in the Alembic file. Otherwise, crease info
+ won't be preserved in the Alembic file unless a custom boolean
+ attribute SubDivisionMesh has been added to the mesh node and its
+ value is true. Defaults to False.
+
+ writeFaceSets (bool): Write all Face sets on MFnMeshes.
+ Defaults to False.
+
+ writeUVSets (bool): Write all uv sets on MFnMeshes as vector
+ 2 indexed geometry parameters with face varying scope. Defaults to
+ False.
+
+ writeVisibility (bool): Visibility state will be stored in
+ the Alembic file. Otherwise everything written out is treated as
+ visible. Defaults to False.
+ """
+
+ # Ensure alembic exporter is loaded
+ cmds.loadPlugin('AbcExport', quiet=True)
+
+ # Alembic Exporter requires forward slashes
+ file = file.replace('\\', '/')
+
+ # Ensure list arguments are valid.
+ attr = attr or []
+ attrPrefix = attrPrefix or []
+ root = root or []
+
+ # Pass the start and end frame on as `frameRange` so that it
+ # never conflicts with that argument
+ if not frameRange:
+ # Fallback to maya timeline if no start or end frame provided.
+ if startFrame is None:
+ startFrame = cmds.playbackOptions(query=True,
+ animationStartTime=True)
+ if endFrame is None:
+ endFrame = cmds.playbackOptions(query=True,
+ animationEndTime=True)
+
+ # Ensure valid types are converted to frame range
+ assert isinstance(startFrame, ALEMBIC_ARGS["startFrame"])
+ assert isinstance(endFrame, ALEMBIC_ARGS["endFrame"])
+ frameRange = "{0} {1}".format(startFrame, endFrame)
+ else:
+ # Allow conversion from tuple for `frameRange`
+ if isinstance(frameRange, (list, tuple)):
+ assert len(frameRange) == 2
+ frameRange = "{0} {1}".format(frameRange[0], frameRange[1])
+
+ # Assemble options
+ options = {
+ "selection": selection,
+ "frameRange": frameRange,
+ "eulerFilter": eulerFilter,
+ "noNormals": noNormals,
+ "preRoll": preRoll,
+ "renderableOnly": renderableOnly,
+ "uvWrite": uvWrite,
+ "writeColorSets": writeColorSets,
+ "writeFaceSets": writeFaceSets,
+ "wholeFrameGeo": wholeFrameGeo,
+ "worldSpace": worldSpace,
+ "writeVisibility": writeVisibility,
+ "writeUVSets": writeUVSets,
+ "writeCreases": writeCreases,
+ "dataFormat": dataFormat,
+ "step": step,
+ "attr": attr,
+ "attrPrefix": attrPrefix,
+ "stripNamespaces": stripNamespaces,
+ "verbose": verbose,
+ "preRollStartFrame": preRollStartFrame
+ }
+
+ # Validate options
+ for key, value in options.copy().items():
+
+ # Discard unknown options
+ if key not in ALEMBIC_ARGS:
+ log.warning("extract_alembic() does not support option '%s'. "
+ "Flag will be ignored..", key)
+ options.pop(key)
+ continue
+
+ # Validate value type
+ valid_types = ALEMBIC_ARGS[key]
+ if not isinstance(value, valid_types):
+ raise TypeError("Alembic option unsupported type: "
+ "{0} (expected {1})".format(value, valid_types))
+
+ # Ignore empty values, like an empty string, since they mess up how
+ # job arguments are built
+ if isinstance(value, (list, tuple)):
+ value = [x for x in value if x.strip()]
+
+ # Ignore option completely if no values remaining
+ if not value:
+ options.pop(key)
+ continue
+
+ options[key] = value
+
+ # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+
+ maya_version = int(cmds.about(version=True))
+ if maya_version >= 2018:
+ options['autoSubd'] = options.pop('writeCreases', False)
+
+ # Format the job string from options
+ job_args = list()
+ for key, value in options.items():
+ if isinstance(value, (list, tuple)):
+ for entry in value:
+ job_args.append("-{} {}".format(key, entry))
+ elif isinstance(value, bool):
+ # Add only when state is set to True
+ if value:
+ job_args.append("-{0}".format(key))
+ else:
+ job_args.append("-{0} {1}".format(key, value))
+
+ job_str = " ".join(job_args)
+ job_str += ' -file "%s"' % file
+
+ # Ensure output directory exists
+ parent_dir = os.path.dirname(file)
+ if not os.path.exists(parent_dir):
+ os.makedirs(parent_dir)
+
+ if verbose:
+ log.debug("Preparing Alembic export with options: %s",
+ json.dumps(options, indent=4))
+ log.debug("Extracting Alembic with job arguments: %s", job_str)
+
+ # Perform extraction
+ print("Alembic Job Arguments : {}".format(job_str))
+
+ # Disable the parallel evaluation temporarily to ensure no buggy
+ # exports are made. (PLN-31)
+ # TODO: Make sure this actually fixes the issues
+ with evaluation("off"):
+ cmds.AbcExport(j=job_str, verbose=verbose)
+
+ if verbose:
+ log.debug("Extracted Alembic to: %s", file)
+
+ return file
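For reference, a minimal usage sketch of the relocated helper as now imported from `ayon_core.hosts.maya.api.alembic` (the node name, output path and frame values below are hypothetical):

    from maya import cmds
    from ayon_core.hosts.maya.api.alembic import extract_alembic

    # extract_alembic exports the active selection by default
    # (selection=True), so select the hierarchy to cache first.
    cmds.select("|char_GRP", replace=True)
    extract_alembic(
        file="/tmp/exports/char.abc",
        startFrame=1001.0,  # floats, per the ALEMBIC_ARGS type table
        endFrame=1100.0,
        worldSpace=True,
        stripNamespaces=True,
    )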
diff --git a/client/ayon_core/hosts/maya/api/lib.py b/client/ayon_core/hosts/maya/api/lib.py
index 321bcbc0b5..1defa3debd 100644
--- a/client/ayon_core/hosts/maya/api/lib.py
+++ b/client/ayon_core/hosts/maya/api/lib.py
@@ -70,37 +70,6 @@ DEFAULT_MATRIX = [1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0]
-# The maya alembic export types
-_alembic_options = {
- "startFrame": float,
- "endFrame": float,
- "frameRange": str, # "start end"; overrides startFrame & endFrame
- "eulerFilter": bool,
- "frameRelativeSample": float,
- "noNormals": bool,
- "renderableOnly": bool,
- "step": float,
- "stripNamespaces": bool,
- "uvWrite": bool,
- "wholeFrameGeo": bool,
- "worldSpace": bool,
- "writeVisibility": bool,
- "writeColorSets": bool,
- "writeFaceSets": bool,
- "writeCreases": bool, # Maya 2015 Ext1+
- "writeUVSets": bool, # Maya 2017+
- "dataFormat": str,
- "root": (list, tuple),
- "attr": (list, tuple),
- "attrPrefix": (list, tuple),
- "userAttr": (list, tuple),
- "melPerFrameCallback": str,
- "melPostJobCallback": str,
- "pythonPerFrameCallback": str,
- "pythonPostJobCallback": str,
- "selection": bool
-}
-
INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000}
FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94}
@@ -1346,178 +1315,6 @@ def is_visible(node,
return True
-
-def extract_alembic(file,
- startFrame=None,
- endFrame=None,
- selection=True,
- uvWrite=True,
- eulerFilter=True,
- dataFormat="ogawa",
- verbose=False,
- **kwargs):
- """Extract a single Alembic Cache.
-
- This extracts an Alembic cache using the `-selection` flag to minimize
- the extracted content to solely what was Collected into the instance.
-
- Arguments:
-
- startFrame (float): Start frame of output. Ignored if `frameRange`
- provided.
-
- endFrame (float): End frame of output. Ignored if `frameRange`
- provided.
-
- frameRange (tuple or str): Two-tuple with start and end frame or a
- string formatted as: "startFrame endFrame". This argument
- overrides `startFrame` and `endFrame` arguments.
-
- dataFormat (str): The data format to use for the cache,
- defaults to "ogawa"
-
- verbose (bool): When on, outputs frame number information to the
- Script Editor or output window during extraction.
-
- noNormals (bool): When on, normal data from the original polygon
- objects is not included in the exported Alembic cache file.
-
- renderableOnly (bool): When on, any non-renderable nodes or hierarchy,
- such as hidden objects, are not included in the Alembic file.
- Defaults to False.
-
- stripNamespaces (bool): When on, any namespaces associated with the
- exported objects are removed from the Alembic file. For example, an
- object with the namespace taco:foo:bar appears as bar in the
- Alembic file.
-
- uvWrite (bool): When on, UV data from polygon meshes and subdivision
- objects are written to the Alembic file. Only the current UV map is
- included.
-
- worldSpace (bool): When on, the top node in the node hierarchy is
- stored as world space. By default, these nodes are stored as local
- space. Defaults to False.
-
- eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with
- an Euler filter. Euler filtering helps resolve irregularities in
- rotations especially if X, Y, and Z rotations exceed 360 degrees.
- Defaults to True.
-
- """
-
- # Ensure alembic exporter is loaded
- cmds.loadPlugin('AbcExport', quiet=True)
-
- # Alembic Exporter requires forward slashes
- file = file.replace('\\', '/')
-
- # Pass the start and end frame on as `frameRange` so that it
- # never conflicts with that argument
- if "frameRange" not in kwargs:
- # Fallback to maya timeline if no start or end frame provided.
- if startFrame is None:
- startFrame = cmds.playbackOptions(query=True,
- animationStartTime=True)
- if endFrame is None:
- endFrame = cmds.playbackOptions(query=True,
- animationEndTime=True)
-
- # Ensure valid types are converted to frame range
- assert isinstance(startFrame, _alembic_options["startFrame"])
- assert isinstance(endFrame, _alembic_options["endFrame"])
- kwargs["frameRange"] = "{0} {1}".format(startFrame, endFrame)
- else:
- # Allow conversion from tuple for `frameRange`
- frame_range = kwargs["frameRange"]
- if isinstance(frame_range, (list, tuple)):
- assert len(frame_range) == 2
- kwargs["frameRange"] = "{0} {1}".format(frame_range[0],
- frame_range[1])
-
- # Assemble options
- options = {
- "selection": selection,
- "uvWrite": uvWrite,
- "eulerFilter": eulerFilter,
- "dataFormat": dataFormat
- }
- options.update(kwargs)
-
- # Validate options
- for key, value in options.copy().items():
-
- # Discard unknown options
- if key not in _alembic_options:
- log.warning("extract_alembic() does not support option '%s'. "
- "Flag will be ignored..", key)
- options.pop(key)
- continue
-
- # Validate value type
- valid_types = _alembic_options[key]
- if not isinstance(value, valid_types):
- raise TypeError("Alembic option unsupported type: "
- "{0} (expected {1})".format(value, valid_types))
-
- # Ignore empty values, like an empty string, since they mess up how
- # job arguments are built
- if isinstance(value, (list, tuple)):
- value = [x for x in value if x.strip()]
-
- # Ignore option completely if no values remaining
- if not value:
- options.pop(key)
- continue
-
- options[key] = value
-
- # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+
- maya_version = int(cmds.about(version=True))
- if maya_version >= 2018:
- options['autoSubd'] = options.pop('writeCreases', False)
-
- # Format the job string from options
- job_args = list()
- for key, value in options.items():
- if isinstance(value, (list, tuple)):
- for entry in value:
- job_args.append("-{} {}".format(key, entry))
- elif isinstance(value, bool):
- # Add only when state is set to True
- if value:
- job_args.append("-{0}".format(key))
- else:
- job_args.append("-{0} {1}".format(key, value))
-
- job_str = " ".join(job_args)
- job_str += ' -file "%s"' % file
-
- # Ensure output directory exists
- parent_dir = os.path.dirname(file)
- if not os.path.exists(parent_dir):
- os.makedirs(parent_dir)
-
- if verbose:
- log.debug("Preparing Alembic export with options: %s",
- json.dumps(options, indent=4))
- log.debug("Extracting Alembic with job arguments: %s", job_str)
-
- # Perform extraction
- print("Alembic Job Arguments : {}".format(job_str))
-
- # Disable the parallel evaluation temporarily to ensure no buggy
- # exports are made. (PLN-31)
- # TODO: Make sure this actually fixes the issues
- with evaluation("off"):
- cmds.AbcExport(j=job_str, verbose=verbose)
-
- if verbose:
- log.debug("Extracted Alembic to: %s", file)
-
- return file
-
-
# region ID
def get_id_required_nodes(referenced_nodes=False,
nodes=None,
@@ -2520,7 +2317,16 @@ def set_scene_fps(fps, update=True):
"""
fps_mapping = {
+ '2': '2fps',
+ '3': '3fps',
+ '4': '4fps',
+ '5': '5fps',
+ '6': '6fps',
+ '8': '8fps',
+ '10': '10fps',
+ '12': '12fps',
'15': 'game',
+ '16': '16fps',
'24': 'film',
'25': 'pal',
'30': 'ntsc',
@@ -2612,21 +2418,24 @@ def get_fps_for_current_context():
Returns:
Union[int, float]: FPS value.
"""
-
- project_name = get_current_project_name()
- folder_path = get_current_folder_path()
- folder_entity = ayon_api.get_folder_by_path(
- project_name, folder_path, fields={"attrib.fps"}
- ) or {}
- fps = folder_entity.get("attrib", {}).get("fps")
+ task_entity = get_current_task_entity(fields={"attrib"})
+ fps = task_entity.get("attrib", {}).get("fps")
if not fps:
- project_entity = ayon_api.get_project(
- project_name, fields=["attrib.fps"]
+ project_name = get_current_project_name()
+ folder_path = get_current_folder_path()
+ folder_entity = ayon_api.get_folder_by_path(
+ project_name, folder_path, fields={"attrib.fps"}
) or {}
- fps = project_entity.get("attrib", {}).get("fps")
+ fps = folder_entity.get("attrib", {}).get("fps")
if not fps:
- fps = 25
+ project_entity = ayon_api.get_project(
+ project_name, fields=["attrib.fps"]
+ ) or {}
+ fps = project_entity.get("attrib", {}).get("fps")
+
+ if not fps:
+ fps = 25
return convert_to_maya_fps(fps)
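Taken together, the two lib.py changes let an unusual rate such as 16 fps resolve end to end; a hedged sketch (assuming `set_scene_fps` resolves the unit name through `fps_mapping` and applies it with `cmds.currentUnit`):

    from ayon_core.hosts.maya.api.lib import (
        get_fps_for_current_context,
        set_scene_fps,
    )

    # Resolution order is now: task attrib -> folder attrib ->
    # project attrib -> hardcoded 25, converted to a valid Maya fps.
    fps = get_fps_for_current_context()
    set_scene_fps(fps)  # e.g. ends up as cmds.currentUnit(time="16fps")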
diff --git a/client/ayon_core/hosts/maya/api/lib_renderproducts.py b/client/ayon_core/hosts/maya/api/lib_renderproducts.py
index 832d1c21c2..52c282c6de 100644
--- a/client/ayon_core/hosts/maya/api/lib_renderproducts.py
+++ b/client/ayon_core/hosts/maya/api/lib_renderproducts.py
@@ -720,7 +720,8 @@ class RenderProductsArnold(ARenderProducts):
# AOVs > Legacy > Maya Render View > Mode
aovs_enabled = bool(
- self._get_attr("defaultArnoldRenderOptions.aovMode")
+ self._get_attr(
+ "defaultArnoldRenderOptions.aovMode", as_string=False)
)
if not aovs_enabled:
return beauty_products
diff --git a/client/ayon_core/hosts/maya/api/pipeline.py b/client/ayon_core/hosts/maya/api/pipeline.py
index 864a0c1599..74d73e5f95 100644
--- a/client/ayon_core/hosts/maya/api/pipeline.py
+++ b/client/ayon_core/hosts/maya/api/pipeline.py
@@ -30,9 +30,11 @@ from ayon_core.pipeline import (
register_loader_plugin_path,
register_inventory_action_path,
register_creator_plugin_path,
+ register_workfile_build_plugin_path,
deregister_loader_plugin_path,
deregister_inventory_action_path,
deregister_creator_plugin_path,
+ deregister_workfile_build_plugin_path,
AYON_CONTAINER_ID,
AVALON_CONTAINER_ID,
)
@@ -47,7 +49,6 @@ from ayon_core.hosts.maya import MAYA_ROOT_DIR
from ayon_core.hosts.maya.lib import create_workspace_mel
from . import menu, lib
-from .workfile_template_builder import MayaPlaceholderLoadPlugin
from .workio import (
open_file,
save_file,
@@ -64,6 +65,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
+WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
@@ -93,7 +95,7 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
- self.log.info(PUBLISH_PATH)
+ register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
self.log.info("Installing callbacks ... ")
register_event_callback("init", on_init)
@@ -148,11 +150,6 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
def get_containers(self):
return ls()
- def get_workfile_build_placeholder_plugins(self):
- return [
- MayaPlaceholderLoadPlugin
- ]
-
@contextlib.contextmanager
def maintained_selection(self):
with lib.maintained_selection():
@@ -338,6 +335,7 @@ def uninstall():
deregister_loader_plugin_path(LOAD_PATH)
deregister_creator_plugin_path(CREATE_PATH)
deregister_inventory_action_path(INVENTORY_PATH)
+ deregister_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
menu.uninstall()
diff --git a/client/ayon_core/hosts/maya/api/render_setup_tools.py b/client/ayon_core/hosts/maya/api/render_setup_tools.py
index a5e04de184..9b00b53eee 100644
--- a/client/ayon_core/hosts/maya/api/render_setup_tools.py
+++ b/client/ayon_core/hosts/maya/api/render_setup_tools.py
@@ -19,7 +19,7 @@ from .lib import pairwise
@contextlib.contextmanager
-def _allow_export_from_render_setup_layer():
+def allow_export_from_render_setup_layer():
"""Context manager to override Maya settings to allow RS layer export"""
try:
@@ -102,7 +102,7 @@ def export_in_rs_layer(path, nodes, export=None):
cmds.disconnectAttr(src, dest)
# Export Selected
- with _allow_export_from_render_setup_layer():
+ with allow_export_from_render_setup_layer():
cmds.select(nodes, noExpand=True)
if export:
export()
diff --git a/client/ayon_core/hosts/maya/api/workfile_template_builder.py b/client/ayon_core/hosts/maya/api/workfile_template_builder.py
index 75386d7e64..cfd416b708 100644
--- a/client/ayon_core/hosts/maya/api/workfile_template_builder.py
+++ b/client/ayon_core/hosts/maya/api/workfile_template_builder.py
@@ -1,5 +1,3 @@
-import json
-
from maya import cmds
from ayon_core.pipeline import (
@@ -10,16 +8,13 @@ from ayon_core.pipeline import (
)
from ayon_core.pipeline.workfile.workfile_template_builder import (
TemplateAlreadyImported,
- AbstractTemplateBuilder,
- PlaceholderPlugin,
- LoadPlaceholderItem,
- PlaceholderLoadMixin,
+ AbstractTemplateBuilder
)
from ayon_core.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
-from .lib import read, imprint, get_reference_node, get_main_window
+from .lib import get_main_window
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
@@ -91,255 +86,6 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
return True
-class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
- identifier = "maya.load"
- label = "Maya load"
-
- def _collect_scene_placeholders(self):
- # Cache placeholder data to shared data
- placeholder_nodes = self.builder.get_shared_populate_data(
- "placeholder_nodes"
- )
- if placeholder_nodes is None:
- attributes = cmds.ls("*.plugin_identifier", long=True)
- placeholder_nodes = {}
- for attribute in attributes:
- node_name = attribute.rpartition(".")[0]
- placeholder_nodes[node_name] = (
- self._parse_placeholder_node_data(node_name)
- )
-
- self.builder.set_shared_populate_data(
- "placeholder_nodes", placeholder_nodes
- )
- return placeholder_nodes
-
- def _parse_placeholder_node_data(self, node_name):
- placeholder_data = read(node_name)
- parent_name = (
- cmds.getAttr(node_name + ".parent", asString=True)
- or node_name.rpartition("|")[0]
- or ""
- )
- if parent_name:
- siblings = cmds.listRelatives(parent_name, children=True)
- else:
- siblings = cmds.ls(assemblies=True)
- node_shortname = node_name.rpartition("|")[2]
- current_index = cmds.getAttr(node_name + ".index", asString=True)
- if current_index < 0:
- current_index = siblings.index(node_shortname)
-
- placeholder_data.update({
- "parent": parent_name,
- "index": current_index
- })
- return placeholder_data
-
- def _create_placeholder_name(self, placeholder_data):
- placeholder_name_parts = placeholder_data["builder_type"].split("_")
-
- pos = 1
- placeholder_product_type = placeholder_data.get("product_type")
- if placeholder_product_type is None:
- placeholder_product_type = placeholder_data.get("family")
-
- if placeholder_product_type:
- placeholder_name_parts.insert(pos, placeholder_product_type)
- pos += 1
-
- # add loader arguments if any
- loader_args = placeholder_data["loader_args"]
- if loader_args:
- loader_args = json.loads(loader_args.replace('\'', '\"'))
- values = [v for v in loader_args.values()]
- for value in values:
- placeholder_name_parts.insert(pos, value)
- pos += 1
-
- placeholder_name = "_".join(placeholder_name_parts)
-
- return placeholder_name.capitalize()
-
- def _get_loaded_repre_ids(self):
- loaded_representation_ids = self.builder.get_shared_populate_data(
- "loaded_representation_ids"
- )
- if loaded_representation_ids is None:
- try:
- containers = cmds.sets("AVALON_CONTAINERS", q=True)
- except ValueError:
- containers = []
-
- loaded_representation_ids = {
- cmds.getAttr(container + ".representation")
- for container in containers
- }
- self.builder.set_shared_populate_data(
- "loaded_representation_ids", loaded_representation_ids
- )
- return loaded_representation_ids
-
- def create_placeholder(self, placeholder_data):
- selection = cmds.ls(selection=True)
- if len(selection) > 1:
- raise ValueError("More then one item are selected")
-
- parent = selection[0] if selection else None
-
- placeholder_data["plugin_identifier"] = self.identifier
-
- placeholder_name = self._create_placeholder_name(placeholder_data)
-
- placeholder = cmds.spaceLocator(name=placeholder_name)[0]
- if parent:
- placeholder = cmds.parent(placeholder, selection[0])[0]
-
- imprint(placeholder, placeholder_data)
-
- # Add helper attributes to keep placeholder info
- cmds.addAttr(
- placeholder,
- longName="parent",
- hidden=True,
- dataType="string"
- )
- cmds.addAttr(
- placeholder,
- longName="index",
- hidden=True,
- attributeType="short",
- defaultValue=-1
- )
-
- cmds.setAttr(placeholder + ".parent", "", type="string")
-
- def update_placeholder(self, placeholder_item, placeholder_data):
- node_name = placeholder_item.scene_identifier
- new_values = {}
- for key, value in placeholder_data.items():
- placeholder_value = placeholder_item.data.get(key)
- if value != placeholder_value:
- new_values[key] = value
- placeholder_item.data[key] = value
-
- for key in new_values.keys():
- cmds.deleteAttr(node_name + "." + key)
-
- imprint(node_name, new_values)
-
- def collect_placeholders(self):
- output = []
- scene_placeholders = self._collect_scene_placeholders()
- for node_name, placeholder_data in scene_placeholders.items():
- if placeholder_data.get("plugin_identifier") != self.identifier:
- continue
-
- # TODO do data validations and maybe upgrades if they are invalid
- output.append(
- LoadPlaceholderItem(node_name, placeholder_data, self)
- )
-
- return output
-
- def populate_placeholder(self, placeholder):
- self.populate_load_placeholder(placeholder)
-
- def repopulate_placeholder(self, placeholder):
- repre_ids = self._get_loaded_repre_ids()
- self.populate_load_placeholder(placeholder, repre_ids)
-
- def get_placeholder_options(self, options=None):
- return self.get_load_plugin_options(options)
-
- def post_placeholder_process(self, placeholder, failed):
- """Cleanup placeholder after load of its corresponding representations.
-
- Args:
- placeholder (PlaceholderItem): Item which was just used to load
- representation.
- failed (bool): Loading of representation failed.
- """
- # Hide placeholder and add them to placeholder set
- node = placeholder.scene_identifier
-
- cmds.sets(node, addElement=PLACEHOLDER_SET)
- cmds.hide(node)
- cmds.setAttr(node + ".hiddenInOutliner", True)
-
- def delete_placeholder(self, placeholder):
- """Remove placeholder if building was successful"""
- cmds.delete(placeholder.scene_identifier)
-
- def load_succeed(self, placeholder, container):
- self._parent_in_hierarchy(placeholder, container)
-
- def _parent_in_hierarchy(self, placeholder, container):
- """Parent loaded container to placeholder's parent.
-
- ie : Set loaded content as placeholder's sibling
-
- Args:
- container (str): Placeholder loaded containers
- """
-
- if not container:
- return
-
- roots = cmds.sets(container, q=True) or []
- ref_node = None
- try:
- ref_node = get_reference_node(roots)
- except AssertionError as e:
- self.log.info(e.args[0])
-
- nodes_to_parent = []
- for root in roots:
- if ref_node:
- ref_root = cmds.referenceQuery(root, nodes=True)[0]
- ref_root = (
- cmds.listRelatives(ref_root, parent=True, path=True) or
- [ref_root]
- )
- nodes_to_parent.extend(ref_root)
- continue
- if root.endswith("_RN"):
- # Backwards compatibility for hardcoded reference names.
- refRoot = cmds.referenceQuery(root, n=True)[0]
- refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot]
- nodes_to_parent.extend(refRoot)
- elif root not in cmds.listSets(allSets=True):
- nodes_to_parent.append(root)
-
- elif not cmds.sets(root, q=True):
- return
-
- # Move loaded nodes to correct index in outliner hierarchy
- placeholder_form = cmds.xform(
- placeholder.scene_identifier,
- q=True,
- matrix=True,
- worldSpace=True
- )
- scene_parent = cmds.listRelatives(
- placeholder.scene_identifier, parent=True, fullPath=True
- )
- for node in set(nodes_to_parent):
- cmds.reorder(node, front=True)
- cmds.reorder(node, relative=placeholder.data["index"])
- cmds.xform(node, matrix=placeholder_form, ws=True)
- if scene_parent:
- cmds.parent(node, scene_parent)
- else:
- cmds.parent(node, world=True)
-
- holding_sets = cmds.listSets(object=placeholder.scene_identifier)
- if not holding_sets:
- return
- for holding_set in holding_sets:
- cmds.sets(roots, forceElement=holding_set)
-
-
def build_workfile_template(*args):
builder = MayaTemplateBuilder(registered_host())
builder.build_template()
diff --git a/client/ayon_core/hosts/maya/api/yeti.py b/client/ayon_core/hosts/maya/api/yeti.py
new file mode 100644
index 0000000000..1526c3a2f3
--- /dev/null
+++ b/client/ayon_core/hosts/maya/api/yeti.py
@@ -0,0 +1,101 @@
+from typing import List
+
+from maya import cmds
+
+
+def get_yeti_user_variables(yeti_shape_node: str) -> List[str]:
+ """Get user defined yeti user variables for a `pgYetiMaya` shape node.
+
+ Arguments:
+ yeti_shape_node (str): The `pgYetiMaya` shape node.
+
+ Returns:
+ list: Attribute names (for a vector attribute it only lists the top
+ parent attribute, not the attribute per axis)
+ """
+
+ attrs = cmds.listAttr(yeti_shape_node,
+ userDefined=True,
+ string=("yetiVariableV_*",
+ "yetiVariableF_*")) or []
+ valid_attrs = []
+ for attr in attrs:
+ attr_type = cmds.attributeQuery(attr, node=yeti_shape_node,
+ attributeType=True)
+ if attr.startswith("yetiVariableV_") and attr_type == "double3":
+ # vector
+ valid_attrs.append(attr)
+ elif attr.startswith("yetiVariableF_") and attr_type == "double":
+ valid_attrs.append(attr)
+
+ return valid_attrs
+
+
+def create_yeti_variable(yeti_shape_node: str,
+ attr_name: str,
+ value=None,
+ force_value: bool = False) -> bool:
+ """Get user defined yeti user variables for a `pgYetiMaya` shape node.
+
+ Arguments:
+ yeti_shape_node (str): The `pgYetiMaya` shape node.
+ attr_name (str): The fully qualified yeti variable name, e.g.
+ "yetiVariableF_myfloat" or "yetiVariableV_myvector"
+ value (object): The value to set (must match the type of the
+ attribute). When value is None it will be ignored and not be set.
+ force_value (bool): Whether to set the value even if the attribute
+ already exists.
+
+ Returns:
+ bool: Whether the attribute value was set or not.
+
+ """
+ exists = cmds.attributeQuery(attr_name, node=yeti_shape_node, exists=True)
+ if not exists:
+ if attr_name.startswith("yetiVariableV_"):
+ _create_vector_yeti_user_variable(yeti_shape_node, attr_name)
+ if attr_name.startswith("yetiVariableF_"):
+ _create_float_yeti_user_variable(yeti_shape_node, attr_name)
+
+ if value is not None and (not exists or force_value):
+ plug = "{}.{}".format(yeti_shape_node, attr_name)
+ if (
+ isinstance(value, (list, tuple))
+ and attr_name.startswith("yetiVariableV_")
+ ):
+ cmds.setAttr(plug, *value, type="double3")
+ else:
+ cmds.setAttr(plug, value)
+
+ return True
+ return False
+
+
+def _create_vector_yeti_user_variable(yeti_shape_node: str, attr_name: str):
+ if not attr_name.startswith("yetiVariableV_"):
+ raise ValueError("Must start with yetiVariableV_")
+ cmds.addAttr(yeti_shape_node,
+ longName=attr_name,
+ attributeType="double3",
+ cachedInternally=True,
+ keyable=True)
+ for axis in "XYZ":
+ cmds.addAttr(yeti_shape_node,
+ longName="{}{}".format(attr_name, axis),
+ attributeType="double",
+ parent=attr_name,
+ cachedInternally=True,
+ keyable=True)
+
+
+def _create_float_yeti_user_variable(yeti_node: str, attr_name: str):
+ if not attr_name.startswith("yetiVariableF_"):
+ raise ValueError("Must start with yetiVariableF_")
+
+ cmds.addAttr(yeti_node,
+ longName=attr_name,
+ attributeType="double",
+ cachedInternally=True,
+ softMinValue=0,
+ softMaxValue=100,
+ keyable=True)
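A short usage sketch of the two new helpers (node and variable names are hypothetical):

    from maya import cmds
    from ayon_core.hosts.maya.api.yeti import (
        create_yeti_variable,
        get_yeti_user_variables,
    )

    shape = cmds.ls(type="pgYetiMaya", long=True)[0]

    # Creates the attribute and sets the value because it did not exist yet
    create_yeti_variable(shape, "yetiVariableF_density", value=2.5)

    # force_value=True also overwrites the value on repeat runs
    create_yeti_variable(shape, "yetiVariableV_tint",
                         value=(1.0, 0.5, 0.25), force_value=True)

    print(get_yeti_user_variables(shape))
    # e.g. ['yetiVariableF_density', 'yetiVariableV_tint']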
diff --git a/client/ayon_core/hosts/maya/plugins/create/create_animation.py b/client/ayon_core/hosts/maya/plugins/create/create_animation.py
deleted file mode 100644
index f30d9aba81..0000000000
--- a/client/ayon_core/hosts/maya/plugins/create/create_animation.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from ayon_core.hosts.maya.api import (
- lib,
- plugin
-)
-from ayon_core.lib import (
- BoolDef,
- TextDef
-)
-
-
-class CreateAnimation(plugin.MayaHiddenCreator):
- """Animation output for character rigs
-
- We hide the animation creator from the UI since the creation of it is
- automated upon loading a rig. There's an inventory action to recreate it
- for loaded rigs if by chance someone deleted the animation instance.
- """
- identifier = "io.openpype.creators.maya.animation"
- name = "animationDefault"
- label = "Animation"
- product_type = "animation"
- icon = "male"
-
- write_color_sets = False
- write_face_sets = False
- include_parent_hierarchy = False
- include_user_defined_attributes = False
-
- def get_instance_attr_defs(self):
-
- defs = lib.collect_animation_defs()
-
- defs.extend([
- BoolDef("writeColorSets",
- label="Write vertex colors",
- tooltip="Write vertex colors with the geometry",
- default=self.write_color_sets),
- BoolDef("writeFaceSets",
- label="Write face sets",
- tooltip="Write face sets with the geometry",
- default=self.write_face_sets),
- BoolDef("writeNormals",
- label="Write normals",
- tooltip="Write normals with the deforming geometry",
- default=True),
- BoolDef("renderableOnly",
- label="Renderable Only",
- tooltip="Only export renderable visible shapes",
- default=False),
- BoolDef("visibleOnly",
- label="Visible Only",
- tooltip="Only export dag objects visible during "
- "frame range",
- default=False),
- BoolDef("includeParentHierarchy",
- label="Include Parent Hierarchy",
- tooltip="Whether to include parent hierarchy of nodes in "
- "the publish instance",
- default=self.include_parent_hierarchy),
- BoolDef("worldSpace",
- label="World-Space Export",
- default=True),
- BoolDef("includeUserDefinedAttributes",
- label="Include User Defined Attributes",
- default=self.include_user_defined_attributes),
- TextDef("attr",
- label="Custom Attributes",
- default="",
- placeholder="attr1, attr2"),
- TextDef("attrPrefix",
- label="Custom Attributes Prefix",
- placeholder="prefix1, prefix2")
- ])
-
- # TODO: Implement these on a Deadline plug-in instead?
- """
- # Default to not send to farm.
- self.data["farm"] = False
- self.data["priority"] = 50
- """
-
- return defs
-
- def apply_settings(self, project_settings):
- super(CreateAnimation, self).apply_settings(project_settings)
- # Hardcoding creator to be enabled due to existing settings would
- # disable the creator causing the creator plugin to not be
- # discoverable.
- self.enabled = True
diff --git a/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py b/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py
new file mode 100644
index 0000000000..08d50a1ab8
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py
@@ -0,0 +1,139 @@
+from maya import cmds
+
+from ayon_core.hosts.maya.api import lib, plugin
+
+from ayon_core.lib import (
+ BoolDef,
+ NumberDef,
+)
+from ayon_core.pipeline import CreatedInstance
+
+
+def _get_animation_attr_defs(cls):
+ """Get Animation generic definitions."""
+ defs = lib.collect_animation_defs()
+ defs.extend(
+ [
+ BoolDef("farm", label="Submit to Farm"),
+ NumberDef("priority", label="Farm job Priority", default=50),
+ BoolDef("refresh", label="Refresh viewport during export"),
+ BoolDef(
+ "includeParentHierarchy",
+ label="Include Parent Hierarchy",
+ tooltip=(
+ "Whether to include parent hierarchy of nodes in the "
+ "publish instance."
+ )
+ ),
+ BoolDef(
+ "includeUserDefinedAttributes",
+ label="Include User Defined Attributes",
+ tooltip=(
+ "Whether to include all custom maya attributes found "
+ "on nodes as attributes in the Alembic data."
+ )
+ ),
+ ]
+ )
+
+ return defs
+
+
+def convert_legacy_alembic_creator_attributes(node_data, class_name):
+ """This is a legacy transfer of creator attributes to publish attributes
+ for ExtractAlembic/ExtractAnimation plugin.
+ """
+ publish_attributes = node_data["publish_attributes"]
+
+ if class_name in publish_attributes:
+ return node_data
+
+ attributes = [
+ "attr",
+ "attrPrefix",
+ "visibleOnly",
+ "writeColorSets",
+ "writeFaceSets",
+ "writeNormals",
+ "renderableOnly",
+ "worldSpace"
+ ]
+ plugin_attributes = {}
+ for attr in attributes:
+ if attr not in node_data["creator_attributes"]:
+ continue
+ value = node_data["creator_attributes"].pop(attr)
+
+ plugin_attributes[attr] = value
+
+ publish_attributes[class_name] = plugin_attributes
+
+ return node_data
+
+
+class CreateAnimation(plugin.MayaHiddenCreator):
+ """Animation output for character rigs
+
+ We hide the animation creator from the UI since the creation of it is
+ automated upon loading a rig. There's an inventory action to recreate it
+ for loaded rigs if by chance someone deleted the animation instance.
+ """
+
+ identifier = "io.openpype.creators.maya.animation"
+ name = "animationDefault"
+ label = "Animation"
+ product_type = "animation"
+ icon = "male"
+
+ write_color_sets = False
+ write_face_sets = False
+ include_parent_hierarchy = False
+ include_user_defined_attributes = False
+
+ def read_instance_node(self, node):
+ node_data = super(CreateAnimation, self).read_instance_node(node)
+ node_data = convert_legacy_alembic_creator_attributes(
+ node_data, "ExtractAnimation"
+ )
+ return node_data
+
+ def get_instance_attr_defs(self):
+ defs = super(CreateAnimation, self).get_instance_attr_defs()
+ defs += _get_animation_attr_defs(self)
+ return defs
+
+
+class CreatePointCache(plugin.MayaCreator):
+ """Alembic pointcache for animated data"""
+
+ identifier = "io.openpype.creators.maya.pointcache"
+ label = "Pointcache"
+ product_type = "pointcache"
+ icon = "gears"
+ write_color_sets = False
+ write_face_sets = False
+ include_user_defined_attributes = False
+
+ def read_instance_node(self, node):
+ node_data = super(CreatePointCache, self).read_instance_node(node)
+ node_data = convert_legacy_alembic_creator_attributes(
+ node_data, "ExtractAlembic"
+ )
+ return node_data
+
+ def get_instance_attr_defs(self):
+ defs = super(CreatePointCache, self).get_instance_attr_defs()
+ defs += _get_animation_attr_defs(self)
+ return defs
+
+ def create(self, product_name, instance_data, pre_create_data):
+ instance = super(CreatePointCache, self).create(
+ product_name, instance_data, pre_create_data
+ )
+ instance_node = instance.get("instance_node")
+
+ # For Arnold standin proxy
+ proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
+ cmds.sets(proxy_set, forceElement=instance_node)
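To illustrate the legacy conversion above, a hypothetical instance node's data before and after the transfer (keys abridged; the import path assumes the new module location):

    from ayon_core.hosts.maya.plugins.create.create_animation_pointcache import (
        convert_legacy_alembic_creator_attributes,
    )

    node_data = {
        "creator_attributes": {"worldSpace": True, "writeNormals": False},
        "publish_attributes": {},
    }
    node_data = convert_legacy_alembic_creator_attributes(
        node_data, "ExtractAlembic"
    )
    # The alembic flags moved under the publish plugin's attributes:
    # {
    #     "creator_attributes": {},
    #     "publish_attributes": {
    #         "ExtractAlembic": {"worldSpace": True, "writeNormals": False}
    #     },
    # }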
diff --git a/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py b/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py
deleted file mode 100644
index 05e3a1a29f..0000000000
--- a/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from maya import cmds
-
-from ayon_core.hosts.maya.api import (
- lib,
- plugin
-)
-from ayon_core.lib import (
- BoolDef,
- TextDef
-)
-
-
-class CreatePointCache(plugin.MayaCreator):
- """Alembic pointcache for animated data"""
-
- identifier = "io.openpype.creators.maya.pointcache"
- label = "Pointcache"
- product_type = "pointcache"
- icon = "gears"
- write_color_sets = False
- write_face_sets = False
- include_user_defined_attributes = False
-
- def get_instance_attr_defs(self):
-
- defs = lib.collect_animation_defs()
-
- defs.extend([
- BoolDef("writeColorSets",
- label="Write vertex colors",
- tooltip="Write vertex colors with the geometry",
- default=False),
- BoolDef("writeFaceSets",
- label="Write face sets",
- tooltip="Write face sets with the geometry",
- default=False),
- BoolDef("renderableOnly",
- label="Renderable Only",
- tooltip="Only export renderable visible shapes",
- default=False),
- BoolDef("visibleOnly",
- label="Visible Only",
- tooltip="Only export dag objects visible during "
- "frame range",
- default=False),
- BoolDef("includeParentHierarchy",
- label="Include Parent Hierarchy",
- tooltip="Whether to include parent hierarchy of nodes in "
- "the publish instance",
- default=False),
- BoolDef("worldSpace",
- label="World-Space Export",
- default=True),
- BoolDef("refresh",
- label="Refresh viewport during export",
- default=False),
- BoolDef("includeUserDefinedAttributes",
- label="Include User Defined Attributes",
- default=self.include_user_defined_attributes),
- TextDef("attr",
- label="Custom Attributes",
- default="",
- placeholder="attr1, attr2"),
- TextDef("attrPrefix",
- label="Custom Attributes Prefix",
- default="",
- placeholder="prefix1, prefix2")
- ])
-
- # TODO: Implement these on a Deadline plug-in instead?
- """
- # Default to not send to farm.
- self.data["farm"] = False
- self.data["priority"] = 50
- """
-
- return defs
-
- def create(self, product_name, instance_data, pre_create_data):
-
- instance = super(CreatePointCache, self).create(
- product_name, instance_data, pre_create_data
- )
- instance_node = instance.get("instance_node")
-
- # For Arnold standin proxy
- proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
- cmds.sets(proxy_set, forceElement=instance_node)
diff --git a/client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py b/client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py
index 839a4dad90..5410546a2e 100644
--- a/client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py
+++ b/client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py
@@ -37,7 +37,7 @@ class ConnectGeometry(InventoryAction):
repre_id = container["representation"]
repre_context = repre_contexts_by_id[repre_id]
- product_type = repre_context["prouct"]["productType"]
+ product_type = repre_context["product"]["productType"]
containers_by_product_type.setdefault(product_type, [])
containers_by_product_type[product_type].append(container)
diff --git a/client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py b/client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py
index bf9e679928..166c419072 100644
--- a/client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py
+++ b/client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py
@@ -36,7 +36,7 @@ class ConnectXgen(InventoryAction):
repre_id = container["representation"]
repre_context = repre_contexts_by_id[repre_id]
- product_type = repre_context["prouct"]["productType"]
+ product_type = repre_context["product"]["productType"]
containers_by_product_type.setdefault(product_type, [])
containers_by_product_type[product_type].append(container)
diff --git a/client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py b/client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py
index 5916bf7b97..8f13cc6ae5 100644
--- a/client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py
+++ b/client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py
@@ -39,7 +39,7 @@ class ConnectYetiRig(InventoryAction):
repre_id = container["representation"]
repre_context = repre_contexts_by_id[repre_id]
- product_type = repre_context["prouct"]["productType"]
+ product_type = repre_context["product"]["productType"]
containers_by_product_type.setdefault(product_type, [])
containers_by_product_type[product_type].append(container)
diff --git a/client/ayon_core/hosts/maya/plugins/load/load_as_template.py b/client/ayon_core/hosts/maya/plugins/load/load_as_template.py
new file mode 100644
index 0000000000..f696d369e3
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/load/load_as_template.py
@@ -0,0 +1,39 @@
+from ayon_core.lib import (
+ BoolDef
+)
+from ayon_core.pipeline import (
+ load,
+ registered_host
+)
+from ayon_core.hosts.maya.api.workfile_template_builder import (
+ MayaTemplateBuilder
+)
+
+
+class LoadAsTemplate(load.LoaderPlugin):
+ """Load workfile as a template """
+
+ product_types = {"workfile", "mayaScene"}
+ label = "Load as template"
+ representations = ["ma", "mb"]
+ icon = "wrench"
+ color = "#775555"
+ order = 10
+
+ options = [
+ BoolDef("keep_placeholders",
+ label="Keep Placeholders",
+ default=False),
+ BoolDef("create_first_version",
+ label="Create First Version",
+ default=False),
+ ]
+
+ def load(self, context, name, namespace, data):
+ keep_placeholders = data.get("keep_placeholders", False)
+ create_first_version = data.get("create_first_version", False)
+ path = self.filepath_from_context(context)
+ builder = MayaTemplateBuilder(registered_host())
+ builder.build_template(template_path=path,
+ keep_placeholders=keep_placeholders,
+ create_first_version=create_first_version)
diff --git a/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py
index caea6b7a72..4ca9ae9d03 100644
--- a/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py
+++ b/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py
@@ -12,6 +12,7 @@ from ayon_core.pipeline import (
get_representation_path
)
from ayon_core.hosts.maya.api import lib
+from ayon_core.hosts.maya.api.yeti import create_yeti_variable
from ayon_core.hosts.maya.api.pipeline import containerise
from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type
@@ -23,8 +24,19 @@ SKIP_UPDATE_ATTRS = {
"viewportDensity",
"viewportWidth",
"viewportLength",
+ "renderDensity",
+ "renderWidth",
+ "renderLength",
+ "increaseRenderBounds"
}
+SKIP_ATTR_MESSAGE = (
+ "Skipping updating %s.%s to %s because it "
+ "is considered a local overridable attribute. "
+ "Either set manually or the load the cache "
+ "anew."
+)
+
def set_attribute(node, attr, value):
"""Wrapper of set attribute which ignores None values"""
@@ -209,9 +221,31 @@ class YetiCacheLoader(load.LoaderPlugin):
for attr, value in node_settings["attrs"].items():
if attr in SKIP_UPDATE_ATTRS:
+ self.log.info(
+ SKIP_ATTR_MESSAGE, yeti_node, attr, value
+ )
continue
set_attribute(attr, value, yeti_node)
+ # Set up user defined attributes
+ user_variables = node_settings.get("user_variables", {})
+ for attr, value in user_variables.items():
+ was_value_set = create_yeti_variable(
+ yeti_shape_node=yeti_node,
+ attr_name=attr,
+ value=value,
+ # We do not want to update the
+ # value if it already exists so
+ # that any local overrides that
+ # may have been applied still
+ # persist
+ force_value=False
+ )
+ if not was_value_set:
+ self.log.info(
+ SKIP_ATTR_MESSAGE, yeti_node, attr, value
+ )
+
cmds.setAttr("{}.representation".format(container_node),
repre_entity["id"],
typ="string")
@@ -332,6 +366,13 @@ class YetiCacheLoader(load.LoaderPlugin):
for attr, value in attributes.items():
set_attribute(attr, value, yeti_node)
+ # Set up user defined attributes
+ user_variables = node_settings.get("user_variables", {})
+ for attr, value in user_variables.items():
+ create_yeti_variable(yeti_shape_node=yeti_node,
+ attr_name=attr,
+ value=value)
+
# Connect to the time node
cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node)
diff --git a/client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py b/client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py
index bf9525bae3..7444566ee1 100644
--- a/client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py
+++ b/client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py
@@ -1,8 +1,13 @@
+from typing import List
+
import maya.cmds as cmds
from ayon_core.hosts.maya.api import plugin
from ayon_core.hosts.maya.api import lib
+from ayon_core.pipeline import registered_host
+from ayon_core.pipeline.create import CreateContext
+
class YetiRigLoader(plugin.ReferenceLoader):
"""This loader will load Yeti rig."""
@@ -15,6 +20,9 @@ class YetiRigLoader(plugin.ReferenceLoader):
icon = "code-fork"
color = "orange"
+ # From settings
+ create_cache_instance_on_load = True
+
def process_reference(
self, context, name=None, namespace=None, options=None
):
@@ -49,4 +57,41 @@ class YetiRigLoader(plugin.ReferenceLoader):
)
self[:] = nodes
+ if self.create_cache_instance_on_load:
+ # Automatically create an instance to allow publishing the loaded
+ # yeti rig into a yeti cache
+ self._create_yeti_cache_instance(nodes, variant=namespace)
+
return nodes
+
+ def _create_yeti_cache_instance(self, nodes: List[str], variant: str):
+ """Create a yeticache product type instance to publish the output.
+
+ This is similar to how loading animation rig will automatically create
+ an animation instance for publishing any loaded character rigs, but
+ then for yeti rigs.
+
+ Args:
+ nodes (List[str]): Nodes generated on load.
+ variant (str): Variant for the yeti cache instance to create.
+
+ """
+
+ # Find the pgYetiMaya nodes amongst the loaded nodes
+ yeti_nodes = cmds.ls(nodes, type="pgYetiMaya", long=True)
+ assert yeti_nodes, "No pgYetiMaya nodes in rig, this is a bug."
+
+ self.log.info("Creating variant: {}".format(variant))
+
+ creator_identifier = "io.openpype.creators.maya.yeticache"
+
+ host = registered_host()
+ create_context = CreateContext(host)
+
+ with lib.maintained_selection():
+ cmds.select(yeti_nodes, noExpand=True)
+ create_context.create(
+ creator_identifier=creator_identifier,
+ variant=variant,
+ pre_create_data={"use_selection": True}
+ )
diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py b/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py
index 2ab6511ece..391c80c84e 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py
@@ -58,4 +58,3 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):
if instance.data.get("farm"):
instance.data["families"].append("publish.farm")
-
diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_file_dependencies.py b/client/ayon_core/hosts/maya/plugins/publish/collect_file_dependencies.py
index 93b46c511b..60853bd1ee 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/collect_file_dependencies.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/collect_file_dependencies.py
@@ -12,7 +12,7 @@ class CollectFileDependencies(pyblish.api.ContextPlugin):
families = ["renderlayer"]
@classmethod
- def apply_settings(cls, project_settings, system_settings):
+ def apply_settings(cls, project_settings):
# Disable plug-in if not used for deadline submission anyway
settings = project_settings["deadline"]["publish"]["MayaSubmitDeadline"] # noqa
cls.enabled = settings.get("asset_dependencies", True)
diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py
index 16fef2e168..3d586d48fb 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py
@@ -14,7 +14,9 @@ class CollectUserDefinedAttributes(pyblish.api.InstancePlugin):
def process(self, instance):
# Collect user defined attributes.
- if not instance.data.get("includeUserDefinedAttributes", False):
+ if not instance.data["creator_attributes"].get(
+ "includeUserDefinedAttributes"
+ ):
return
if "out_hierarchy" in instance.data:
diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py
index 067a7bc532..e1755e4212 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py
@@ -3,6 +3,7 @@ from maya import cmds
import pyblish.api
from ayon_core.hosts.maya.api import lib
+from ayon_core.hosts.maya.api.yeti import get_yeti_user_variables
SETTINGS = {
@@ -34,7 +35,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
- "increaseRenderBounds"
- "imageSearchPath"
- Other information is the name of the transform and it's Colorbleed ID
+ Other information is the name of the transform and its `cbId`
"""
order = pyblish.api.CollectorOrder + 0.45
@@ -54,6 +55,16 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
# Get specific node attributes
attr_data = {}
for attr in SETTINGS:
+ # Ignore non-existing attributes with a warning, e.g. cbId
+ # if they have not been generated yet
+ if not cmds.attributeQuery(attr, node=shape, exists=True):
+ self.log.warning(
+ "Attribute '{}' not found on Yeti node: {}".format(
+ attr, shape
+ )
+ )
+ continue
+
current = cmds.getAttr("%s.%s" % (shape, attr))
# change None to empty string as Maya doesn't support
# NoneType in attributes
@@ -61,6 +72,12 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
current = ""
attr_data[attr] = current
+ # Get user variable attributes
+ user_variable_attrs = {
+ attr: lib.get_attribute("{}.{}".format(shape, attr))
+ for attr in get_yeti_user_variables(shape)
+ }
+
# Get transform data
parent = cmds.listRelatives(shape, parent=True)[0]
transform_data = {"name": parent, "cbId": lib.get_id(parent)}
@@ -70,6 +87,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
"name": shape,
"cbId": lib.get_id(shape),
"attrs": attr_data,
+ "user_variables": user_variable_attrs
}
settings["nodes"].append(shape_data)
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py b/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py
index 2c23f9b752..5f51dc38cb 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py
@@ -2,7 +2,7 @@ import os
import json
from ayon_core.pipeline import publish
-from ayon_core.hosts.maya.api.lib import extract_alembic
+from ayon_core.hosts.maya.api.alembic import extract_alembic
from maya import cmds
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py
index 5de72f7674..d7f9594374 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py
@@ -1,17 +1,28 @@
import os
+from collections import OrderedDict
from maya import cmds
from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
- extract_alembic,
suspended_refresh,
maintained_selection,
iter_visible_nodes_in_range
)
+from ayon_core.lib import (
+ BoolDef,
+ TextDef,
+ NumberDef,
+ EnumDef,
+ UISeparatorDef,
+ UILabelDef,
+)
+from ayon_core.pipeline.publish import AYONPyblishPluginMixin
+from ayon_core.pipeline import KnownPublishError
-class ExtractAlembic(publish.Extractor):
+class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
"""Produce an alembic of just point positions and normals.
Positions and normals, uvs, creases are preserved, but nothing more,
@@ -27,8 +38,35 @@ class ExtractAlembic(publish.Extractor):
targets = ["local", "remote"]
# From settings
+ attr = []
+ attrPrefix = []
+ autoSubd = False
bake_attributes = []
bake_attribute_prefixes = []
+ dataFormat = "ogawa"
+ eulerFilter = False
+ melPerFrameCallback = ""
+ melPostJobCallback = ""
+ overrides = []
+ preRoll = False
+ preRollStartFrame = 0
+ pythonPerFrameCallback = ""
+ pythonPostJobCallback = ""
+ renderableOnly = False
+ stripNamespaces = True
+ uvsOnly = False
+ uvWrite = False
+ userAttr = ""
+ userAttrPrefix = ""
+ verbose = False
+ visibleOnly = False
+ wholeFrameGeo = False
+ worldSpace = True
+ writeColorSets = False
+ writeCreases = True
+ writeFaceSets = False
+ writeNormals = True
+ writeUVSets = False
+ writeVisibility = False
def process(self, instance):
if instance.data.get("farm"):
@@ -41,16 +79,38 @@ class ExtractAlembic(publish.Extractor):
start = float(instance.data.get("frameStartHandle", 1))
end = float(instance.data.get("frameEndHandle", 1))
- attrs = instance.data.get("attr", "").split(";")
- attrs = [value for value in attrs if value.strip()]
+ attribute_values = self.get_attr_values_from_data(
+ instance.data
+ )
+
+ attrs = [
+ attr.strip()
+ for attr in attribute_values.get("attr", "").split(";")
+ if attr.strip()
+ ]
attrs += instance.data.get("userDefinedAttributes", [])
attrs += self.bake_attributes
attrs += ["cbId"]
- attr_prefixes = instance.data.get("attrPrefix", "").split(";")
- attr_prefixes = [value for value in attr_prefixes if value.strip()]
+ attr_prefixes = [
+ attr.strip()
+ for attr in attribute_values.get("attrPrefix", "").split(";")
+ if attr.strip()
+ ]
attr_prefixes += self.bake_attribute_prefixes
+ user_attrs = [
+ attr.strip()
+ for attr in attribute_values.get("userAttr", "").split(";")
+ if attr.strip()
+ ]
+
+ user_attr_prefixes = [
+ attr.strip()
+ for attr in attribute_values.get("userAttrPrefix", "").split(";")
+ if attr.strip()
+ ]
+
self.log.debug("Extracting pointcache..")
dirname = self.staging_dir(instance)
@@ -58,28 +118,83 @@ class ExtractAlembic(publish.Extractor):
filename = "{name}.abc".format(**instance.data)
path = os.path.join(parent_dir, filename)
- options = {
- "step": instance.data.get("step", 1.0),
- "attr": attrs,
- "attrPrefix": attr_prefixes,
- "writeVisibility": True,
- "writeCreases": True,
- "writeColorSets": instance.data.get("writeColorSets", False),
- "writeFaceSets": instance.data.get("writeFaceSets", False),
- "uvWrite": True,
- "selection": True,
- "worldSpace": instance.data.get("worldSpace", True)
- }
-
+ root = None
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
- options["root"] = roots
+ root = roots
- if int(cmds.about(version=True)) >= 2017:
- # Since Maya 2017 alembic supports multiple uv sets - write them.
- options["writeUVSets"] = True
+ kwargs = {
+ "file": path,
+ "attr": attrs,
+ "attrPrefix": attr_prefixes,
+ "userAttr": user_attrs,
+ "userAttrPrefix": user_attr_prefixes,
+ "dataFormat": attribute_values.get("dataFormat", self.dataFormat),
+ "endFrame": end,
+ "eulerFilter": attribute_values.get(
+ "eulerFilter", self.eulerFilter
+ ),
+ "preRoll": attribute_values.get("preRoll", self.preRoll),
+ "preRollStartFrame": attribute_values.get(
+ "preRollStartFrame", self.preRollStartFrame
+ ),
+ "renderableOnly": attribute_values.get(
+ "renderableOnly", self.renderableOnly
+ ),
+ "root": root,
+ "selection": True,
+ "startFrame": start,
+ "step": instance.data.get(
+ "creator_attributes", {}
+ ).get("step", 1.0),
+ "stripNamespaces": attribute_values.get(
+ "stripNamespaces", self.stripNamespaces
+ ),
+ "uvWrite": attribute_values.get("uvWrite", self.uvWrite),
+ "verbose": attribute_values.get("verbose", self.verbose),
+ "wholeFrameGeo": attribute_values.get(
+ "wholeFrameGeo", self.wholeFrameGeo
+ ),
+ "worldSpace": attribute_values.get("worldSpace", self.worldSpace),
+ "writeColorSets": attribute_values.get(
+ "writeColorSets", self.writeColorSets
+ ),
+ "writeCreases": attribute_values.get(
+ "writeCreases", self.writeCreases
+ ),
+ "writeFaceSets": attribute_values.get(
+ "writeFaceSets", self.writeFaceSets
+ ),
+ "writeUVSets": attribute_values.get(
+ "writeUVSets", self.writeUVSets
+ ),
+ "writeVisibility": attribute_values.get(
+ "writeVisibility", self.writeVisibility
+ ),
+ "autoSubd": attribute_values.get(
+ "autoSubd", self.autoSubd
+ ),
+ "uvsOnly": attribute_values.get(
+ "uvsOnly", self.uvsOnly
+ ),
+ "writeNormals": attribute_values.get(
+ "writeNormals", self.writeNormals
+ ),
+ "melPerFrameCallback": attribute_values.get(
+ "melPerFrameCallback", self.melPerFrameCallback
+ ),
+ "melPostJobCallback": attribute_values.get(
+ "melPostJobCallback", self.melPostJobCallback
+ ),
+ "pythonPerFrameCallback": attribute_values.get(
+ "pythonPerFrameCallback", self.pythonPostJobCallback
+ ),
+ "pythonPostJobCallback": attribute_values.get(
+ "pythonPostJobCallback", self.pythonPostJobCallback
+ )
+ }
if instance.data.get("visibleOnly", False):
# If we only want to include nodes that are visible in the frame
@@ -87,20 +202,19 @@ class ExtractAlembic(publish.Extractor):
# flag does not filter out those that are only hidden on some
# frames as it counts "animated" or "connected" visibilities as
# if it's always visible.
- nodes = list(iter_visible_nodes_in_range(nodes,
- start=start,
- end=end))
+ nodes = list(
+ iter_visible_nodes_in_range(nodes, start=start, end=end)
+ )
suspend = not instance.data.get("refresh", False)
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(nodes, noExpand=True)
- extract_alembic(
- file=path,
- startFrame=start,
- endFrame=end,
- **options
+ self.log.debug(
+ "Running `extract_alembic` with the keyword arguments: "
+ "{}".format(kwargs)
)
+ extract_alembic(**kwargs)
if "representations" not in instance.data:
instance.data["representations"] = []
@@ -124,21 +238,17 @@ class ExtractAlembic(publish.Extractor):
return
path = path.replace(".abc", "_proxy.abc")
+ kwargs["file"] = path
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
- options["root"] = instance.data["proxyRoots"]
+ kwargs["root"] = instance.data["proxyRoots"]
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(instance.data["proxy"])
- extract_alembic(
- file=path,
- startFrame=start,
- endFrame=end,
- **options
- )
+ extract_alembic(**kwargs)
representation = {
"name": "proxy",
@@ -152,24 +262,274 @@ class ExtractAlembic(publish.Extractor):
def get_members_and_roots(self, instance):
return instance[:], instance.data.get("setMembers")
+ @classmethod
+ def get_attribute_defs(cls):
+ if not cls.overrides:
+ return []
+
+ override_defs = OrderedDict({
+ "autoSubd": BoolDef(
+ "autoSubd",
+ label="Auto Subd",
+ default=cls.autoSubd,
+ tooltip=(
+ "If this flag is present and the mesh has crease edges, "
+ "crease vertices or holes, the mesh (OPolyMesh) will be "
+ "written out as an OSubD and crease info will be stored "
+ "in the Alembic file. Otherwise crease info won't be "
+ "preserved in the Alembic file unless a custom boolean "
+ "attribute SubDivisionMesh has been added to the mesh "
+ "node with its value set to true."
+ )
+ ),
+ "eulerFilter": BoolDef(
+ "eulerFilter",
+ label="Euler Filter",
+ default=cls.eulerFilter,
+ tooltip="Apply Euler filter while sampling rotations."
+ ),
+ "renderableOnly": BoolDef(
+ "renderableOnly",
+ label="Renderable Only",
+ default=cls.renderableOnly,
+ tooltip="Only export renderable visible shapes."
+ ),
+ "stripNamespaces": BoolDef(
+ "stripNamespaces",
+ label="Strip Namespaces",
+ default=cls.stripNamespaces,
+ tooltip=(
+ "Namespaces will be stripped off of the node before being "
+ "written to Alembic."
+ )
+ ),
+ "uvsOnly": BoolDef(
+ "uvsOnly",
+ label="UVs Only",
+ default=cls.uvsOnly,
+ tooltip=(
+ "If this flag is present, only uv data for PolyMesh and "
+ "SubD shapes will be written to the Alembic file."
+ )
+ ),
+ "uvWrite": BoolDef(
+ "uvWrite",
+ label="UV Write",
+ default=cls.uvWrite,
+ tooltip=(
+ "Uv data for PolyMesh and SubD shapes will be written to "
+ "the Alembic file."
+ )
+ ),
+ "verbose": BoolDef(
+ "verbose",
+ label="Verbose",
+ default=cls.verbose,
+ tooltip="Prints the current frame that is being evaluated."
+ ),
+ "visibleOnly": BoolDef(
+ "visibleOnly",
+ label="Visible Only",
+ default=cls.visibleOnly,
+ tooltip="Only export dag objects visible during frame range."
+ ),
+ "wholeFrameGeo": BoolDef(
+ "wholeFrameGeo",
+ label="Whole Frame Geo",
+ default=cls.wholeFrameGeo,
+ tooltip=(
+ "Data for geometry will only be written out on whole "
+ "frames."
+ )
+ ),
+ "worldSpace": BoolDef(
+ "worldSpace",
+ label="World Space",
+ default=cls.worldSpace,
+ tooltip="Any root nodes will be stored in world space."
+ ),
+ "writeColorSets": BoolDef(
+ "writeColorSets",
+ label="Write Color Sets",
+ default=cls.writeColorSets,
+ tooltip="Write vertex colors with the geometry."
+ ),
+ "writeFaceSets": BoolDef(
+ "writeFaceSets",
+ label="Write Face Sets",
+ default=cls.writeFaceSets,
+ tooltip="Write face sets with the geometry."
+ ),
+ "writeNormals": BoolDef(
+ "writeNormals",
+ label="Write Normals",
+ default=cls.writeNormals,
+ tooltip="Write normals with the deforming geometry."
+ ),
+ "writeUVSets": BoolDef(
+ "writeUVSets",
+ label="Write UV Sets",
+ default=cls.writeUVSets,
+ tooltip=(
+ "Write all uv sets on MFnMeshes as vector 2 indexed "
+ "geometry parameters with face varying scope."
+ )
+ ),
+ "writeVisibility": BoolDef(
+ "writeVisibility",
+ label="Write Visibility",
+ default=cls.writeVisibility,
+ tooltip=(
+ "Visibility state will be stored in the Alembic file. "
+ "Otherwise everything written out is treated as visible."
+ )
+ ),
+ "preRoll": BoolDef(
+ "preRoll",
+ label="Pre Roll",
+ default=cls.preRoll,
+ tooltip="This frame range will not be sampled."
+ ),
+ "preRollStartFrame": NumberDef(
+ "preRollStartFrame",
+ label="Pre Roll Start Frame",
+ tooltip=(
+ "The frame to start scene evaluation at. This is used"
+ " to set the starting frame for time dependent "
+ "translations and can be used to evaluate run-up that"
+ " isn't actually translated."
+ ),
+ default=cls.preRollStartFrame
+ ),
+ "dataFormat": EnumDef(
+ "dataFormat",
+ label="Data Format",
+ items=["ogawa", "HDF"],
+ default=cls.dataFormat,
+ tooltip="The data format to use to write the file."
+ ),
+ "attr": TextDef(
+ "attr",
+ label="Custom Attributes",
+ placeholder="attr1; attr2; ...",
+ default=cls.attr,
+ tooltip=(
+ "Attributes matching by name will be included in the "
+ "Alembic export. Attributes should be separated by "
+ "semi-colon `;`"
+ )
+ ),
+ "attrPrefix": TextDef(
+ "attrPrefix",
+ label="Custom Attributes Prefix",
+ placeholder="prefix1; prefix2; ...",
+ default=cls.attrPrefix,
+ tooltip=(
+ "Attributes starting with these prefixes will be included "
+ "in the Alembic export. Attributes should be separated by "
+ "semi-colon `;`"
+ )
+ ),
+ "userAttr": TextDef(
+ "userAttr",
+ label="User Attr",
+ placeholder="attr1; attr2; ...",
+ default=cls.userAttr,
+ tooltip=(
+ "Attributes matching by name will be included in the "
+ "Alembic export. Attributes should be separated by "
+ "semi-colon `;`"
+ )
+ ),
+ "userAttrPrefix": TextDef(
+ "userAttrPrefix",
+ label="User Attr Prefix",
+ placeholder="prefix1; prefix2; ...",
+ default=cls.userAttrPrefix,
+ tooltip=(
+ "Attributes starting with these prefixes will be included "
+ "in the Alembic export. Attributes should be separated by "
+ "semi-colon `;`"
+ )
+ ),
+ "melPerFrameCallback": TextDef(
+ "melPerFrameCallback",
+ label="Mel Per Frame Callback",
+ default=cls.melPerFrameCallback,
+ tooltip=(
+ "When each frame (and the static frame) is evaluated the "
+ "string specified is evaluated as a Mel command."
+ )
+ ),
+ "melPostJobCallback": TextDef(
+ "melPostJobCallback",
+ label="Mel Post Job Callback",
+ default=cls.melPostJobCallback,
+ tooltip=(
+ "When the translation has finished the string specified "
+ "is evaluated as a Mel command."
+ )
+ ),
+ "pythonPerFrameCallback": TextDef(
+ "pythonPerFrameCallback",
+ label="Python Per Frame Callback",
+ default=cls.pythonPerFrameCallback,
+ tooltip=(
+ "When each frame (and the static frame) is evaluated the "
+ "string specified is evaluated as a python command."
+ )
+ ),
+ "pythonPostJobCallback": TextDef(
+ "pythonPostJobCallback",
+ label="Python Post Frame Callback",
+ default=cls.pythonPostJobCallback,
+ tooltip=(
+ "When the translation has finished the string specified "
+ "is evaluated as a python command."
+ )
+ )
+ })
+
+ defs = super(ExtractAlembic, cls).get_attribute_defs()
+
+ defs.extend([
+ UISeparatorDef("sep_alembic_options"),
+ UILabelDef("Alembic Options"),
+ ])
+
+ # Only append the argument definitions exposed as overrides
+ # in the project settings.
+ overrides = set(cls.overrides)
+ for key, value in override_defs.items():
+ if key not in overrides:
+ continue
+
+ defs.append(value)
+
+ defs.append(
+ UISeparatorDef("sep_alembic_options_end")
+ )
+
+ return defs
+
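# Illustrative sketch (not part of the patch): `cls.overrides` acts as an
# allow-list over the definitions above, so studios choose which Alembic
# arguments artists may change in the Publisher. With hypothetical data:
_override_defs = {"eulerFilter": "<BoolDef>", "dataFormat": "<EnumDef>",
                  "verbose": "<BoolDef>"}
_overrides = {"eulerFilter", "dataFormat"}  # from project settings
_exposed = [d for key, d in _override_defs.items() if key in _overrides]
assert len(_exposed) == 2  # only opted-in attribute defs reach the UI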
class ExtractAnimation(ExtractAlembic):
- label = "Extract Animation"
+ label = "Extract Animation (Alembic)"
families = ["animation"]
def get_members_and_roots(self, instance):
-
# Collect the out set nodes
out_sets = [node for node in instance if node.endswith("out_SET")]
if len(out_sets) != 1:
- raise RuntimeError("Couldn't find exactly one out_SET: "
- "{0}".format(out_sets))
+ raise KnownPublishError(
+ "Couldn't find exactly one out_SET: {0}".format(out_sets)
+ )
out_set = out_sets[0]
- roots = cmds.sets(out_set, query=True)
+ roots = cmds.sets(out_set, query=True) or []
# Include all descendants
- nodes = roots + cmds.listRelatives(roots,
- allDescendents=True,
- fullPath=True) or []
+ # Copy the list so `+=` does not mutate `roots` in place
+ nodes = list(roots)
+ nodes += cmds.listRelatives(
+ roots, allDescendents=True, fullPath=True
+ ) or []
return nodes, roots
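# Illustrative sketch (not part of the patch): the `list(roots)` copy above
# matters because `+=` mutates a list in place, so an alias would silently
# grow `roots` to include all descendants:
_roots = ["|char_GRP"]
_nodes = _roots             # alias, not a copy
_nodes += ["|char_GRP|geo"]
assert _roots == ["|char_GRP", "|char_GRP|geo"]  # `roots` mutated too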
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py b/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py
index 3637a58614..5aefdfc33a 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py
@@ -3,8 +3,8 @@ import os
from maya import cmds
from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
- extract_alembic,
suspended_refresh,
maintained_selection,
iter_visible_nodes_in_range
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py b/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py
index 9286869c60..66dd805437 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py
@@ -5,7 +5,13 @@ import os
from maya import cmds
from ayon_core.pipeline import publish
-from ayon_core.hosts.maya.api.lib import maintained_selection
+from ayon_core.hosts.maya.api.lib import (
+ maintained_selection,
+ renderlayer
+)
+from ayon_core.hosts.maya.api.render_setup_tools import (
+ allow_export_from_render_setup_layer
+)
class ExtractRedshiftProxy(publish.Extractor):
@@ -18,6 +24,9 @@ class ExtractRedshiftProxy(publish.Extractor):
def process(self, instance):
"""Extractor entry point."""
+ # Make sure Redshift is loaded
+ cmds.loadPlugin("redshift4maya", quiet=True)
+
staging_dir = self.staging_dir(instance)
file_name = "{}.rs".format(instance.name)
file_path = os.path.join(staging_dir, file_name)
@@ -60,14 +69,22 @@ class ExtractRedshiftProxy(publish.Extractor):
# Write out rs file
self.log.debug("Writing: '%s'" % file_path)
+
+ # Allow overriding what renderlayer to export from. By default force
+ # it to the default render layer. (Note that the renderlayer isn't
+ # currently exposed as an attribute to artists)
+ layer = instance.data.get("renderLayer", "defaultRenderLayer")
+
with maintained_selection():
- cmds.select(instance.data["setMembers"], noExpand=True)
- cmds.file(file_path,
- pr=False,
- force=True,
- type="Redshift Proxy",
- exportSelected=True,
- options=rs_options)
+ with renderlayer(layer):
+ with allow_export_from_render_setup_layer():
+ cmds.select(instance.data["setMembers"], noExpand=True)
+ cmds.file(file_path,
+ preserveReferences=False,
+ force=True,
+ type="Redshift Proxy",
+ exportSelected=True,
+ options=rs_options)
if "representations" not in instance.data:
instance.data["representations"] = []
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
index 1a389f3d33..b5cc7745a1 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
@@ -5,8 +5,8 @@ import os
from maya import cmds # noqa
from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
- extract_alembic,
suspended_refresh,
maintained_selection
)
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py b/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py
index d799486184..54d295b479 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py
@@ -5,7 +5,7 @@ import copy
from maya import cmds
import pyblish.api
-from ayon_core.hosts.maya.api.lib import extract_alembic
+from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.pipeline import publish
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py b/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py
new file mode 100644
index 0000000000..5197100406
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py
@@ -0,0 +1,98 @@
+import pyblish.api
+
+from ayon_core.pipeline import OptionalPyblishPluginMixin
+from ayon_core.pipeline.publish import RepairAction, PublishValidationError
+
+
+class ValidateAlembicDefaultsPointcache(
+ pyblish.api.InstancePlugin, OptionalPyblishPluginMixin
+):
+ """Validate the attributes on the instance are defaults.
+
+ The defaults are defined in the project settings.
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["pointcache"]
+ hosts = ["maya"]
+ label = "Validate Alembic Options Defaults"
+ actions = [RepairAction]
+ optional = True
+
+ plugin_name = "ExtractAlembic"
+
+ @classmethod
+ def _get_settings(cls, context):
+ maya_settings = context.data["project_settings"]["maya"]
+ settings = maya_settings["publish"]["ExtractAlembic"]
+ return settings
+
+ @classmethod
+ def _get_publish_attributes(cls, instance):
+ attributes = instance.data["publish_attributes"][cls.plugin_name]
+
+ return attributes
+
+ def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
+ settings = self._get_settings(instance.context)
+
+ attributes = self._get_publish_attributes(instance)
+
+ msg = (
+ "Alembic Extract setting \"{}\" is not the default value:"
+ "\nCurrent: {}"
+ "\nDefault Value: {}\n"
+ )
+ errors = []
+ for key, value in attributes.items():
+ default_value = settings[key]
+
+ # Lists are best compared sorted since we can't rely on the order
+ # of the items.
+ if isinstance(value, list):
+ value = sorted(value)
+ default_value = sorted(default_value)
+
+ if value != default_value:
+ errors.append(msg.format(key, value, default_value))
+
+ if errors:
+ raise PublishValidationError("\n".join(errors))
+
+ @classmethod
+ def repair(cls, instance):
+ # Find create instance twin.
+ create_context = instance.context.data["create_context"]
+ create_instance = create_context.get_instance_by_id(
+ instance.data["instance_id"]
+ )
+
+ # Set the settings values on the create context then save to workfile.
+ plugin_name = cls.plugin_name
+ attributes = cls._get_publish_attributes(instance)
+ settings = cls._get_settings(instance.context)
+ create_publish_attributes = create_instance.data["publish_attributes"]
+ for key in attributes:
+ create_publish_attributes[plugin_name][key] = settings[key]
+
+ create_context.save_changes()
+
+
+class ValidateAlembicDefaultsAnimation(
+ ValidateAlembicDefaultsPointcache
+):
+ """Validate the attributes on the instance are defaults.
+
+ The defaults are defined in the project settings.
+ """
+ label = "Validate Alembic Options Defaults"
+ families = ["animation"]
+ plugin_name = "ExtractAnimation"
diff --git a/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py
new file mode 100644
index 0000000000..5e73933722
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py
@@ -0,0 +1,270 @@
+import json
+
+from maya import cmds
+
+from ayon_core.pipeline.workfile.workfile_template_builder import (
+ PlaceholderPlugin,
+ LoadPlaceholderItem,
+ PlaceholderLoadMixin,
+)
+from ayon_core.hosts.maya.api.lib import (
+ read,
+ imprint,
+ get_reference_node
+)
+from ayon_core.hosts.maya.api.workfile_template_builder import PLACEHOLDER_SET
+
+
+class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
+ identifier = "maya.load"
+ label = "Maya load"
+
+ def _collect_scene_placeholders(self):
+ # Cache placeholder data to shared data
+ placeholder_nodes = self.builder.get_shared_populate_data(
+ "placeholder_nodes"
+ )
+ if placeholder_nodes is None:
+ attributes = cmds.ls("*.plugin_identifier", long=True)
+ placeholder_nodes = {}
+ for attribute in attributes:
+ node_name = attribute.rpartition(".")[0]
+ placeholder_nodes[node_name] = (
+ self._parse_placeholder_node_data(node_name)
+ )
+
+ self.builder.set_shared_populate_data(
+ "placeholder_nodes", placeholder_nodes
+ )
+ return placeholder_nodes
+
+ def _parse_placeholder_node_data(self, node_name):
+ placeholder_data = read(node_name)
+ parent_name = (
+ cmds.getAttr(node_name + ".parent", asString=True)
+ or node_name.rpartition("|")[0]
+ or ""
+ )
+ if parent_name:
+ siblings = cmds.listRelatives(parent_name, children=True)
+ else:
+ siblings = cmds.ls(assemblies=True)
+ node_shortname = node_name.rpartition("|")[2]
+ current_index = cmds.getAttr(node_name + ".index", asString=True)
+ if current_index < 0:
+ current_index = siblings.index(node_shortname)
+
+ placeholder_data.update({
+ "parent": parent_name,
+ "index": current_index
+ })
+ return placeholder_data
+
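# Illustrative sketch (not part of the patch): ".index" is created with a
# -1 default (see create_placeholder below), meaning "derive the position
# from the placeholder's current place among its siblings":
_siblings = ["env_GRP", "LoadAnimation_PLC", "chars_GRP"]  # hypothetical
_current_index = -1
if _current_index < 0:
    _current_index = _siblings.index("LoadAnimation_PLC")
assert _current_index == 1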
+ def _create_placeholder_name(self, placeholder_data):
+ placeholder_name_parts = placeholder_data["builder_type"].split("_")
+
+ pos = 1
+ placeholder_product_type = placeholder_data.get("product_type")
+ if placeholder_product_type is None:
+ placeholder_product_type = placeholder_data.get("family")
+
+ if placeholder_product_type:
+ placeholder_name_parts.insert(pos, placeholder_product_type)
+ pos += 1
+
+ # add loader arguments if any
+ loader_args = placeholder_data["loader_args"]
+ if loader_args:
+ loader_args = json.loads(loader_args.replace('\'', '\"'))
+ for value in loader_args.values():
+ placeholder_name_parts.insert(pos, value)
+ pos += 1
+
+ placeholder_name = "_".join(placeholder_name_parts)
+
+ return placeholder_name.capitalize()
+
+ def _get_loaded_repre_ids(self):
+ loaded_representation_ids = self.builder.get_shared_populate_data(
+ "loaded_representation_ids"
+ )
+ if loaded_representation_ids is None:
+ try:
+ containers = cmds.sets("AVALON_CONTAINERS", q=True) or []
+ except ValueError:
+ containers = []
+
+ loaded_representation_ids = {
+ cmds.getAttr(container + ".representation")
+ for container in containers
+ }
+ self.builder.set_shared_populate_data(
+ "loaded_representation_ids", loaded_representation_ids
+ )
+ return loaded_representation_ids
+
+ def create_placeholder(self, placeholder_data):
+ selection = cmds.ls(selection=True)
+ if len(selection) > 1:
+ raise ValueError("More then one item are selected")
+
+ parent = selection[0] if selection else None
+
+ placeholder_data["plugin_identifier"] = self.identifier
+
+ placeholder_name = self._create_placeholder_name(placeholder_data)
+
+ placeholder = cmds.spaceLocator(name=placeholder_name)[0]
+ if parent:
+ placeholder = cmds.parent(placeholder, selection[0])[0]
+
+ imprint(placeholder, placeholder_data)
+
+ # Add helper attributes to keep placeholder info
+ cmds.addAttr(
+ placeholder,
+ longName="parent",
+ hidden=True,
+ dataType="string"
+ )
+ cmds.addAttr(
+ placeholder,
+ longName="index",
+ hidden=True,
+ attributeType="short",
+ defaultValue=-1
+ )
+
+ cmds.setAttr(placeholder + ".parent", "", type="string")
+
+ def update_placeholder(self, placeholder_item, placeholder_data):
+ node_name = placeholder_item.scene_identifier
+ new_values = {}
+ for key, value in placeholder_data.items():
+ placeholder_value = placeholder_item.data.get(key)
+ if value != placeholder_value:
+ new_values[key] = value
+ placeholder_item.data[key] = value
+
+ for key in new_values:
+ cmds.deleteAttr(node_name + "." + key)
+
+ imprint(node_name, new_values)
+
+ def collect_placeholders(self):
+ output = []
+ scene_placeholders = self._collect_scene_placeholders()
+ for node_name, placeholder_data in scene_placeholders.items():
+ if placeholder_data.get("plugin_identifier") != self.identifier:
+ continue
+
+ # TODO do data validations and maybe upgrades if they are invalid
+ output.append(
+ LoadPlaceholderItem(node_name, placeholder_data, self)
+ )
+
+ return output
+
+ def populate_placeholder(self, placeholder):
+ self.populate_load_placeholder(placeholder)
+
+ def repopulate_placeholder(self, placeholder):
+ repre_ids = self._get_loaded_repre_ids()
+ self.populate_load_placeholder(placeholder, repre_ids)
+
+ def get_placeholder_options(self, options=None):
+ return self.get_load_plugin_options(options)
+
+ def post_placeholder_process(self, placeholder, failed):
+ """Cleanup placeholder after load of its corresponding representations.
+
+ Args:
+ placeholder (PlaceholderItem): Item which was just used to load
+ representation.
+ failed (bool): Loading of representation failed.
+ """
+ # Hide placeholder and add them to placeholder set
+ node = placeholder.scene_identifier
+
+ # If we just populate the placeholders from current scene, the
+ # placeholder set will not be created so account for that.
+ if not cmds.objExists(PLACEHOLDER_SET):
+ cmds.sets(name=PLACEHOLDER_SET, empty=True)
+
+ cmds.sets(node, addElement=PLACEHOLDER_SET)
+ cmds.hide(node)
+ cmds.setAttr(node + ".hiddenInOutliner", True)
+
+ def delete_placeholder(self, placeholder):
+ """Remove placeholder if building was successful"""
+ cmds.delete(placeholder.scene_identifier)
+
+ def load_succeed(self, placeholder, container):
+ self._parent_in_hierarchy(placeholder, container)
+
+ def _parent_in_hierarchy(self, placeholder, container):
+ """Parent loaded container to placeholder's parent.
+
+ ie : Set loaded content as placeholder's sibling
+
+ Args:
+ container (str): Placeholder loaded containers
+ """
+
+ if not container:
+ return
+
+ roots = cmds.sets(container, q=True) or []
+ ref_node = None
+ try:
+ ref_node = get_reference_node(roots)
+ except AssertionError as e:
+ self.log.info(e.args[0])
+
+ nodes_to_parent = []
+ for root in roots:
+ if ref_node:
+ ref_root = cmds.referenceQuery(root, nodes=True)[0]
+ ref_root = (
+ cmds.listRelatives(ref_root, parent=True, path=True) or
+ [ref_root]
+ )
+ nodes_to_parent.extend(ref_root)
+ continue
+ if root.endswith("_RN"):
+ # Backwards compatibility for hardcoded reference names.
+ ref_root = cmds.referenceQuery(root, nodes=True)[0]
+ ref_root = cmds.listRelatives(ref_root, parent=True) or [ref_root]
+ nodes_to_parent.extend(ref_root)
+ elif root not in cmds.listSets(allSets=True):
+ nodes_to_parent.append(root)
+
+ elif not cmds.sets(root, q=True):
+ return
+
+ # Move loaded nodes to correct index in outliner hierarchy
+ placeholder_form = cmds.xform(
+ placeholder.scene_identifier,
+ q=True,
+ matrix=True,
+ worldSpace=True
+ )
+ scene_parent = cmds.listRelatives(
+ placeholder.scene_identifier, parent=True, fullPath=True
+ )
+ for node in set(nodes_to_parent):
+ cmds.reorder(node, front=True)
+ cmds.reorder(node, relative=placeholder.data["index"])
+ cmds.xform(node, matrix=placeholder_form, ws=True)
+ if scene_parent:
+ cmds.parent(node, scene_parent)
+ else:
+ if cmds.listRelatives(node, parent=True):
+ cmds.parent(node, world=True)
+
+ holding_sets = cmds.listSets(object=placeholder.scene_identifier)
+ if not holding_sets:
+ return
+ for holding_set in holding_sets:
+ cmds.sets(roots, forceElement=holding_set)
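# Illustrative sketch (not part of the patch): the collectors above cache
# scene scans in the builder's shared populate data so repeated placeholder
# population does not rescan the scene. A hypothetical minimal equivalent:
class _SharedData:
    def __init__(self):
        self._cache = {}

    def get_shared_populate_data(self, key):
        return self._cache.get(key)

    def set_shared_populate_data(self, key, value):
        self._cache[key] = value

_builder = _SharedData()
if _builder.get_shared_populate_data("placeholder_nodes") is None:
    # the expensive scene scan would happen only once
    _builder.set_shared_populate_data("placeholder_nodes", {"node": {}})
assert _builder.get_shared_populate_data("placeholder_nodes") == {"node": {}}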
diff --git a/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py
index 88ef4b201a..c1d9f019e4 100644
--- a/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py
+++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py
@@ -7,7 +7,7 @@ from maya import cmds
import ayon_api
from ayon_core.pipeline import get_current_project_name
-import ayon_core.hosts.maya.lib as maya_lib
+import ayon_core.hosts.maya.api.lib as maya_lib
from . import lib
from .alembic import get_alembic_ids_cache
diff --git a/client/ayon_core/hosts/nuke/api/lib.py b/client/ayon_core/hosts/nuke/api/lib.py
index 78cbe85097..e3505a16f2 100644
--- a/client/ayon_core/hosts/nuke/api/lib.py
+++ b/client/ayon_core/hosts/nuke/api/lib.py
@@ -1495,18 +1495,28 @@ class WorkfileSettings(object):
filter_knobs = [
"viewerProcess",
- "wipe_position"
+ "wipe_position",
+ "monitorOutOutputTransform"
]
+ display, viewer = get_viewer_config_from_string(
+ viewer_dict["viewerProcess"]
+ )
+ viewer_process = create_viewer_profile_string(
+ viewer, display, path_like=False
+ )
+ display, viewer = get_viewer_config_from_string(
+ viewer_dict["output_transform"]
+ )
+ output_transform = create_viewer_profile_string(
+ viewer, display, path_like=False
+ )
erased_viewers = []
for v in nuke.allNodes(filter="Viewer"):
# set viewProcess to preset from settings
- v["viewerProcess"].setValue(
- str(viewer_dict["viewerProcess"])
- )
+ v["viewerProcess"].setValue(viewer_process)
- if str(viewer_dict["viewerProcess"]) \
- not in v["viewerProcess"].value():
+ if viewer_process not in v["viewerProcess"].value():
copy_inputs = v.dependencies()
copy_knobs = {k: v[k].value() for k in v.knobs()
if k not in filter_knobs}
@@ -1524,11 +1534,11 @@ class WorkfileSettings(object):
# set copied knobs
for k, v in copy_knobs.items():
- print(k, v)
nv[k].setValue(v)
# set viewerProcess
- nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"]))
+ nv["viewerProcess"].setValue(viewer_process)
+ nv["monitorOutOutputTransform"].setValue(output_transform)
if erased_viewers:
log.warning(
@@ -1547,7 +1557,6 @@ class WorkfileSettings(object):
host_name="nuke"
)
- viewer_process_settings = imageio_host["viewer"]["viewerProcess"]
workfile_settings = imageio_host["workfile"]
color_management = workfile_settings["color_management"]
native_ocio_config = workfile_settings["native_ocio_config"]
@@ -1574,29 +1583,6 @@ class WorkfileSettings(object):
residual_path
))
- # get monitor lut from settings respecting Nuke version differences
- monitor_lut = workfile_settings["thumbnail_space"]
- monitor_lut_data = self._get_monitor_settings(
- viewer_process_settings, monitor_lut
- )
- monitor_lut_data["workingSpaceLUT"] = (
- workfile_settings["working_space"]
- )
-
- # then set the rest
- for knob, value_ in monitor_lut_data.items():
- # skip unfilled ocio config path
- # it will be dict in value
- if isinstance(value_, dict):
- continue
- # skip empty values
- if not value_:
- continue
- if self._root_node[knob].value() not in value_:
- self._root_node[knob].setValue(str(value_))
- log.debug("nuke.root()['{}'] changed to: {}".format(
- knob, value_))
-
# set ocio config path
if config_data:
config_path = config_data["path"].replace("\\", "/")
@@ -1611,6 +1597,31 @@ class WorkfileSettings(object):
if correct_settings:
self._set_ocio_config_path_to_workfile(config_data)
+ # get monitor lut from settings respecting Nuke version differences
+ monitor_lut_data = self._get_monitor_settings(
+ workfile_settings["monitor_out_lut"],
+ workfile_settings["monitor_lut"]
+ )
+ monitor_lut_data.update({
+ "workingSpaceLUT": workfile_settings["working_space"],
+ "int8Lut": workfile_settings["int_8_lut"],
+ "int16Lut": workfile_settings["int_16_lut"],
+ "logLut": workfile_settings["log_lut"],
+ "floatLut": workfile_settings["float_lut"]
+ })
+
+ # then set the rest
+ for knob, value_ in monitor_lut_data.items():
+ # skip an unfilled ocio config path
+ # (it comes through as a dict value)
+ if isinstance(value_, dict):
+ continue
+ # skip empty values
+ if not value_:
+ continue
+ self._root_node[knob].setValue(str(value_))
+ log.debug("nuke.root()['{}'] changed to: {}".format(knob, value_))
+
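# Illustrative sketch (not part of the patch): the loop above skips two
# shapes of "no value" before writing knobs, which a plain dict filter
# can demonstrate:
_monitor_lut_data = {"floatLut": "linear", "logLut": "", "ocio": {"p": 1}}
_to_set = {k: v for k, v in _monitor_lut_data.items()
           if v and not isinstance(v, dict)}
assert _to_set == {"floatLut": "linear"}  # only real values reach knobs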
def _get_monitor_settings(self, viewer_lut, monitor_lut):
""" Get monitor settings from viewer and monitor lut
diff --git a/client/ayon_core/hosts/nuke/api/pipeline.py b/client/ayon_core/hosts/nuke/api/pipeline.py
index 0d44aba2f9..d35a2e89e0 100644
--- a/client/ayon_core/hosts/nuke/api/pipeline.py
+++ b/client/ayon_core/hosts/nuke/api/pipeline.py
@@ -18,6 +18,7 @@ from ayon_core.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
register_inventory_action_path,
+ register_workfile_build_plugin_path,
AYON_INSTANCE_ID,
AVALON_INSTANCE_ID,
AVALON_CONTAINER_ID,
@@ -52,8 +53,6 @@ from .lib import (
MENU_LABEL,
)
from .workfile_template_builder import (
- NukePlaceholderLoadPlugin,
- NukePlaceholderCreatePlugin,
build_workfile_template,
create_placeholder,
update_placeholder,
@@ -76,6 +75,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
+WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
# registering pyblish gui regarding settings in presets
if os.getenv("PYBLISH_GUI", None):
@@ -105,18 +105,11 @@ class NukeHost(
def get_workfile_extensions(self):
return file_extensions()
- def get_workfile_build_placeholder_plugins(self):
- return [
- NukePlaceholderLoadPlugin,
- NukePlaceholderCreatePlugin
- ]
-
def get_containers(self):
return ls()
def install(self):
- ''' Installing all requarements for Nuke host
- '''
+ """Installing all requirements for Nuke host"""
pyblish.api.register_host("nuke")
@@ -125,6 +118,7 @@ class NukeHost(
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
+ register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
# Register AYON event for workfiles loading.
register_event_callback("workio.open_file", check_inventory_versions)
@@ -178,7 +172,6 @@ def add_nuke_callbacks():
# set apply all workfile settings on script load and save
nuke.addOnScriptLoad(WorkfileSettings().set_context_settings)
-
if nuke_settings["dirmap"]["enabled"]:
log.info("Added Nuke's dir-mapping callback ...")
# Add dirmap for file paths.
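# Illustrative sketch (not part of the patch): hosts now register a plugin
# directory instead of returning plugin classes from the removed
# get_workfile_build_placeholder_plugins(). A hypothetical host install():
import os
from ayon_core.pipeline import register_workfile_build_plugin_path

PLUGINS_DIR = "/studio/hosts/myhost/plugins"  # hypothetical path
register_workfile_build_plugin_path(
    os.path.join(PLUGINS_DIR, "workfile_build")
)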
diff --git a/client/ayon_core/hosts/nuke/api/plugin.py b/client/ayon_core/hosts/nuke/api/plugin.py
index 5b97fab0c2..fb56dec833 100644
--- a/client/ayon_core/hosts/nuke/api/plugin.py
+++ b/client/ayon_core/hosts/nuke/api/plugin.py
@@ -1151,7 +1151,6 @@ def _remove_old_knobs(node):
"OpenpypeDataGroup", "OpenpypeDataGroup_End", "deadlinePriority",
"deadlineChunkSize", "deadlineConcurrentTasks", "Deadline"
]
- print(node.name())
# remove all old knobs
for knob in node.allKnobs():
diff --git a/client/ayon_core/hosts/nuke/api/workfile_template_builder.py b/client/ayon_core/hosts/nuke/api/workfile_template_builder.py
index 495edd9e5f..aebf91c4a4 100644
--- a/client/ayon_core/hosts/nuke/api/workfile_template_builder.py
+++ b/client/ayon_core/hosts/nuke/api/workfile_template_builder.py
@@ -1,30 +1,17 @@
import collections
import nuke
+
from ayon_core.pipeline import registered_host
from ayon_core.pipeline.workfile.workfile_template_builder import (
AbstractTemplateBuilder,
PlaceholderPlugin,
- LoadPlaceholderItem,
- CreatePlaceholderItem,
- PlaceholderLoadMixin,
- PlaceholderCreateMixin,
)
from ayon_core.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from .lib import (
- find_free_space_to_paste_nodes,
- get_extreme_positions,
- get_group_io_nodes,
imprint,
- refresh_node,
- refresh_nodes,
reset_selection,
- get_names_from_nodes,
- get_nodes_by_names,
- select_nodes,
- duplicate_node,
- node_tempfile,
get_main_window,
WorkfileSettings,
)
@@ -54,6 +41,7 @@ class NukeTemplateBuilder(AbstractTemplateBuilder):
return True
+
class NukePlaceholderPlugin(PlaceholderPlugin):
node_color = 4278190335
@@ -120,843 +108,6 @@ class NukePlaceholderPlugin(PlaceholderPlugin):
nuke.delete(placeholder_node)
-class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
- identifier = "nuke.load"
- label = "Nuke load"
-
- def _parse_placeholder_node_data(self, node):
- placeholder_data = super(
- NukePlaceholderLoadPlugin, self
- )._parse_placeholder_node_data(node)
-
- node_knobs = node.knobs()
- nb_children = 0
- if "nb_children" in node_knobs:
- nb_children = int(node_knobs["nb_children"].getValue())
- placeholder_data["nb_children"] = nb_children
-
- siblings = []
- if "siblings" in node_knobs:
- siblings = node_knobs["siblings"].values()
- placeholder_data["siblings"] = siblings
-
- node_full_name = node.fullName()
- placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
- placeholder_data["last_loaded"] = []
- placeholder_data["delete"] = False
- return placeholder_data
-
- def _get_loaded_repre_ids(self):
- loaded_representation_ids = self.builder.get_shared_populate_data(
- "loaded_representation_ids"
- )
- if loaded_representation_ids is None:
- loaded_representation_ids = set()
- for node in nuke.allNodes():
- if "repre_id" in node.knobs():
- loaded_representation_ids.add(
- node.knob("repre_id").getValue()
- )
-
- self.builder.set_shared_populate_data(
- "loaded_representation_ids", loaded_representation_ids
- )
- return loaded_representation_ids
-
- def _before_placeholder_load(self, placeholder):
- placeholder.data["nodes_init"] = nuke.allNodes()
-
- def _before_repre_load(self, placeholder, representation):
- placeholder.data["last_repre_id"] = representation["id"]
-
- def collect_placeholders(self):
- output = []
- scene_placeholders = self._collect_scene_placeholders()
- for node_name, node in scene_placeholders.items():
- plugin_identifier_knob = node.knob("plugin_identifier")
- if (
- plugin_identifier_knob is None
- or plugin_identifier_knob.getValue() != self.identifier
- ):
- continue
-
- placeholder_data = self._parse_placeholder_node_data(node)
- # TODO do data validations and maybe updgrades if are invalid
- output.append(
- LoadPlaceholderItem(node_name, placeholder_data, self)
- )
-
- return output
-
- def populate_placeholder(self, placeholder):
- self.populate_load_placeholder(placeholder)
-
- def repopulate_placeholder(self, placeholder):
- repre_ids = self._get_loaded_repre_ids()
- self.populate_load_placeholder(placeholder, repre_ids)
-
- def get_placeholder_options(self, options=None):
- return self.get_load_plugin_options(options)
-
- def post_placeholder_process(self, placeholder, failed):
- """Cleanup placeholder after load of its corresponding representations.
-
- Args:
- placeholder (PlaceholderItem): Item which was just used to load
- representation.
- failed (bool): Loading of representation failed.
- """
- # deselect all selected nodes
- placeholder_node = nuke.toNode(placeholder.scene_identifier)
-
- # getting the latest nodes added
- # TODO get from shared populate data!
- nodes_init = placeholder.data["nodes_init"]
- nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init))
- self.log.debug("Loaded nodes: {}".format(nodes_loaded))
- if not nodes_loaded:
- return
-
- placeholder.data["delete"] = True
-
- nodes_loaded = self._move_to_placeholder_group(
- placeholder, nodes_loaded
- )
- placeholder.data["last_loaded"] = nodes_loaded
- refresh_nodes(nodes_loaded)
-
- # positioning of the loaded nodes
- min_x, min_y, _, _ = get_extreme_positions(nodes_loaded)
- for node in nodes_loaded:
- xpos = (node.xpos() - min_x) + placeholder_node.xpos()
- ypos = (node.ypos() - min_y) + placeholder_node.ypos()
- node.setXYpos(xpos, ypos)
- refresh_nodes(nodes_loaded)
-
- # fix the problem of z_order for backdrops
- self._fix_z_order(placeholder)
-
- if placeholder.data.get("keep_placeholder"):
- self._imprint_siblings(placeholder)
-
- if placeholder.data["nb_children"] == 0:
- # save initial nodes positions and dimensions, update them
- # and set inputs and outputs of loaded nodes
- if placeholder.data.get("keep_placeholder"):
- self._imprint_inits()
- self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded)
-
- self._set_loaded_connections(placeholder)
-
- elif placeholder.data["siblings"]:
- # create copies of placeholder siblings for the new loaded nodes,
- # set their inputs and outputs and update all nodes positions and
- # dimensions and siblings names
-
- siblings = get_nodes_by_names(placeholder.data["siblings"])
- refresh_nodes(siblings)
- copies = self._create_sib_copies(placeholder)
- new_nodes = list(copies.values()) # copies nodes
- self._update_nodes(new_nodes, nodes_loaded)
- placeholder_node.removeKnob(placeholder_node.knob("siblings"))
- new_nodes_name = get_names_from_nodes(new_nodes)
- imprint(placeholder_node, {"siblings": new_nodes_name})
- self._set_copies_connections(placeholder, copies)
-
- self._update_nodes(
- nuke.allNodes(),
- new_nodes + nodes_loaded,
- 20
- )
-
- new_siblings = get_names_from_nodes(new_nodes)
- placeholder.data["siblings"] = new_siblings
-
- else:
- # if the placeholder doesn't have siblings, the loaded
- # nodes will be placed in a free space
-
- xpointer, ypointer = find_free_space_to_paste_nodes(
- nodes_loaded, direction="bottom", offset=200
- )
- node = nuke.createNode("NoOp")
- reset_selection()
- nuke.delete(node)
- for node in nodes_loaded:
- xpos = (node.xpos() - min_x) + xpointer
- ypos = (node.ypos() - min_y) + ypointer
- node.setXYpos(xpos, ypos)
-
- placeholder.data["nb_children"] += 1
- reset_selection()
-
- # go back to root group
- nuke.root().begin()
-
- def _move_to_placeholder_group(self, placeholder, nodes_loaded):
- """
- opening the placeholder's group and copying loaded nodes in it.
-
- Returns :
- nodes_loaded (list): the new list of pasted nodes
- """
-
- groups_name = placeholder.data["group_name"]
- reset_selection()
- select_nodes(nodes_loaded)
- if groups_name:
- with node_tempfile() as filepath:
- nuke.nodeCopy(filepath)
- for node in nuke.selectedNodes():
- nuke.delete(node)
- group = nuke.toNode(groups_name)
- group.begin()
- nuke.nodePaste(filepath)
- nodes_loaded = nuke.selectedNodes()
- return nodes_loaded
-
- def _fix_z_order(self, placeholder):
- """Fix the problem of z_order when a backdrop is loaded."""
-
- nodes_loaded = placeholder.data["last_loaded"]
- loaded_backdrops = []
- bd_orders = set()
- for node in nodes_loaded:
- if isinstance(node, nuke.BackdropNode):
- loaded_backdrops.append(node)
- bd_orders.add(node.knob("z_order").getValue())
-
- if not bd_orders:
- return
-
- sib_orders = set()
- for node_name in placeholder.data["siblings"]:
- node = nuke.toNode(node_name)
- if isinstance(node, nuke.BackdropNode):
- sib_orders.add(node.knob("z_order").getValue())
-
- if not sib_orders:
- return
-
- min_order = min(bd_orders)
- max_order = max(sib_orders)
- for backdrop_node in loaded_backdrops:
- z_order = backdrop_node.knob("z_order").getValue()
- backdrop_node.knob("z_order").setValue(
- z_order + max_order - min_order + 1)
-
- def _imprint_siblings(self, placeholder):
- """
- - add siblings names to placeholder attributes (nodes loaded with it)
- - add Id to the attributes of all the other nodes
- """
-
- loaded_nodes = placeholder.data["last_loaded"]
- loaded_nodes_set = set(loaded_nodes)
- data = {"repre_id": str(placeholder.data["last_repre_id"])}
-
- for node in loaded_nodes:
- node_knobs = node.knobs()
- if "builder_type" not in node_knobs:
- # save the id of representation for all imported nodes
- imprint(node, data)
- node.knob("repre_id").setVisible(False)
- refresh_node(node)
- continue
-
- if (
- "is_placeholder" not in node_knobs
- or (
- "is_placeholder" in node_knobs
- and node.knob("is_placeholder").value()
- )
- ):
- siblings = list(loaded_nodes_set - {node})
- siblings_name = get_names_from_nodes(siblings)
- siblings = {"siblings": siblings_name}
- imprint(node, siblings)
-
- def _imprint_inits(self):
- """Add initial positions and dimensions to the attributes"""
-
- for node in nuke.allNodes():
- refresh_node(node)
- imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
- node.knob("x_init").setVisible(False)
- node.knob("y_init").setVisible(False)
- width = node.screenWidth()
- height = node.screenHeight()
- if "bdwidth" in node.knobs():
- imprint(node, {"w_init": width, "h_init": height})
- node.knob("w_init").setVisible(False)
- node.knob("h_init").setVisible(False)
- refresh_node(node)
-
- def _update_nodes(
- self, placeholder, nodes, considered_nodes, offset_y=None
- ):
- """Adjust backdrop nodes dimensions and positions.
-
- Considering some nodes sizes.
-
- Args:
- nodes (list): list of nodes to update
- considered_nodes (list): list of nodes to consider while updating
- positions and dimensions
- offset (int): distance between copies
- """
-
- placeholder_node = nuke.toNode(placeholder.scene_identifier)
-
- min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
-
- diff_x = diff_y = 0
- contained_nodes = [] # for backdrops
-
- if offset_y is None:
- width_ph = placeholder_node.screenWidth()
- height_ph = placeholder_node.screenHeight()
- diff_y = max_y - min_y - height_ph
- diff_x = max_x - min_x - width_ph
- contained_nodes = [placeholder_node]
- min_x = placeholder_node.xpos()
- min_y = placeholder_node.ypos()
- else:
- siblings = get_nodes_by_names(placeholder.data["siblings"])
- minX, _, maxX, _ = get_extreme_positions(siblings)
- diff_y = max_y - min_y + 20
- diff_x = abs(max_x - min_x - maxX + minX)
- contained_nodes = considered_nodes
-
- if diff_y <= 0 and diff_x <= 0:
- return
-
- for node in nodes:
- refresh_node(node)
-
- if (
- node == placeholder_node
- or node in considered_nodes
- ):
- continue
-
- if (
- not isinstance(node, nuke.BackdropNode)
- or (
- isinstance(node, nuke.BackdropNode)
- and not set(contained_nodes) <= set(node.getNodes())
- )
- ):
- if offset_y is None and node.xpos() >= min_x:
- node.setXpos(node.xpos() + diff_x)
-
- if node.ypos() >= min_y:
- node.setYpos(node.ypos() + diff_y)
-
- else:
- width = node.screenWidth()
- height = node.screenHeight()
- node.knob("bdwidth").setValue(width + diff_x)
- node.knob("bdheight").setValue(height + diff_y)
-
- refresh_node(node)
-
- def _set_loaded_connections(self, placeholder):
- """
- set inputs and outputs of loaded nodes"""
-
- placeholder_node = nuke.toNode(placeholder.scene_identifier)
- input_node, output_node = get_group_io_nodes(
- placeholder.data["last_loaded"]
- )
- for node in placeholder_node.dependent():
- for idx in range(node.inputs()):
- if node.input(idx) == placeholder_node and output_node:
- node.setInput(idx, output_node)
-
- for node in placeholder_node.dependencies():
- for idx in range(placeholder_node.inputs()):
- if placeholder_node.input(idx) == node and input_node:
- input_node.setInput(0, node)
-
- def _create_sib_copies(self, placeholder):
- """ creating copies of the palce_holder siblings (the ones who were
- loaded with it) for the new nodes added
-
- Returns :
- copies (dict) : with copied nodes names and their copies
- """
-
- copies = {}
- siblings = get_nodes_by_names(placeholder.data["siblings"])
- for node in siblings:
- new_node = duplicate_node(node)
-
- x_init = int(new_node.knob("x_init").getValue())
- y_init = int(new_node.knob("y_init").getValue())
- new_node.setXYpos(x_init, y_init)
- if isinstance(new_node, nuke.BackdropNode):
- w_init = new_node.knob("w_init").getValue()
- h_init = new_node.knob("h_init").getValue()
- new_node.knob("bdwidth").setValue(w_init)
- new_node.knob("bdheight").setValue(h_init)
- refresh_node(node)
-
- if "repre_id" in node.knobs().keys():
- node.removeKnob(node.knob("repre_id"))
- copies[node.name()] = new_node
- return copies
-
- def _set_copies_connections(self, placeholder, copies):
- """Set inputs and outputs of the copies.
-
- Args:
- copies (dict): Copied nodes by their names.
- """
-
- last_input, last_output = get_group_io_nodes(
- placeholder.data["last_loaded"]
- )
- siblings = get_nodes_by_names(placeholder.data["siblings"])
- siblings_input, siblings_output = get_group_io_nodes(siblings)
- copy_input = copies[siblings_input.name()]
- copy_output = copies[siblings_output.name()]
-
- for node_init in siblings:
- if node_init == siblings_output:
- continue
-
- node_copy = copies[node_init.name()]
- for node in node_init.dependent():
- for idx in range(node.inputs()):
- if node.input(idx) != node_init:
- continue
-
- if node in siblings:
- copies[node.name()].setInput(idx, node_copy)
- else:
- last_input.setInput(0, node_copy)
-
- for node in node_init.dependencies():
- for idx in range(node_init.inputs()):
- if node_init.input(idx) != node:
- continue
-
- if node_init == siblings_input:
- copy_input.setInput(idx, node)
- elif node in siblings:
- node_copy.setInput(idx, copies[node.name()])
- else:
- node_copy.setInput(idx, last_output)
-
- siblings_input.setInput(0, copy_output)
-
-
-class NukePlaceholderCreatePlugin(
- NukePlaceholderPlugin, PlaceholderCreateMixin
-):
- identifier = "nuke.create"
- label = "Nuke create"
-
- def _parse_placeholder_node_data(self, node):
- placeholder_data = super(
- NukePlaceholderCreatePlugin, self
- )._parse_placeholder_node_data(node)
-
- node_knobs = node.knobs()
- nb_children = 0
- if "nb_children" in node_knobs:
- nb_children = int(node_knobs["nb_children"].getValue())
- placeholder_data["nb_children"] = nb_children
-
- siblings = []
- if "siblings" in node_knobs:
- siblings = node_knobs["siblings"].values()
- placeholder_data["siblings"] = siblings
-
- node_full_name = node.fullName()
- placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
- placeholder_data["last_loaded"] = []
- placeholder_data["delete"] = False
- return placeholder_data
-
- def _before_instance_create(self, placeholder):
- placeholder.data["nodes_init"] = nuke.allNodes()
-
- def collect_placeholders(self):
- output = []
- scene_placeholders = self._collect_scene_placeholders()
- for node_name, node in scene_placeholders.items():
- plugin_identifier_knob = node.knob("plugin_identifier")
- if (
- plugin_identifier_knob is None
- or plugin_identifier_knob.getValue() != self.identifier
- ):
- continue
-
- placeholder_data = self._parse_placeholder_node_data(node)
-
- output.append(
- CreatePlaceholderItem(node_name, placeholder_data, self)
- )
-
- return output
-
- def populate_placeholder(self, placeholder):
- self.populate_create_placeholder(placeholder)
-
- def repopulate_placeholder(self, placeholder):
- self.populate_create_placeholder(placeholder)
-
- def get_placeholder_options(self, options=None):
- return self.get_create_plugin_options(options)
-
- def post_placeholder_process(self, placeholder, failed):
- """Cleanup placeholder after load of its corresponding representations.
-
- Args:
- placeholder (PlaceholderItem): Item which was just used to load
- representation.
- failed (bool): Loading of representation failed.
- """
- # deselect all selected nodes
- placeholder_node = nuke.toNode(placeholder.scene_identifier)
-
- # getting the latest nodes added
- nodes_init = placeholder.data["nodes_init"]
- nodes_created = list(set(nuke.allNodes()) - set(nodes_init))
- self.log.debug("Created nodes: {}".format(nodes_created))
- if not nodes_created:
- return
-
- placeholder.data["delete"] = True
-
- nodes_created = self._move_to_placeholder_group(
- placeholder, nodes_created
- )
- placeholder.data["last_created"] = nodes_created
- refresh_nodes(nodes_created)
-
- # positioning of the created nodes
- min_x, min_y, _, _ = get_extreme_positions(nodes_created)
- for node in nodes_created:
- xpos = (node.xpos() - min_x) + placeholder_node.xpos()
- ypos = (node.ypos() - min_y) + placeholder_node.ypos()
- node.setXYpos(xpos, ypos)
- refresh_nodes(nodes_created)
-
- # fix the problem of z_order for backdrops
- self._fix_z_order(placeholder)
-
- if placeholder.data.get("keep_placeholder"):
- self._imprint_siblings(placeholder)
-
- if placeholder.data["nb_children"] == 0:
- # save initial nodes positions and dimensions, update them
- # and set inputs and outputs of created nodes
-
- if placeholder.data.get("keep_placeholder"):
- self._imprint_inits()
- self._update_nodes(placeholder, nuke.allNodes(), nodes_created)
-
- self._set_created_connections(placeholder)
-
- elif placeholder.data["siblings"]:
- # create copies of placeholder siblings for the new created nodes,
- # set their inputs and outputs and update all nodes positions and
- # dimensions and siblings names
-
- siblings = get_nodes_by_names(placeholder.data["siblings"])
- refresh_nodes(siblings)
- copies = self._create_sib_copies(placeholder)
- new_nodes = list(copies.values()) # copies nodes
- self._update_nodes(new_nodes, nodes_created)
- placeholder_node.removeKnob(placeholder_node.knob("siblings"))
- new_nodes_name = get_names_from_nodes(new_nodes)
- imprint(placeholder_node, {"siblings": new_nodes_name})
- self._set_copies_connections(placeholder, copies)
-
- self._update_nodes(
- nuke.allNodes(),
- new_nodes + nodes_created,
- 20
- )
-
- new_siblings = get_names_from_nodes(new_nodes)
- placeholder.data["siblings"] = new_siblings
-
- else:
- # if the placeholder doesn't have siblings, the created
- # nodes will be placed in a free space
-
- xpointer, ypointer = find_free_space_to_paste_nodes(
- nodes_created, direction="bottom", offset=200
- )
- node = nuke.createNode("NoOp")
- reset_selection()
- nuke.delete(node)
- for node in nodes_created:
- xpos = (node.xpos() - min_x) + xpointer
- ypos = (node.ypos() - min_y) + ypointer
- node.setXYpos(xpos, ypos)
-
- placeholder.data["nb_children"] += 1
- reset_selection()
-
- # go back to root group
- nuke.root().begin()
-
- def _move_to_placeholder_group(self, placeholder, nodes_created):
- """
- opening the placeholder's group and copying created nodes in it.
-
- Returns :
- nodes_created (list): the new list of pasted nodes
- """
- groups_name = placeholder.data["group_name"]
- reset_selection()
- select_nodes(nodes_created)
- if groups_name:
- with node_tempfile() as filepath:
- nuke.nodeCopy(filepath)
- for node in nuke.selectedNodes():
- nuke.delete(node)
- group = nuke.toNode(groups_name)
- group.begin()
- nuke.nodePaste(filepath)
- nodes_created = nuke.selectedNodes()
- return nodes_created
-
- def _fix_z_order(self, placeholder):
- """Fix the problem of z_order when a backdrop is create."""
-
- nodes_created = placeholder.data["last_created"]
- created_backdrops = []
- bd_orders = set()
- for node in nodes_created:
- if isinstance(node, nuke.BackdropNode):
- created_backdrops.append(node)
- bd_orders.add(node.knob("z_order").getValue())
-
- if not bd_orders:
- return
-
- sib_orders = set()
- for node_name in placeholder.data["siblings"]:
- node = nuke.toNode(node_name)
- if isinstance(node, nuke.BackdropNode):
- sib_orders.add(node.knob("z_order").getValue())
-
- if not sib_orders:
- return
-
- min_order = min(bd_orders)
- max_order = max(sib_orders)
- for backdrop_node in created_backdrops:
- z_order = backdrop_node.knob("z_order").getValue()
- backdrop_node.knob("z_order").setValue(
- z_order + max_order - min_order + 1)
-
- def _imprint_siblings(self, placeholder):
- """
- - add siblings names to placeholder attributes (nodes created with it)
- - add Id to the attributes of all the other nodes
- """
-
- created_nodes = placeholder.data["last_created"]
- created_nodes_set = set(created_nodes)
-
- for node in created_nodes:
- node_knobs = node.knobs()
-
- if (
- "is_placeholder" not in node_knobs
- or (
- "is_placeholder" in node_knobs
- and node.knob("is_placeholder").value()
- )
- ):
- siblings = list(created_nodes_set - {node})
- siblings_name = get_names_from_nodes(siblings)
- siblings = {"siblings": siblings_name}
- imprint(node, siblings)
-
- def _imprint_inits(self):
- """Add initial positions and dimensions to the attributes"""
-
- for node in nuke.allNodes():
- refresh_node(node)
- imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
- node.knob("x_init").setVisible(False)
- node.knob("y_init").setVisible(False)
- width = node.screenWidth()
- height = node.screenHeight()
- if "bdwidth" in node.knobs():
- imprint(node, {"w_init": width, "h_init": height})
- node.knob("w_init").setVisible(False)
- node.knob("h_init").setVisible(False)
- refresh_node(node)
-
- def _update_nodes(
- self, placeholder, nodes, considered_nodes, offset_y=None
- ):
- """Adjust backdrop nodes dimensions and positions.
-
- Considering some nodes sizes.
-
- Args:
- nodes (list): list of nodes to update
- considered_nodes (list): list of nodes to consider while updating
- positions and dimensions
- offset (int): distance between copies
- """
-
- placeholder_node = nuke.toNode(placeholder.scene_identifier)
-
- min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
-
- diff_x = diff_y = 0
- contained_nodes = [] # for backdrops
-
- if offset_y is None:
- width_ph = placeholder_node.screenWidth()
- height_ph = placeholder_node.screenHeight()
- diff_y = max_y - min_y - height_ph
- diff_x = max_x - min_x - width_ph
- contained_nodes = [placeholder_node]
- min_x = placeholder_node.xpos()
- min_y = placeholder_node.ypos()
- else:
- siblings = get_nodes_by_names(placeholder.data["siblings"])
- minX, _, maxX, _ = get_extreme_positions(siblings)
- diff_y = max_y - min_y + 20
- diff_x = abs(max_x - min_x - maxX + minX)
- contained_nodes = considered_nodes
-
- if diff_y <= 0 and diff_x <= 0:
- return
-
- for node in nodes:
- refresh_node(node)
-
- if (
- node == placeholder_node
- or node in considered_nodes
- ):
- continue
-
- if (
- not isinstance(node, nuke.BackdropNode)
- or (
- isinstance(node, nuke.BackdropNode)
- and not set(contained_nodes) <= set(node.getNodes())
- )
- ):
- if offset_y is None and node.xpos() >= min_x:
- node.setXpos(node.xpos() + diff_x)
-
- if node.ypos() >= min_y:
- node.setYpos(node.ypos() + diff_y)
-
- else:
- width = node.screenWidth()
- height = node.screenHeight()
- node.knob("bdwidth").setValue(width + diff_x)
- node.knob("bdheight").setValue(height + diff_y)
-
- refresh_node(node)
-
- def _set_created_connections(self, placeholder):
- """
- set inputs and outputs of created nodes"""
-
- placeholder_node = nuke.toNode(placeholder.scene_identifier)
- input_node, output_node = get_group_io_nodes(
- placeholder.data["last_created"]
- )
- for node in placeholder_node.dependent():
- for idx in range(node.inputs()):
- if node.input(idx) == placeholder_node and output_node:
- node.setInput(idx, output_node)
-
- for node in placeholder_node.dependencies():
- for idx in range(placeholder_node.inputs()):
- if placeholder_node.input(idx) == node and input_node:
- input_node.setInput(0, node)
-
- def _create_sib_copies(self, placeholder):
- """ creating copies of the palce_holder siblings (the ones who were
- created with it) for the new nodes added
-
- Returns :
- copies (dict) : with copied nodes names and their copies
- """
-
- copies = {}
- siblings = get_nodes_by_names(placeholder.data["siblings"])
- for node in siblings:
- new_node = duplicate_node(node)
-
- x_init = int(new_node.knob("x_init").getValue())
- y_init = int(new_node.knob("y_init").getValue())
- new_node.setXYpos(x_init, y_init)
- if isinstance(new_node, nuke.BackdropNode):
- w_init = new_node.knob("w_init").getValue()
- h_init = new_node.knob("h_init").getValue()
- new_node.knob("bdwidth").setValue(w_init)
- new_node.knob("bdheight").setValue(h_init)
- refresh_node(node)
-
- if "repre_id" in node.knobs().keys():
- node.removeKnob(node.knob("repre_id"))
- copies[node.name()] = new_node
- return copies
-
- def _set_copies_connections(self, placeholder, copies):
- """Set inputs and outputs of the copies.
-
- Args:
- copies (dict): Copied nodes by their names.
- """
-
- last_input, last_output = get_group_io_nodes(
- placeholder.data["last_created"]
- )
- siblings = get_nodes_by_names(placeholder.data["siblings"])
- siblings_input, siblings_output = get_group_io_nodes(siblings)
- copy_input = copies[siblings_input.name()]
- copy_output = copies[siblings_output.name()]
-
- for node_init in siblings:
- if node_init == siblings_output:
- continue
-
- node_copy = copies[node_init.name()]
- for node in node_init.dependent():
- for idx in range(node.inputs()):
- if node.input(idx) != node_init:
- continue
-
- if node in siblings:
- copies[node.name()].setInput(idx, node_copy)
- else:
- last_input.setInput(0, node_copy)
-
- for node in node_init.dependencies():
- for idx in range(node_init.inputs()):
- if node_init.input(idx) != node:
- continue
-
- if node_init == siblings_input:
- copy_input.setInput(idx, node)
- elif node in siblings:
- node_copy.setInput(idx, copies[node.name()])
- else:
- node_copy.setInput(idx, last_output)
-
- siblings_input.setInput(0, copy_output)
-
-
def build_workfile_template(*args, **kwargs):
builder = NukeTemplateBuilder(registered_host())
builder.build_template(*args, **kwargs)
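# Illustrative sketch (not part of the patch): with the load/create logic
# extracted, a placeholder plugin becomes a standalone module under
# plugins/workfile_build, as the new create_placeholder.py below shows.
# A hypothetical minimal load plugin would need little more than:
from ayon_core.pipeline.workfile.workfile_template_builder import (
    PlaceholderLoadMixin,
)
from ayon_core.hosts.nuke.api.workfile_template_builder import (
    NukePlaceholderPlugin,
)

class MyNukeLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
    identifier = "studio.nuke.load"  # hypothetical identifier
    label = "Studio Nuke load"

    def collect_placeholders(self):
        # parse scene nodes into LoadPlaceholderItem objects
        return []

    def populate_placeholder(self, placeholder):
        self.populate_load_placeholder(placeholder)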
diff --git a/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py b/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py
new file mode 100644
index 0000000000..a5490021e4
--- /dev/null
+++ b/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py
@@ -0,0 +1,428 @@
+import nuke
+
+from ayon_core.pipeline.workfile.workfile_template_builder import (
+ CreatePlaceholderItem,
+ PlaceholderCreateMixin,
+)
+from ayon_core.hosts.nuke.api.lib import (
+ find_free_space_to_paste_nodes,
+ get_extreme_positions,
+ get_group_io_nodes,
+ imprint,
+ refresh_node,
+ refresh_nodes,
+ reset_selection,
+ get_names_from_nodes,
+ get_nodes_by_names,
+ select_nodes,
+ duplicate_node,
+ node_tempfile,
+)
+from ayon_core.hosts.nuke.api.workfile_template_builder import (
+ NukePlaceholderPlugin
+)
+
+
+class NukePlaceholderCreatePlugin(
+ NukePlaceholderPlugin, PlaceholderCreateMixin
+):
+ identifier = "nuke.create"
+ label = "Nuke create"
+
+ def _parse_placeholder_node_data(self, node):
+ placeholder_data = super(
+ NukePlaceholderCreatePlugin, self
+ )._parse_placeholder_node_data(node)
+
+ node_knobs = node.knobs()
+ nb_children = 0
+ if "nb_children" in node_knobs:
+ nb_children = int(node_knobs["nb_children"].getValue())
+ placeholder_data["nb_children"] = nb_children
+
+ siblings = []
+ if "siblings" in node_knobs:
+ siblings = node_knobs["siblings"].values()
+ placeholder_data["siblings"] = siblings
+
+ node_full_name = node.fullName()
+ placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
+ placeholder_data["last_loaded"] = []
+ placeholder_data["delete"] = False
+ return placeholder_data
+
+ def _before_instance_create(self, placeholder):
+ placeholder.data["nodes_init"] = nuke.allNodes()
+
+ def collect_placeholders(self):
+ output = []
+ scene_placeholders = self._collect_scene_placeholders()
+ for node_name, node in scene_placeholders.items():
+ plugin_identifier_knob = node.knob("plugin_identifier")
+ if (
+ plugin_identifier_knob is None
+ or plugin_identifier_knob.getValue() != self.identifier
+ ):
+ continue
+
+ placeholder_data = self._parse_placeholder_node_data(node)
+
+ output.append(
+ CreatePlaceholderItem(node_name, placeholder_data, self)
+ )
+
+ return output
+
+ def populate_placeholder(self, placeholder):
+ self.populate_create_placeholder(placeholder)
+
+ def repopulate_placeholder(self, placeholder):
+ self.populate_create_placeholder(placeholder)
+
+ def get_placeholder_options(self, options=None):
+ return self.get_create_plugin_options(options)
+
+ def post_placeholder_process(self, placeholder, failed):
+ """Cleanup placeholder after load of its corresponding representations.
+
+ Args:
+ placeholder (PlaceholderItem): Item which was just used to load
+ representation.
+ failed (bool): Loading of representation failed.
+ """
+ # deselect all selected nodes
+ placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+ # getting the latest nodes added
+ nodes_init = placeholder.data["nodes_init"]
+ nodes_created = list(set(nuke.allNodes()) - set(nodes_init))
+ self.log.debug("Created nodes: {}".format(nodes_created))
+ if not nodes_created:
+ return
+
+ placeholder.data["delete"] = True
+
+ nodes_created = self._move_to_placeholder_group(
+ placeholder, nodes_created
+ )
+ placeholder.data["last_created"] = nodes_created
+ refresh_nodes(nodes_created)
+
+ # positioning of the created nodes
+ min_x, min_y, _, _ = get_extreme_positions(nodes_created)
+ for node in nodes_created:
+ xpos = (node.xpos() - min_x) + placeholder_node.xpos()
+ ypos = (node.ypos() - min_y) + placeholder_node.ypos()
+ node.setXYpos(xpos, ypos)
+ refresh_nodes(nodes_created)
+
+ # fix the problem of z_order for backdrops
+ self._fix_z_order(placeholder)
+
+ if placeholder.data.get("keep_placeholder"):
+ self._imprint_siblings(placeholder)
+
+ if placeholder.data["nb_children"] == 0:
+ # save initial nodes positions and dimensions, update them
+ # and set inputs and outputs of created nodes
+
+ if placeholder.data.get("keep_placeholder"):
+ self._imprint_inits()
+ self._update_nodes(placeholder, nuke.allNodes(), nodes_created)
+
+ self._set_created_connections(placeholder)
+
+ elif placeholder.data["siblings"]:
+ # create copies of placeholder siblings for the new created nodes,
+ # set their inputs and outputs and update all nodes positions and
+ # dimensions and siblings names
+
+ siblings = get_nodes_by_names(placeholder.data["siblings"])
+ refresh_nodes(siblings)
+ copies = self._create_sib_copies(placeholder)
+ new_nodes = list(copies.values()) # copies nodes
+            self._update_nodes(placeholder, new_nodes, nodes_created)
+ placeholder_node.removeKnob(placeholder_node.knob("siblings"))
+ new_nodes_name = get_names_from_nodes(new_nodes)
+ imprint(placeholder_node, {"siblings": new_nodes_name})
+ self._set_copies_connections(placeholder, copies)
+
+            self._update_nodes(
+                placeholder,
+                nuke.allNodes(),
+                new_nodes + nodes_created,
+                20
+            )
+
+ new_siblings = get_names_from_nodes(new_nodes)
+ placeholder.data["siblings"] = new_siblings
+
+ else:
+ # if the placeholder doesn't have siblings, the created
+ # nodes will be placed in a free space
+
+ xpointer, ypointer = find_free_space_to_paste_nodes(
+ nodes_created, direction="bottom", offset=200
+ )
+ node = nuke.createNode("NoOp")
+ reset_selection()
+ nuke.delete(node)
+ for node in nodes_created:
+ xpos = (node.xpos() - min_x) + xpointer
+ ypos = (node.ypos() - min_y) + ypointer
+ node.setXYpos(xpos, ypos)
+
+ placeholder.data["nb_children"] += 1
+ reset_selection()
+
+ # go back to root group
+ nuke.root().begin()
+
+ def _move_to_placeholder_group(self, placeholder, nodes_created):
+ """
+ opening the placeholder's group and copying created nodes in it.
+
+ Returns :
+ nodes_created (list): the new list of pasted nodes
+ """
+ groups_name = placeholder.data["group_name"]
+ reset_selection()
+ select_nodes(nodes_created)
+ if groups_name:
+ with node_tempfile() as filepath:
+ nuke.nodeCopy(filepath)
+ for node in nuke.selectedNodes():
+ nuke.delete(node)
+ group = nuke.toNode(groups_name)
+ group.begin()
+ nuke.nodePaste(filepath)
+ nodes_created = nuke.selectedNodes()
+ return nodes_created
+
+ def _fix_z_order(self, placeholder):
+ """Fix the problem of z_order when a backdrop is create."""
+
+ nodes_created = placeholder.data["last_created"]
+ created_backdrops = []
+ bd_orders = set()
+ for node in nodes_created:
+ if isinstance(node, nuke.BackdropNode):
+ created_backdrops.append(node)
+ bd_orders.add(node.knob("z_order").getValue())
+
+ if not bd_orders:
+ return
+
+ sib_orders = set()
+ for node_name in placeholder.data["siblings"]:
+ node = nuke.toNode(node_name)
+ if isinstance(node, nuke.BackdropNode):
+ sib_orders.add(node.knob("z_order").getValue())
+
+ if not sib_orders:
+ return
+
+ min_order = min(bd_orders)
+ max_order = max(sib_orders)
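+        # Normalize by the lowest created z_order and offset past the
+        # highest sibling z_order so created backdrops never share an
+        # order with existing sibling backdrops.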
+ for backdrop_node in created_backdrops:
+ z_order = backdrop_node.knob("z_order").getValue()
+ backdrop_node.knob("z_order").setValue(
+ z_order + max_order - min_order + 1)
+
+ def _imprint_siblings(self, placeholder):
+ """
+ - add siblings names to placeholder attributes (nodes created with it)
+ - add Id to the attributes of all the other nodes
+ """
+
+ created_nodes = placeholder.data["last_created"]
+ created_nodes_set = set(created_nodes)
+
+ for node in created_nodes:
+ node_knobs = node.knobs()
+
+            if (
+                "is_placeholder" not in node_knobs
+                or node.knob("is_placeholder").value()
+            ):
+ siblings = list(created_nodes_set - {node})
+ siblings_name = get_names_from_nodes(siblings)
+ siblings = {"siblings": siblings_name}
+ imprint(node, siblings)
+
+ def _imprint_inits(self):
+ """Add initial positions and dimensions to the attributes"""
+
+ for node in nuke.allNodes():
+ refresh_node(node)
+ imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
+ node.knob("x_init").setVisible(False)
+ node.knob("y_init").setVisible(False)
+ width = node.screenWidth()
+ height = node.screenHeight()
+ if "bdwidth" in node.knobs():
+ imprint(node, {"w_init": width, "h_init": height})
+ node.knob("w_init").setVisible(False)
+ node.knob("h_init").setVisible(False)
+ refresh_node(node)
+
+ def _update_nodes(
+ self, placeholder, nodes, considered_nodes, offset_y=None
+ ):
+ """Adjust backdrop nodes dimensions and positions.
+
+ Considering some nodes sizes.
+
+ Args:
+ nodes (list): list of nodes to update
+ considered_nodes (list): list of nodes to consider while updating
+ positions and dimensions
+ offset (int): distance between copies
+ """
+
+ placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+ min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
+
+ diff_x = diff_y = 0
+ contained_nodes = [] # for backdrops
+
+ if offset_y is None:
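+            # Created nodes replace the placeholder in place: the diff
+            # is how much they overflow the placeholder's on-screen
+            # footprint, and surrounding nodes are pushed by that much.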
+ width_ph = placeholder_node.screenWidth()
+ height_ph = placeholder_node.screenHeight()
+ diff_y = max_y - min_y - height_ph
+ diff_x = max_x - min_x - width_ph
+ contained_nodes = [placeholder_node]
+ min_x = placeholder_node.xpos()
+ min_y = placeholder_node.ypos()
+ else:
+ siblings = get_nodes_by_names(placeholder.data["siblings"])
+            min_sib_x, _, max_sib_x, _ = get_extreme_positions(siblings)
+            diff_y = max_y - min_y + 20
+            diff_x = abs(max_x - min_x - max_sib_x + min_sib_x)
+ contained_nodes = considered_nodes
+
+ if diff_y <= 0 and diff_x <= 0:
+ return
+
+ for node in nodes:
+ refresh_node(node)
+
+ if (
+ node == placeholder_node
+ or node in considered_nodes
+ ):
+ continue
+
+            if (
+                not isinstance(node, nuke.BackdropNode)
+                or not set(contained_nodes) <= set(node.getNodes())
+            ):
+ if offset_y is None and node.xpos() >= min_x:
+ node.setXpos(node.xpos() + diff_x)
+
+ if node.ypos() >= min_y:
+ node.setYpos(node.ypos() + diff_y)
+
+ else:
+ width = node.screenWidth()
+ height = node.screenHeight()
+ node.knob("bdwidth").setValue(width + diff_x)
+ node.knob("bdheight").setValue(height + diff_y)
+
+ refresh_node(node)
+
+ def _set_created_connections(self, placeholder):
+ """
+ set inputs and outputs of created nodes"""
+
+ placeholder_node = nuke.toNode(placeholder.scene_identifier)
+ input_node, output_node = get_group_io_nodes(
+ placeholder.data["last_created"]
+ )
+ for node in placeholder_node.dependent():
+ for idx in range(node.inputs()):
+ if node.input(idx) == placeholder_node and output_node:
+ node.setInput(idx, output_node)
+
+ for node in placeholder_node.dependencies():
+ for idx in range(placeholder_node.inputs()):
+ if placeholder_node.input(idx) == node and input_node:
+ input_node.setInput(0, node)
+
+ def _create_sib_copies(self, placeholder):
+ """ creating copies of the palce_holder siblings (the ones who were
+ created with it) for the new nodes added
+
+ Returns :
+ copies (dict) : with copied nodes names and their copies
+ """
+
+ copies = {}
+ siblings = get_nodes_by_names(placeholder.data["siblings"])
+ for node in siblings:
+ new_node = duplicate_node(node)
+
+ x_init = int(new_node.knob("x_init").getValue())
+ y_init = int(new_node.knob("y_init").getValue())
+ new_node.setXYpos(x_init, y_init)
+ if isinstance(new_node, nuke.BackdropNode):
+ w_init = new_node.knob("w_init").getValue()
+ h_init = new_node.knob("h_init").getValue()
+ new_node.knob("bdwidth").setValue(w_init)
+ new_node.knob("bdheight").setValue(h_init)
+ refresh_node(node)
+
+ if "repre_id" in node.knobs().keys():
+ node.removeKnob(node.knob("repre_id"))
+ copies[node.name()] = new_node
+ return copies
+
+ def _set_copies_connections(self, placeholder, copies):
+ """Set inputs and outputs of the copies.
+
+ Args:
+ copies (dict): Copied nodes by their names.
+ """
+
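+        # Rewire the duplicated branch: edges between siblings are
+        # remapped onto their copies, while edges crossing the branch
+        # boundary are reconnected through the newly created nodes'
+        # input/output and the original siblings' input node.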
+ last_input, last_output = get_group_io_nodes(
+ placeholder.data["last_created"]
+ )
+ siblings = get_nodes_by_names(placeholder.data["siblings"])
+ siblings_input, siblings_output = get_group_io_nodes(siblings)
+ copy_input = copies[siblings_input.name()]
+ copy_output = copies[siblings_output.name()]
+
+ for node_init in siblings:
+ if node_init == siblings_output:
+ continue
+
+ node_copy = copies[node_init.name()]
+ for node in node_init.dependent():
+ for idx in range(node.inputs()):
+ if node.input(idx) != node_init:
+ continue
+
+ if node in siblings:
+ copies[node.name()].setInput(idx, node_copy)
+ else:
+ last_input.setInput(0, node_copy)
+
+ for node in node_init.dependencies():
+ for idx in range(node_init.inputs()):
+ if node_init.input(idx) != node:
+ continue
+
+ if node_init == siblings_input:
+ copy_input.setInput(idx, node)
+ elif node in siblings:
+ node_copy.setInput(idx, copies[node.name()])
+ else:
+ node_copy.setInput(idx, last_output)
+
+ siblings_input.setInput(0, copy_output)
diff --git a/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py
new file mode 100644
index 0000000000..258f48c9d3
--- /dev/null
+++ b/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py
@@ -0,0 +1,455 @@
+import nuke
+
+from ayon_core.pipeline.workfile.workfile_template_builder import (
+ LoadPlaceholderItem,
+ PlaceholderLoadMixin,
+)
+from ayon_core.hosts.nuke.api.lib import (
+ find_free_space_to_paste_nodes,
+ get_extreme_positions,
+ get_group_io_nodes,
+ imprint,
+ refresh_node,
+ refresh_nodes,
+ reset_selection,
+ get_names_from_nodes,
+ get_nodes_by_names,
+ select_nodes,
+ duplicate_node,
+ node_tempfile,
+)
+from ayon_core.hosts.nuke.api.workfile_template_builder import (
+ NukePlaceholderPlugin
+)
+
+
+class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
+ identifier = "nuke.load"
+ label = "Nuke load"
+
+ def _parse_placeholder_node_data(self, node):
+ placeholder_data = super(
+ NukePlaceholderLoadPlugin, self
+ )._parse_placeholder_node_data(node)
+
+ node_knobs = node.knobs()
+ nb_children = 0
+ if "nb_children" in node_knobs:
+ nb_children = int(node_knobs["nb_children"].getValue())
+ placeholder_data["nb_children"] = nb_children
+
+ siblings = []
+ if "siblings" in node_knobs:
+ siblings = node_knobs["siblings"].values()
+ placeholder_data["siblings"] = siblings
+
+ node_full_name = node.fullName()
+ placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
+ placeholder_data["last_loaded"] = []
+ placeholder_data["delete"] = False
+ return placeholder_data
+
+ def _get_loaded_repre_ids(self):
+ loaded_representation_ids = self.builder.get_shared_populate_data(
+ "loaded_representation_ids"
+ )
+ if loaded_representation_ids is None:
+ loaded_representation_ids = set()
+ for node in nuke.allNodes():
+ if "repre_id" in node.knobs():
+ loaded_representation_ids.add(
+ node.knob("repre_id").getValue()
+ )
+
+ self.builder.set_shared_populate_data(
+ "loaded_representation_ids", loaded_representation_ids
+ )
+ return loaded_representation_ids
+
+ def _before_placeholder_load(self, placeholder):
+ placeholder.data["nodes_init"] = nuke.allNodes()
+
+ def _before_repre_load(self, placeholder, representation):
+ placeholder.data["last_repre_id"] = representation["id"]
+
+ def collect_placeholders(self):
+ output = []
+ scene_placeholders = self._collect_scene_placeholders()
+ for node_name, node in scene_placeholders.items():
+ plugin_identifier_knob = node.knob("plugin_identifier")
+ if (
+ plugin_identifier_knob is None
+ or plugin_identifier_knob.getValue() != self.identifier
+ ):
+ continue
+
+ placeholder_data = self._parse_placeholder_node_data(node)
+            # TODO: do data validations and maybe upgrades if invalid
+ output.append(
+ LoadPlaceholderItem(node_name, placeholder_data, self)
+ )
+
+ return output
+
+ def populate_placeholder(self, placeholder):
+ self.populate_load_placeholder(placeholder)
+
+ def repopulate_placeholder(self, placeholder):
+ repre_ids = self._get_loaded_repre_ids()
+ self.populate_load_placeholder(placeholder, repre_ids)
+
+ def get_placeholder_options(self, options=None):
+ return self.get_load_plugin_options(options)
+
+ def post_placeholder_process(self, placeholder, failed):
+ """Cleanup placeholder after load of its corresponding representations.
+
+ Args:
+ placeholder (PlaceholderItem): Item which was just used to load
+ representation.
+ failed (bool): Loading of representation failed.
+ """
+ # deselect all selected nodes
+ placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+ # getting the latest nodes added
+ # TODO get from shared populate data!
+ nodes_init = placeholder.data["nodes_init"]
+ nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init))
+ self.log.debug("Loaded nodes: {}".format(nodes_loaded))
+ if not nodes_loaded:
+ return
+
+ placeholder.data["delete"] = True
+
+ nodes_loaded = self._move_to_placeholder_group(
+ placeholder, nodes_loaded
+ )
+ placeholder.data["last_loaded"] = nodes_loaded
+ refresh_nodes(nodes_loaded)
+
+ # positioning of the loaded nodes
+ min_x, min_y, _, _ = get_extreme_positions(nodes_loaded)
+ for node in nodes_loaded:
+ xpos = (node.xpos() - min_x) + placeholder_node.xpos()
+ ypos = (node.ypos() - min_y) + placeholder_node.ypos()
+ node.setXYpos(xpos, ypos)
+ refresh_nodes(nodes_loaded)
+
+ # fix the problem of z_order for backdrops
+ self._fix_z_order(placeholder)
+
+ if placeholder.data.get("keep_placeholder"):
+ self._imprint_siblings(placeholder)
+
+ if placeholder.data["nb_children"] == 0:
+ # save initial nodes positions and dimensions, update them
+ # and set inputs and outputs of loaded nodes
+ if placeholder.data.get("keep_placeholder"):
+ self._imprint_inits()
+ self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded)
+
+ self._set_loaded_connections(placeholder)
+
+ elif placeholder.data["siblings"]:
+ # create copies of placeholder siblings for the new loaded nodes,
+ # set their inputs and outputs and update all nodes positions and
+ # dimensions and siblings names
+
+ siblings = get_nodes_by_names(placeholder.data["siblings"])
+ refresh_nodes(siblings)
+ copies = self._create_sib_copies(placeholder)
+ new_nodes = list(copies.values()) # copies nodes
+            self._update_nodes(placeholder, new_nodes, nodes_loaded)
+ placeholder_node.removeKnob(placeholder_node.knob("siblings"))
+ new_nodes_name = get_names_from_nodes(new_nodes)
+ imprint(placeholder_node, {"siblings": new_nodes_name})
+ self._set_copies_connections(placeholder, copies)
+
+            self._update_nodes(
+                placeholder,
+                nuke.allNodes(),
+                new_nodes + nodes_loaded,
+                20
+            )
+
+ new_siblings = get_names_from_nodes(new_nodes)
+ placeholder.data["siblings"] = new_siblings
+
+ else:
+ # if the placeholder doesn't have siblings, the loaded
+ # nodes will be placed in a free space
+
+ xpointer, ypointer = find_free_space_to_paste_nodes(
+ nodes_loaded, direction="bottom", offset=200
+ )
+ node = nuke.createNode("NoOp")
+ reset_selection()
+ nuke.delete(node)
+ for node in nodes_loaded:
+ xpos = (node.xpos() - min_x) + xpointer
+ ypos = (node.ypos() - min_y) + ypointer
+ node.setXYpos(xpos, ypos)
+
+ placeholder.data["nb_children"] += 1
+ reset_selection()
+
+ # go back to root group
+ nuke.root().begin()
+
+ def _move_to_placeholder_group(self, placeholder, nodes_loaded):
+ """
+ opening the placeholder's group and copying loaded nodes in it.
+
+ Returns :
+ nodes_loaded (list): the new list of pasted nodes
+ """
+
+ groups_name = placeholder.data["group_name"]
+ reset_selection()
+ select_nodes(nodes_loaded)
+ if groups_name:
+ with node_tempfile() as filepath:
+ nuke.nodeCopy(filepath)
+ for node in nuke.selectedNodes():
+ nuke.delete(node)
+ group = nuke.toNode(groups_name)
+ group.begin()
+ nuke.nodePaste(filepath)
+ nodes_loaded = nuke.selectedNodes()
+ return nodes_loaded
+
+ def _fix_z_order(self, placeholder):
+ """Fix the problem of z_order when a backdrop is loaded."""
+
+ nodes_loaded = placeholder.data["last_loaded"]
+ loaded_backdrops = []
+ bd_orders = set()
+ for node in nodes_loaded:
+ if isinstance(node, nuke.BackdropNode):
+ loaded_backdrops.append(node)
+ bd_orders.add(node.knob("z_order").getValue())
+
+ if not bd_orders:
+ return
+
+ sib_orders = set()
+ for node_name in placeholder.data["siblings"]:
+ node = nuke.toNode(node_name)
+ if isinstance(node, nuke.BackdropNode):
+ sib_orders.add(node.knob("z_order").getValue())
+
+ if not sib_orders:
+ return
+
+ min_order = min(bd_orders)
+ max_order = max(sib_orders)
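+        # Normalize by the lowest loaded z_order and offset past the
+        # highest sibling z_order so loaded backdrops never share an
+        # order with existing sibling backdrops.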
+ for backdrop_node in loaded_backdrops:
+ z_order = backdrop_node.knob("z_order").getValue()
+ backdrop_node.knob("z_order").setValue(
+ z_order + max_order - min_order + 1)
+
+ def _imprint_siblings(self, placeholder):
+ """
+ - add siblings names to placeholder attributes (nodes loaded with it)
+ - add Id to the attributes of all the other nodes
+ """
+
+ loaded_nodes = placeholder.data["last_loaded"]
+ loaded_nodes_set = set(loaded_nodes)
+ data = {"repre_id": str(placeholder.data["last_repre_id"])}
+
+ for node in loaded_nodes:
+ node_knobs = node.knobs()
+ if "builder_type" not in node_knobs:
+ # save the id of representation for all imported nodes
+ imprint(node, data)
+ node.knob("repre_id").setVisible(False)
+ refresh_node(node)
+ continue
+
+            if (
+                "is_placeholder" not in node_knobs
+                or node.knob("is_placeholder").value()
+            ):
+ siblings = list(loaded_nodes_set - {node})
+ siblings_name = get_names_from_nodes(siblings)
+ siblings = {"siblings": siblings_name}
+ imprint(node, siblings)
+
+ def _imprint_inits(self):
+ """Add initial positions and dimensions to the attributes"""
+
+ for node in nuke.allNodes():
+ refresh_node(node)
+ imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
+ node.knob("x_init").setVisible(False)
+ node.knob("y_init").setVisible(False)
+ width = node.screenWidth()
+ height = node.screenHeight()
+ if "bdwidth" in node.knobs():
+ imprint(node, {"w_init": width, "h_init": height})
+ node.knob("w_init").setVisible(False)
+ node.knob("h_init").setVisible(False)
+ refresh_node(node)
+
+ def _update_nodes(
+ self, placeholder, nodes, considered_nodes, offset_y=None
+ ):
+ """Adjust backdrop nodes dimensions and positions.
+
+ Considering some nodes sizes.
+
+ Args:
+ nodes (list): list of nodes to update
+ considered_nodes (list): list of nodes to consider while updating
+ positions and dimensions
+ offset (int): distance between copies
+ """
+
+ placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+ min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
+
+ diff_x = diff_y = 0
+ contained_nodes = [] # for backdrops
+
+ if offset_y is None:
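+            # Loaded nodes replace the placeholder in place: the diff
+            # is how much they overflow the placeholder's on-screen
+            # footprint, and surrounding nodes are pushed by that much.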
+ width_ph = placeholder_node.screenWidth()
+ height_ph = placeholder_node.screenHeight()
+ diff_y = max_y - min_y - height_ph
+ diff_x = max_x - min_x - width_ph
+ contained_nodes = [placeholder_node]
+ min_x = placeholder_node.xpos()
+ min_y = placeholder_node.ypos()
+ else:
+ siblings = get_nodes_by_names(placeholder.data["siblings"])
+            min_sib_x, _, max_sib_x, _ = get_extreme_positions(siblings)
+            diff_y = max_y - min_y + 20
+            diff_x = abs(max_x - min_x - max_sib_x + min_sib_x)
+ contained_nodes = considered_nodes
+
+ if diff_y <= 0 and diff_x <= 0:
+ return
+
+ for node in nodes:
+ refresh_node(node)
+
+ if (
+ node == placeholder_node
+ or node in considered_nodes
+ ):
+ continue
+
+            if (
+                not isinstance(node, nuke.BackdropNode)
+                or not set(contained_nodes) <= set(node.getNodes())
+            ):
+ if offset_y is None and node.xpos() >= min_x:
+ node.setXpos(node.xpos() + diff_x)
+
+ if node.ypos() >= min_y:
+ node.setYpos(node.ypos() + diff_y)
+
+ else:
+ width = node.screenWidth()
+ height = node.screenHeight()
+ node.knob("bdwidth").setValue(width + diff_x)
+ node.knob("bdheight").setValue(height + diff_y)
+
+ refresh_node(node)
+
+ def _set_loaded_connections(self, placeholder):
+ """
+ set inputs and outputs of loaded nodes"""
+
+ placeholder_node = nuke.toNode(placeholder.scene_identifier)
+ input_node, output_node = get_group_io_nodes(
+ placeholder.data["last_loaded"]
+ )
+ for node in placeholder_node.dependent():
+ for idx in range(node.inputs()):
+ if node.input(idx) == placeholder_node and output_node:
+ node.setInput(idx, output_node)
+
+ for node in placeholder_node.dependencies():
+ for idx in range(placeholder_node.inputs()):
+ if placeholder_node.input(idx) == node and input_node:
+ input_node.setInput(0, node)
+
+ def _create_sib_copies(self, placeholder):
+ """ creating copies of the palce_holder siblings (the ones who were
+ loaded with it) for the new nodes added
+
+ Returns :
+ copies (dict) : with copied nodes names and their copies
+ """
+
+ copies = {}
+ siblings = get_nodes_by_names(placeholder.data["siblings"])
+ for node in siblings:
+ new_node = duplicate_node(node)
+
+ x_init = int(new_node.knob("x_init").getValue())
+ y_init = int(new_node.knob("y_init").getValue())
+ new_node.setXYpos(x_init, y_init)
+ if isinstance(new_node, nuke.BackdropNode):
+ w_init = new_node.knob("w_init").getValue()
+ h_init = new_node.knob("h_init").getValue()
+ new_node.knob("bdwidth").setValue(w_init)
+ new_node.knob("bdheight").setValue(h_init)
+ refresh_node(node)
+
+ if "repre_id" in node.knobs().keys():
+ node.removeKnob(node.knob("repre_id"))
+ copies[node.name()] = new_node
+ return copies
+
+ def _set_copies_connections(self, placeholder, copies):
+ """Set inputs and outputs of the copies.
+
+ Args:
+ copies (dict): Copied nodes by their names.
+ """
+
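+        # Rewire the duplicated branch: edges between siblings are
+        # remapped onto their copies, while edges crossing the branch
+        # boundary are reconnected through the newly created nodes'
+        # input/output and the original siblings' input node.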
+ last_input, last_output = get_group_io_nodes(
+ placeholder.data["last_loaded"]
+ )
+ siblings = get_nodes_by_names(placeholder.data["siblings"])
+ siblings_input, siblings_output = get_group_io_nodes(siblings)
+ copy_input = copies[siblings_input.name()]
+ copy_output = copies[siblings_output.name()]
+
+ for node_init in siblings:
+ if node_init == siblings_output:
+ continue
+
+ node_copy = copies[node_init.name()]
+ for node in node_init.dependent():
+ for idx in range(node.inputs()):
+ if node.input(idx) != node_init:
+ continue
+
+ if node in siblings:
+ copies[node.name()].setInput(idx, node_copy)
+ else:
+ last_input.setInput(0, node_copy)
+
+ for node in node_init.dependencies():
+ for idx in range(node_init.inputs()):
+ if node_init.input(idx) != node:
+ continue
+
+ if node_init == siblings_input:
+ copy_input.setInput(idx, node)
+ elif node in siblings:
+ node_copy.setInput(idx, copies[node.name()])
+ else:
+ node_copy.setInput(idx, last_output)
+
+ siblings_input.setInput(0, copy_output)
diff --git a/client/ayon_core/hosts/substancepainter/api/lib.py b/client/ayon_core/hosts/substancepainter/api/lib.py
index 1cb480b552..64c39943ce 100644
--- a/client/ayon_core/hosts/substancepainter/api/lib.py
+++ b/client/ayon_core/hosts/substancepainter/api/lib.py
@@ -586,7 +586,6 @@ def prompt_new_file_with_mesh(mesh_filepath):
# TODO: find a way to improve the process event to
# load more complicated mesh
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 3000)
-
file_dialog.done(file_dialog.Accepted)
app.processEvents(QtCore.QEventLoop.AllEvents)
@@ -606,7 +605,7 @@ def prompt_new_file_with_mesh(mesh_filepath):
mesh_select.setVisible(False)
# Ensure UI is visually up-to-date
- app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents)
+ app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 8000)
# Trigger the 'select file' dialog to set the path and have the
# new file dialog to use the path.
@@ -623,8 +622,6 @@ def prompt_new_file_with_mesh(mesh_filepath):
"Failed to set mesh path with the prompt dialog:"
f"{mesh_filepath}\n\n"
"Creating new project directly with the mesh path instead.")
- else:
- dialog.done(dialog.Accepted)
new_action = _get_new_project_action()
if not new_action:
diff --git a/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py b/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py
index 01cb65dd5c..d5aac1191c 100644
--- a/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py
+++ b/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py
@@ -1,3 +1,5 @@
+import copy
+from qtpy import QtWidgets, QtCore
from ayon_core.pipeline import (
load,
get_representation_path,
@@ -8,10 +10,133 @@ from ayon_core.hosts.substancepainter.api.pipeline import (
set_container_metadata,
remove_container_metadata
)
-from ayon_core.hosts.substancepainter.api.lib import prompt_new_file_with_mesh
import substance_painter.project
-import qargparse
+
+
+def _convert(substance_attr):
+ """Return Substance Painter Python API Project attribute from string.
+
+ This converts a string like "ProjectWorkflow.Default" to for example
+ the Substance Painter Python API equivalent object, like:
+ `substance_painter.project.ProjectWorkflow.Default`
+
+ Args:
+ substance_attr (str): The `substance_painter.project` attribute,
+ for example "ProjectWorkflow.Default"
+
+ Returns:
+ Any: Substance Python API object of the project attribute.
+
+ Raises:
+ ValueError: If attribute does not exist on the
+ `substance_painter.project` python api.
+ """
+ root = substance_painter.project
+ for attr in substance_attr.split("."):
+ root = getattr(root, attr, None)
+ if root is None:
+ raise ValueError(
+ "Substance Painter project attribute"
+ f" does not exist: {substance_attr}")
+
+ return root
+
+
+def get_template_by_name(name: str, templates: list[dict]) -> dict:
+    """Return the template with the given name.
+
+    Raises:
+        ValueError: If no template matches the name.
+    """
+    for template in templates:
+        if template["name"] == name:
+            return template
+    raise ValueError(f"Template not found by name: {name}")
+
+
+class SubstanceProjectConfigurationWindow(QtWidgets.QDialog):
+ """The pop-up dialog allows users to choose material
+ duplicate options for importing Max objects when updating
+ or switching assets.
+ """
+ def __init__(self, project_templates):
+ super(SubstanceProjectConfigurationWindow, self).__init__()
+ self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint)
+
+ self.configuration = None
+ self.template_names = [template["name"] for template
+ in project_templates]
+ self.project_templates = project_templates
+
+ self.widgets = {
+ "label": QtWidgets.QLabel(
+ "Select your template for project configuration"),
+ "template_options": QtWidgets.QComboBox(),
+ "import_cameras": QtWidgets.QCheckBox("Import Cameras"),
+ "preserve_strokes": QtWidgets.QCheckBox("Preserve Strokes"),
+ "clickbox": QtWidgets.QWidget(),
+ "combobox": QtWidgets.QWidget(),
+ "buttons": QtWidgets.QDialogButtonBox(
+ QtWidgets.QDialogButtonBox.Ok
+ | QtWidgets.QDialogButtonBox.Cancel)
+ }
+
+ self.widgets["template_options"].addItems(self.template_names)
+
+ template_name = self.widgets["template_options"].currentText()
+ self._update_to_match_template(template_name)
+ # Build clickboxes
+ layout = QtWidgets.QHBoxLayout(self.widgets["clickbox"])
+ layout.addWidget(self.widgets["import_cameras"])
+ layout.addWidget(self.widgets["preserve_strokes"])
+ # Build combobox
+ layout = QtWidgets.QHBoxLayout(self.widgets["combobox"])
+ layout.addWidget(self.widgets["template_options"])
+ # Build buttons
+ layout = QtWidgets.QHBoxLayout(self.widgets["buttons"])
+ # Build layout.
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.addWidget(self.widgets["label"])
+ layout.addWidget(self.widgets["combobox"])
+ layout.addWidget(self.widgets["clickbox"])
+ layout.addWidget(self.widgets["buttons"])
+
+ self.widgets["template_options"].currentTextChanged.connect(
+ self._update_to_match_template)
+ self.widgets["buttons"].accepted.connect(self.on_accept)
+ self.widgets["buttons"].rejected.connect(self.on_reject)
+
+ def on_accept(self):
+ self.configuration = self.get_project_configuration()
+ self.close()
+
+ def on_reject(self):
+ self.close()
+
+ def _update_to_match_template(self, template_name):
+ template = get_template_by_name(template_name, self.project_templates)
+ self.widgets["import_cameras"].setChecked(template["import_cameras"])
+ self.widgets["preserve_strokes"].setChecked(
+ template["preserve_strokes"])
+
+ def get_project_configuration(self):
+ templates = self.project_templates
+ template_name = self.widgets["template_options"].currentText()
+ template = get_template_by_name(template_name, templates)
+ template = copy.deepcopy(template) # do not edit the original
+ template["import_cameras"] = self.widgets["import_cameras"].isChecked()
+ template["preserve_strokes"] = (
+ self.widgets["preserve_strokes"].isChecked()
+ )
+ for key in ["normal_map_format",
+ "project_workflow",
+ "tangent_space_mode"]:
+ template[key] = _convert(template[key])
+ return template
+
+ @classmethod
+ def prompt(cls, templates):
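+        # Blocks on the modal dialog; returns the chosen configuration
+        # or None when the user cancels (callers abort the load then).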
+ dialog = cls(templates)
+ dialog.exec_()
+ configuration = dialog.configuration
+ dialog.deleteLater()
+ return configuration
class SubstanceLoadProjectMesh(load.LoaderPlugin):
@@ -25,48 +150,35 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
icon = "code-fork"
color = "orange"
- options = [
- qargparse.Boolean(
- "preserve_strokes",
- default=True,
- help="Preserve strokes positions on mesh.\n"
- "(only relevant when loading into existing project)"
- ),
- qargparse.Boolean(
- "import_cameras",
- default=True,
- help="Import cameras from the mesh file."
- )
- ]
+ # Defined via settings
+ project_templates = []
- def load(self, context, name, namespace, data):
+ def load(self, context, name, namespace, options=None):
# Get user inputs
- import_cameras = data.get("import_cameras", True)
- preserve_strokes = data.get("preserve_strokes", True)
- sp_settings = substance_painter.project.Settings(
- import_cameras=import_cameras
- )
+ result = SubstanceProjectConfigurationWindow.prompt(
+ self.project_templates)
+ if not result:
+ # cancelling loader action
+ return
if not substance_painter.project.is_open():
# Allow to 'initialize' a new project
path = self.filepath_from_context(context)
- # TODO: improve the prompt dialog function to not
- # only works for simple polygon scene
- result = prompt_new_file_with_mesh(mesh_filepath=path)
- if not result:
- self.log.info("User cancelled new project prompt."
- "Creating new project directly from"
- " Substance Painter API Instead.")
- settings = substance_painter.project.create(
- mesh_file_path=path, settings=sp_settings
- )
-
+ sp_settings = substance_painter.project.Settings(
+ import_cameras=result["import_cameras"],
+ normal_map_format=result["normal_map_format"],
+ project_workflow=result["project_workflow"],
+ tangent_space_mode=result["tangent_space_mode"],
+ default_texture_resolution=result["default_texture_resolution"]
+ )
+ settings = substance_painter.project.create(
+ mesh_file_path=path, settings=sp_settings
+ )
else:
# Reload the mesh
settings = substance_painter.project.MeshReloadingSettings(
- import_cameras=import_cameras,
- preserve_strokes=preserve_strokes
- )
+ import_cameras=result["import_cameras"],
+ preserve_strokes=result["preserve_strokes"])
def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus): # noqa
if status == substance_painter.project.ReloadMeshStatus.SUCCESS: # noqa
@@ -92,7 +204,7 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
# from the user's original choice. We don't store 'preserve_strokes'
# as we always preserve strokes on updates.
container["options"] = {
- "import_cameras": import_cameras,
+ "import_cameras": result["import_cameras"],
}
set_container_metadata(project_mesh_object_name, container)
diff --git a/client/ayon_core/hosts/traypublisher/addon.py b/client/ayon_core/hosts/traypublisher/addon.py
index 70bdfe9a64..3dd275f223 100644
--- a/client/ayon_core/hosts/traypublisher/addon.py
+++ b/client/ayon_core/hosts/traypublisher/addon.py
@@ -1,5 +1,6 @@
import os
+from pathlib import Path
from ayon_core.lib import get_ayon_launcher_args
from ayon_core.lib.execute import run_detached_process
from ayon_core.addon import (
@@ -57,3 +58,62 @@ def launch():
from ayon_core.tools import traypublisher
traypublisher.main()
+
+
+@cli_main.command()
+@click_wrap.option(
+ "--filepath",
+ help="Full path to CSV file with data",
+ type=str,
+ required=True
+)
+@click_wrap.option(
+ "--project",
+ help="Project name in which the context will be used",
+ type=str,
+ required=True
+)
+@click_wrap.option(
+ "--folder-path",
+ help="Asset name in which the context will be used",
+ type=str,
+ required=True
+)
+@click_wrap.option(
+ "--task",
+ help="Task name under Asset in which the context will be used",
+ type=str,
+ required=False
+)
+@click_wrap.option(
+ "--ignore-validators",
+ help="Option to ignore validators",
+ type=bool,
+ is_flag=True,
+ required=False
+)
+def ingestcsv(
+ filepath,
+ project,
+ folder_path,
+ task,
+ ignore_validators
+):
+ """Ingest CSV file into project.
+
+    This command ingests a CSV file into the given project. The CSV file
+    must match the column configuration from the project settings. See
+    documentation for more information.
+ """
+ from .csv_publish import csvpublish
+
+ # use Path to check if csv_filepath exists
+ if not Path(filepath).exists():
+ raise FileNotFoundError(f"File {filepath} does not exist.")
+
+ csvpublish(
+ filepath,
+ project,
+ folder_path,
+ task,
+ ignore_validators
+ )
diff --git a/client/ayon_core/hosts/traypublisher/csv_publish.py b/client/ayon_core/hosts/traypublisher/csv_publish.py
new file mode 100644
index 0000000000..b43792a357
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/csv_publish.py
@@ -0,0 +1,86 @@
+import pyblish.api
+import pyblish.util
+
+from ayon_api import get_folder_by_path, get_task_by_name
+from ayon_core.lib.attribute_definitions import FileDefItem
+from ayon_core.pipeline import install_host
+from ayon_core.pipeline.create import CreateContext
+
+from ayon_core.hosts.traypublisher.api import TrayPublisherHost
+
+
+def csvpublish(
+ filepath,
+ project_name,
+ folder_path,
+ task_name=None,
+ ignore_validators=False
+):
+ """Publish CSV file.
+
+ Args:
+ filepath (str): Path to CSV file.
+ project_name (str): Project name.
+ folder_path (str): Folder path.
+ task_name (Optional[str]): Task name.
+ ignore_validators (Optional[bool]): Option to ignore validators.
+ """
+
+ # initialization of host
+ host = TrayPublisherHost()
+ install_host(host)
+
+ # setting host context into project
+ host.set_project_name(project_name)
+
+ # form precreate data with field values
+ file_field = FileDefItem.from_paths([filepath], False).pop().to_dict()
+ precreate_data = {
+ "csv_filepath_data": file_field,
+ }
+
+ # create context initialization
+ create_context = CreateContext(host, headless=True)
+ folder_entity = get_folder_by_path(
+ project_name,
+ folder_path=folder_path,
+ )
+
+ if not folder_entity:
+        raise ValueError(
+            f"Folder path '{folder_path}' doesn't "
+            f"exist in project '{project_name}'."
+        )
+
+    task_entity = None
+    if task_name:
+        task_entity = get_task_by_name(
+            project_name,
+            folder_entity["id"],
+            task_name,
+        )
+
+        if not task_entity:
+            raise ValueError(
+                f"Task name '{task_name}' doesn't "
+                f"exist in folder '{folder_path}'."
+            )
+
+ create_context.create(
+ "io.ayon.creators.traypublisher.csv_ingest",
+ "Main",
+ folder_entity=folder_entity,
+ task_entity=task_entity,
+ pre_create_data=precreate_data,
+ )
+
+ # publishing context initialization
+ pyblish_context = pyblish.api.Context()
+ pyblish_context.data["create_context"] = create_context
+
+    # redefine targets (skip 'local' to disable validators)
+    targets = None
+    if ignore_validators:
+        targets = ["default", "ingest"]
+
+    # publishing
+    if targets:
+        pyblish.util.publish(context=pyblish_context, targets=targets)
+    else:
+        pyblish.util.publish(context=pyblish_context)
diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py
new file mode 100644
index 0000000000..8143e8b45b
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py
@@ -0,0 +1,741 @@
+import os
+import re
+import csv
+import clique
+from io import StringIO
+from copy import deepcopy, copy
+
+from ayon_api import get_folder_by_path, get_task_by_name
+from ayon_core.pipeline.create import get_product_name
+from ayon_core.pipeline import CreatedInstance
+from ayon_core.lib import FileDef, BoolDef
+from ayon_core.lib.transcoding import (
+ VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
+)
+from ayon_core.pipeline.create import CreatorError
+from ayon_core.hosts.traypublisher.api.plugin import (
+ TrayPublishCreator
+)
+
+
+class IngestCSV(TrayPublishCreator):
+ """CSV ingest creator class"""
+
+ icon = "fa.file"
+
+ label = "CSV Ingest"
+ product_type = "csv_ingest_file"
+ identifier = "io.ayon.creators.traypublisher.csv_ingest"
+
+ default_variants = ["Main"]
+
+ description = "Ingest products' data from CSV file"
+ detailed_description = """
+Ingest products' data from CSV file following column and representation
+configuration in project settings.
+"""
+
+ # Position in the list of creators.
+ order = 10
+
+ # settings for this creator
+ columns_config = {}
+ representations_config = {}
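+    # Expected settings shape (as consumed by the code below):
+    #   columns_config = {
+    #       "csv_delimiter": ",",
+    #       "columns": [
+    #           {"name": ..., "type": ..., "required_column": ...,
+    #            "default": ..., "validation_pattern": ...},
+    #       ],
+    #   }
+    #   representations_config = {
+    #       "representations": [{"name": ..., "extensions": [...]}],
+    #       "default_tags": [...],
+    #       "tags_delimiter": ...,
+    #   }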
+
+ def create(self, subset_name, instance_data, pre_create_data):
+ """Create an product from each row found in the CSV.
+
+ Args:
+ subset_name (str): The subset name.
+ instance_data (dict): The instance data.
+ pre_create_data (dict):
+ """
+
+ csv_filepath_data = pre_create_data.get("csv_filepath_data", {})
+
+ folder = csv_filepath_data.get("directory", "")
+ if not os.path.exists(folder):
+ raise CreatorError(
+ f"Directory '{folder}' does not exist."
+ )
+        filenames = csv_filepath_data.get("filenames", [])
+        if not filenames:
+            raise CreatorError("No CSV file was selected.")
+        self._process_csv_file(
+            subset_name, instance_data, folder, filenames[0])
+
+ def _process_csv_file(
+ self, subset_name, instance_data, staging_dir, filename):
+ """Process CSV file.
+
+ Args:
+ subset_name (str): The subset name.
+ instance_data (dict): The instance data.
+ staging_dir (str): The staging directory.
+ filename (str): The filename.
+ """
+
+ # create new instance from the csv file via self function
+ self._pass_data_to_csv_instance(
+ instance_data,
+ staging_dir,
+ filename
+ )
+
+ csv_instance = CreatedInstance(
+ self.product_type, subset_name, instance_data, self
+ )
+ self._store_new_instance(csv_instance)
+
+ csv_instance["csvFileData"] = {
+ "filename": filename,
+ "staging_dir": staging_dir,
+ }
+
+ # from special function get all data from csv file and convert them
+ # to new instances
+ csv_data_for_instances = self._get_data_from_csv(
+ staging_dir, filename)
+
+ # create instances from csv data via self function
+ self._create_instances_from_csv_data(
+ csv_data_for_instances, staging_dir
+ )
+
+ def _create_instances_from_csv_data(
+ self,
+ csv_data_for_instances,
+ staging_dir
+ ):
+ """Create instances from csv data"""
+
+ for folder_path, prepared_data in csv_data_for_instances.items():
+ project_name = self.create_context.get_current_project_name()
+ products = prepared_data["products"]
+
+ for instance_name, product_data in products.items():
+ # get important instance variables
+ task_name = product_data["task_name"]
+ task_type = product_data["task_type"]
+ variant = product_data["variant"]
+ product_type = product_data["product_type"]
+ version = product_data["version"]
+
+ # create subset/product name
+ product_name = get_product_name(
+ project_name,
+ task_name,
+ task_type,
+ self.host_name,
+ product_type,
+ variant
+ )
+
+ # make sure frame start/end is inherited from csv columns
+ # expected frame range data are handles excluded
+                for repre_data in product_data["representations"].values():  # noqa: E501
+ frame_start = repre_data["frameStart"]
+ frame_end = repre_data["frameEnd"]
+ handle_start = repre_data["handleStart"]
+ handle_end = repre_data["handleEnd"]
+ fps = repre_data["fps"]
+ break
+
+ # try to find any version comment in representation data
+ version_comment = next(
+ iter(
+ repre_data["comment"]
+ for repre_data in product_data["representations"].values() # noqa: E501
+ if repre_data["comment"]
+ ),
+ None
+ )
+
+ # try to find any slate switch in representation data
+ slate_exists = any(
+ repre_data["slate"]
+                    for repre_data in product_data["representations"].values()  # noqa: E501
+ )
+
+ # get representations from product data
+ representations = product_data["representations"]
+ label = f"{folder_path}_{product_name}_v{version:>03}"
+
+ families = ["csv_ingest"]
+ if slate_exists:
+ # adding slate to families mainly for loaders to be able
+ # to filter out slates
+ families.append("slate")
+
+ # make product data
+ product_data = {
+ "name": instance_name,
+ "folderPath": folder_path,
+ "families": families,
+ "label": label,
+ "task": task_name,
+ "variant": variant,
+ "source": "csv",
+ "frameStart": frame_start,
+ "frameEnd": frame_end,
+ "handleStart": handle_start,
+ "handleEnd": handle_end,
+ "fps": fps,
+ "version": version,
+ "comment": version_comment,
+ }
+
+ # create new instance
+ new_instance = CreatedInstance(
+ product_type, product_name, product_data, self
+ )
+ self._store_new_instance(new_instance)
+
+ if not new_instance.get("prepared_data_for_repres"):
+ new_instance["prepared_data_for_repres"] = []
+
+ base_thumbnail_repre_data = {
+ "name": "thumbnail",
+ "ext": None,
+ "files": None,
+ "stagingDir": None,
+ "stagingDir_persistent": True,
+ "tags": ["thumbnail", "delete"],
+ }
+ # need to populate all thumbnails for all representations
+ # so we can check if unique thumbnail per representation
+ # is needed
+ thumbnails = [
+ repre_data["thumbnailPath"]
+ for repre_data in representations.values()
+ if repre_data["thumbnailPath"]
+ ]
+ multiple_thumbnails = len(set(thumbnails)) > 1
+ explicit_output_name = None
+ thumbnails_processed = False
+ for filepath, repre_data in representations.items():
+ # check if any review derivate tag is present
+ reviewable = any(
+ tag for tag in repre_data.get("tags", [])
+ # tag can be `ftrackreview` or `review`
+ if "review" in tag
+ )
+ # since we need to populate multiple thumbnails as
+ # representation with outputName for (Ftrack instance
+ # integrator) pairing with reviewable video representations
+ if (
+ thumbnails
+ and multiple_thumbnails
+ and reviewable
+ ):
+ # multiple unique thumbnails per representation needs
+ # grouping by outputName
+ # mainly used in Ftrack instance integrator
+ explicit_output_name = repre_data["representationName"]
+ relative_thumbnail_path = repre_data["thumbnailPath"]
+ # representation might not have thumbnail path
+ # so ignore this one
+ if not relative_thumbnail_path:
+ continue
+ thumb_dir, thumb_file = \
+ self._get_refactor_thumbnail_path(
+ staging_dir, relative_thumbnail_path)
+ filename, ext = os.path.splitext(thumb_file)
+ thumbnail_repr_data = deepcopy(
+ base_thumbnail_repre_data)
+ thumbnail_repr_data.update({
+ "name": "thumbnail_{}".format(filename),
+ "ext": ext[1:],
+ "files": thumb_file,
+ "stagingDir": thumb_dir,
+ "outputName": explicit_output_name,
+ })
+ new_instance["prepared_data_for_repres"].append({
+ "type": "thumbnail",
+ "colorspace": None,
+ "representation": thumbnail_repr_data,
+ })
+ # also add thumbnailPath for ayon to integrate
+ if not new_instance.get("thumbnailPath"):
+ new_instance["thumbnailPath"] = (
+ os.path.join(thumb_dir, thumb_file)
+ )
+                    elif (
+                        (
+                            thumbnails
+                            and not multiple_thumbnails
+                            and not thumbnails_processed
+                        )
+                        or not reviewable
+                    ):
+                        # Case with a single thumbnail and/or
+                        # non-reviewable media; processed only once
+                        # per instance.
+ if not thumbnails:
+ continue
+ # here we will use only one thumbnail for
+ # all representations
+ relative_thumbnail_path = repre_data["thumbnailPath"]
+ # popping last thumbnail from list since it is only one
+ # and we do not need to iterate again over it
+ if not relative_thumbnail_path:
+ relative_thumbnail_path = thumbnails.pop()
+ thumb_dir, thumb_file = \
+ self._get_refactor_thumbnail_path(
+ staging_dir, relative_thumbnail_path)
+ _, ext = os.path.splitext(thumb_file)
+ thumbnail_repr_data = deepcopy(
+ base_thumbnail_repre_data)
+ thumbnail_repr_data.update({
+ "ext": ext[1:],
+ "files": thumb_file,
+ "stagingDir": thumb_dir
+ })
+ new_instance["prepared_data_for_repres"].append({
+ "type": "thumbnail",
+ "colorspace": None,
+ "representation": thumbnail_repr_data,
+ })
+ # also add thumbnailPath for ayon to integrate
+ if not new_instance.get("thumbnailPath"):
+ new_instance["thumbnailPath"] = (
+ os.path.join(thumb_dir, thumb_file)
+ )
+
+ thumbnails_processed = True
+
+ # get representation data
+ representation_data = self._get_representation_data(
+ filepath, repre_data, staging_dir,
+ explicit_output_name
+ )
+
+ new_instance["prepared_data_for_repres"].append({
+ "type": "media",
+ "colorspace": repre_data["colorspace"],
+ "representation": representation_data,
+ })
+
+ def _get_refactor_thumbnail_path(
+ self, staging_dir, relative_thumbnail_path):
+ thumbnail_abs_path = os.path.join(
+ staging_dir, relative_thumbnail_path)
+ return os.path.split(
+ thumbnail_abs_path)
+
+ def _get_representation_data(
+ self, filepath, repre_data, staging_dir, explicit_output_name=None
+ ):
+ """Get representation data
+
+ Args:
+ filepath (str): Filepath to representation file.
+ repre_data (dict): Representation data from CSV file.
+ staging_dir (str): Staging directory.
+ explicit_output_name (Optional[str]): Explicit output name.
+ For grouping purposes with reviewable components.
+ Defaults to None.
+ """
+
+ # get extension of file
+ basename = os.path.basename(filepath)
+ extension = os.path.splitext(filepath)[-1].lower()
+
+ # validate filepath is having correct extension based on output
+ repre_name = repre_data["representationName"]
+ repre_config_data = None
+ for repre in self.representations_config["representations"]:
+ if repre["name"] == repre_name:
+ repre_config_data = repre
+ break
+
+ if not repre_config_data:
+ raise CreatorError(
+ f"Representation '{repre_name}' not found "
+ "in config representation data."
+ )
+
+ validate_extensions = repre_config_data["extensions"]
+ if extension not in validate_extensions:
+ raise CreatorError(
+ f"File extension '{extension}' not valid for "
+ f"output '{validate_extensions}'."
+ )
+
+ is_sequence = (extension in IMAGE_EXTENSIONS)
+ # convert ### string in file name to %03d
+ # this is for correct frame range validation
+ # example: file.###.exr -> file.%03d.exr
+ if "#" in basename:
+ padding = len(basename.split("#")) - 1
+ basename = basename.replace("#" * padding, f"%0{padding}d")
+ is_sequence = True
+
+ # make absolute path to file
+ absfilepath = os.path.normpath(os.path.join(staging_dir, filepath))
+ dirname = os.path.dirname(absfilepath)
+
+ # check if dirname exists
+ if not os.path.isdir(dirname):
+ raise CreatorError(
+ f"Directory '{dirname}' does not exist."
+ )
+
+ # collect all data from dirname
+ paths_for_collection = []
+ for file in os.listdir(dirname):
+ filepath = os.path.join(dirname, file)
+ paths_for_collection.append(filepath)
+
+ collections, _ = clique.assemble(paths_for_collection)
+
+ if collections:
+ collections = collections[0]
+ else:
+ if is_sequence:
+ raise CreatorError(
+ f"No collections found in directory '{dirname}'."
+ )
+
+ frame_start = None
+ frame_end = None
+ if is_sequence:
+ files = [os.path.basename(file) for file in collections]
+ frame_start = list(collections.indexes)[0]
+ frame_end = list(collections.indexes)[-1]
+ else:
+ files = basename
+
+ tags = deepcopy(repre_data["tags"])
+        # if slate is enabled, tag the representation with "has_slate"
+ if repre_data["slate"]:
+ tags.append("has_slate")
+
+ # get representation data
+ representation_data = {
+ "name": repre_name,
+ "ext": extension[1:],
+ "files": files,
+ "stagingDir": dirname,
+ "stagingDir_persistent": True,
+ "tags": tags,
+ }
+ if extension in VIDEO_EXTENSIONS:
+ representation_data.update({
+ "fps": repre_data["fps"],
+ "outputName": repre_name,
+ })
+
+ if explicit_output_name:
+ representation_data["outputName"] = explicit_output_name
+
+        if frame_start is not None:
+            representation_data["frameStart"] = frame_start
+        if frame_end is not None:
+            representation_data["frameEnd"] = frame_end
+
+ return representation_data
+
+ def _get_data_from_csv(
+ self, package_dir, filename
+ ):
+ """Generate instances from the csv file"""
+        # get current project name from the create context
+ project_name = self.create_context.get_current_project_name()
+
+ csv_file_path = os.path.join(
+ package_dir, filename
+ )
+
+ # make sure csv file contains columns from following list
+ required_columns = [
+ column["name"] for column in self.columns_config["columns"]
+ if column["required_column"]
+ ]
+
+ # read csv file
+ with open(csv_file_path, "r") as csv_file:
+ csv_content = csv_file.read()
+
+ # read csv file with DictReader
+ csv_reader = csv.DictReader(
+ StringIO(csv_content),
+ delimiter=self.columns_config["csv_delimiter"]
+ )
+
+ # fix fieldnames
+ # sometimes someone can keep extra space at the start or end of
+ # the column name
+ all_columns = [
+ " ".join(column.rsplit()) for column in csv_reader.fieldnames]
+
+ # return back fixed fieldnames
+ csv_reader.fieldnames = all_columns
+
+ # check if csv file contains all required columns
+ if any(column not in all_columns for column in required_columns):
+ raise CreatorError(
+ f"Missing required columns: {required_columns}"
+ )
+
+ csv_data = {}
+ # get data from csv file
+ for row in csv_reader:
+ # Get required columns first
+ # TODO: will need to be folder path in CSV
+ # TODO: `context_asset_name` is now `folder_path`
+ folder_path = self._get_row_value_with_validation(
+ "Folder Path", row)
+ task_name = self._get_row_value_with_validation(
+ "Task Name", row)
+ version = self._get_row_value_with_validation(
+ "Version", row)
+
+ # Get optional columns
+ variant = self._get_row_value_with_validation(
+ "Variant", row)
+ product_type = self._get_row_value_with_validation(
+ "Product Type", row)
+
+ pre_product_name = (
+ f"{task_name}{variant}{product_type}"
+ f"{version}".replace(" ", "").lower()
+ )
+
+ # get representation data
+ filename, representation_data = \
+ self._get_representation_row_data(row)
+
+ # TODO: batch query of all folder paths and task names
+
+ # get folder entity from folder path
+ folder_entity = get_folder_by_path(
+ project_name, folder_path)
+
+            # make sure the folder exists
+            if not folder_entity:
+                raise CreatorError(
+                    f"Folder '{folder_path}' not found."
+                )
+
+            # get the task entity from the folder entity
+            task_entity = get_task_by_name(
+                project_name, folder_entity["id"], task_name)
+
+            # check if the task name is a valid task on the folder
+            if not task_entity:
+                raise CreatorError(
+                    f"Task '{task_name}' not found on folder "
+                    f"'{folder_path}'."
+                )
+
+            # Gather all csv data into one dict and make sure there are
+            # no duplicates. Data was already validated and is sorted
+            # under the correct existing folder; representations are
+            # distributed under products following variants.
+ if folder_path not in csv_data:
+ csv_data[folder_path] = {
+ "folder_entity": folder_entity,
+ "products": {
+ pre_product_name: {
+ "task_name": task_name,
+ "task_type": task_entity["taskType"],
+ "variant": variant,
+ "product_type": product_type,
+ "version": version,
+ "representations": {
+ filename: representation_data,
+ },
+ }
+ }
+ }
+ else:
+ csv_products = csv_data[folder_path]["products"]
+ if pre_product_name not in csv_products:
+ csv_products[pre_product_name] = {
+ "task_name": task_name,
+ "task_type": task_entity["taskType"],
+ "variant": variant,
+ "product_type": product_type,
+ "version": version,
+ "representations": {
+ filename: representation_data,
+ },
+ }
+ else:
+ csv_representations = \
+ csv_products[pre_product_name]["representations"]
+ if filename in csv_representations:
+ raise CreatorError(
+ f"Duplicate filename '{filename}' in csv file."
+ )
+ csv_representations[filename] = representation_data
+
+ return csv_data
+
+ def _get_representation_row_data(self, row_data):
+ """Get representation row data"""
+ # Get required columns first
+ file_path = self._get_row_value_with_validation(
+ "File Path", row_data)
+ frame_start = self._get_row_value_with_validation(
+ "Frame Start", row_data)
+ frame_end = self._get_row_value_with_validation(
+ "Frame End", row_data)
+ handle_start = self._get_row_value_with_validation(
+ "Handle Start", row_data)
+ handle_end = self._get_row_value_with_validation(
+ "Handle End", row_data)
+ fps = self._get_row_value_with_validation(
+ "FPS", row_data)
+
+ # Get optional columns
+ thumbnail_path = self._get_row_value_with_validation(
+ "Version Thumbnail", row_data)
+ colorspace = self._get_row_value_with_validation(
+ "Representation Colorspace", row_data)
+ comment = self._get_row_value_with_validation(
+ "Version Comment", row_data)
+ repre = self._get_row_value_with_validation(
+ "Representation", row_data)
+ slate_exists = self._get_row_value_with_validation(
+ "Slate Exists", row_data)
+ repre_tags = self._get_row_value_with_validation(
+ "Representation Tags", row_data)
+
+ # convert tags value to list
+ tags_list = copy(self.representations_config["default_tags"])
+ if repre_tags:
+ tags_list = []
+ tags_delimiter = self.representations_config["tags_delimiter"]
+ # strip spaces from repre_tags
+            if tags_delimiter in repre_tags:
+                tags = repre_tags.split(tags_delimiter)
+                for _tag in tags:
+                    tags_list.append(_tag.strip().lower())
+            else:
+                tags_list.append(repre_tags.strip().lower())
+
+ representation_data = {
+ "colorspace": colorspace,
+ "comment": comment,
+ "representationName": repre,
+ "slate": slate_exists,
+ "tags": tags_list,
+ "thumbnailPath": thumbnail_path,
+ "frameStart": int(frame_start),
+ "frameEnd": int(frame_end),
+ "handleStart": int(handle_start),
+ "handleEnd": int(handle_end),
+ "fps": float(fps),
+ }
+ return file_path, representation_data
+
+ def _get_row_value_with_validation(
+ self, column_name, row_data, default_value=None
+ ):
+ """Get row value with validation"""
+
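+ # Illustrative column config entry (keys match the lookups
+ # below; the values are assumptions, the real config comes
+ # from settings):
+ # {"name": "FPS", "type": "decimal", "default": 25,
+ # "required_column": False, "validation_pattern": "^[0-9.]+$"}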
+ # get column data from column config
+ column_data = None
+ for column in self.columns_config["columns"]:
+ if column["name"] == column_name:
+ column_data = column
+ break
+
+ if not column_data:
+ raise CreatorError(
+ f"Column '{column_name}' not found in column config."
+ )
+
+ # get column value from row
+ column_value = row_data.get(column_name)
+ column_required = column_data["required_column"]
+
+ # check that a required column has a non-empty value
+ # ('get' returned None when the column is missing entirely)
+ if not column_value and column_required:
+ raise CreatorError(
+ f"Value in column '{column_name}' is required."
+ )
+
+ # get column type
+ column_type = column_data["type"]
+ # get column validation regex
+ column_validation = column_data["validation_pattern"]
+ # get column default value
+ column_default = default_value or column_data["default"]
+
+ if column_type in ["number", "decimal"] and column_default == 0:
+ column_default = None
+
+ # apply default when column value is empty or column is missing
+ if not column_value:
+ column_value = column_default
+
+ # set column value to correct type following column type
+ if column_type == "number" and column_value is not None:
+ column_value = int(column_value)
+ elif column_type == "decimal" and column_value is not None:
+ column_value = float(column_value)
+ elif column_type == "bool":
+ column_value = column_value in ["true", "True"]
+
+ # check if column value matches validation regex
+ if (
+ column_value is not None and
+ not re.match(str(column_validation), str(column_value))
+ ):
+ raise CreatorError(
+ f"Column '{column_name}' value '{column_value}' "
+ f"does not match validation regex '{column_validation}' \n"
+ f"Row data: {row_data} \n"
+ f"Column data: {column_data}"
+ )
+
+ return column_value
+
+ def _pass_data_to_csv_instance(
+ self, instance_data, staging_dir, filename
+ ):
+ """Pass CSV representation file to instance data"""
+
+ representation = {
+ "name": "csv",
+ "ext": "csv",
+ "files": filename,
+ "stagingDir": staging_dir,
+ "stagingDir_persistent": True,
+ }
+
+ instance_data.update({
+ "label": f"CSV: {filename}",
+ "representations": [representation],
+ "stagingDir": staging_dir,
+ "stagingDir_persistent": True,
+ })
+
+ def get_instance_attr_defs(self):
+ return [
+ BoolDef(
+ "add_review_family",
+ default=True,
+ label="Review"
+ )
+ ]
+
+ def get_pre_create_attr_defs(self):
+ """Creating pre-create attributes at creator plugin.
+
+ Returns:
+ list: list of attribute object instances
+ """
+ # Attribute definition for selecting the source CSV file
+ attr_defs = [
+ FileDef(
+ "csv_filepath_data",
+ folders=False,
+ extensions=[".csv"],
+ allow_sequences=False,
+ single_item=True,
+ label="CSV File",
+ ),
+ ]
+ return attr_defs
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py
new file mode 100644
index 0000000000..33536d0854
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py
@@ -0,0 +1,47 @@
+from pprint import pformat
+import pyblish.api
+from ayon_core.pipeline import publish
+
+
+class CollectCSVIngestInstancesData(
+ pyblish.api.InstancePlugin,
+ publish.AYONPyblishPluginMixin,
+ publish.ColormanagedPyblishPluginMixin
+):
+ """Collect CSV Ingest data from instance.
+ """
+
+ label = "Collect CSV Ingest instances data"
+ order = pyblish.api.CollectorOrder + 0.1
+ hosts = ["traypublisher"]
+ families = ["csv_ingest"]
+
+ def process(self, instance):
+
+ # expecting [(colorspace, repre_data), ...]
+ prepared_repres_data_items = instance.data[
+ "prepared_data_for_repres"]
+
+ for prep_repre_data in prepared_repres_data_items:
+ repre_type = prep_repre_data["type"]
+ colorspace = prep_repre_data["colorspace"]
+ repre_data = prep_repre_data["representation"]
+
+ if repre_type == "media" and colorspace is not None:
+ # colorspace name is passed from CSV column
+ self.set_representation_colorspace(
+ repre_data, instance.context, colorspace
+ )
+ elif repre_type == "media":
+ # TODO: implement parsing of colorspace file rules
+ self.log.warning(
+ "Colorspace is not defined in csv for following"
+ f" representation: {pformat(repre_data)}"
+ )
+ elif repre_type == "thumbnail":
+ # thumbnails are skipped from colorspace handling
+ pass
+
+ instance.data["representations"].append(repre_data)
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py
new file mode 100644
index 0000000000..4bdf7c0493
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py
@@ -0,0 +1,31 @@
+import pyblish.api
+
+from ayon_core.pipeline import publish
+
+
+class ExtractCSVFile(publish.Extractor):
+ """
+ Extractor export CSV file
+ """
+
+ label = "Extract CSV file"
+ order = pyblish.api.ExtractorOrder - 0.45
+ families = ["csv_ingest_file"]
+ hosts = ["traypublisher"]
+
+ def process(self, instance):
+
+ csv_file_data = instance.data["csvFileData"]
+
+ representation_csv = {
+ 'name': "csv_data",
+ 'ext': "csv",
+ 'files': csv_file_data["filename"],
+ "stagingDir": csv_file_data["staging_dir"],
+ "stagingDir_persistent": True
+ }
+
+ instance.data["representations"].append(representation_csv)
+
+ self.log.info("Added CSV file representation: {}".format(
+ representation_csv))
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py
index 3a62536507..0b4f8e16c1 100644
--- a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py
@@ -16,6 +16,7 @@ class ValidateExistingVersion(
order = ValidateContentsOrder
hosts = ["traypublisher"]
+ targets = ["local"]
actions = [RepairAction]
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py
index 4f11571efe..13f13b05bb 100644
--- a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py
@@ -16,6 +16,8 @@ class ValidateFrameRange(OptionalPyblishPluginMixin,
label = "Validate Frame Range"
hosts = ["traypublisher"]
families = ["render", "plate"]
+ targets = ["local"]
+
order = ValidateContentsOrder
optional = True
diff --git a/client/ayon_core/hosts/unreal/ue_workers.py b/client/ayon_core/hosts/unreal/ue_workers.py
index e3f8729c2e..256c0557be 100644
--- a/client/ayon_core/hosts/unreal/ue_workers.py
+++ b/client/ayon_core/hosts/unreal/ue_workers.py
@@ -260,11 +260,11 @@ class UEProjectGenerationWorker(UEWorker):
self.failed.emit(msg, return_code)
raise RuntimeError(msg)
- # ensure we have PySide2 installed in engine
+ # ensure we have PySide2/6 installed in engine
self.progress.emit(0)
self.stage_begin.emit(
- (f"Checking PySide2 installation... {stage_count} "
+ (f"Checking Qt bindings installation... {stage_count} "
f" out of {stage_count}"))
python_path = None
if platform.system().lower() == "windows":
@@ -287,11 +287,30 @@ class UEProjectGenerationWorker(UEWorker):
msg = f"Unreal Python not found at {python_path}"
self.failed.emit(msg, 1)
raise RuntimeError(msg)
- pyside_cmd = [python_path.as_posix(),
- "-m",
- "pip",
- "install",
- "pyside2"]
+
+ pyside_version = "PySide2"
+ ue_version = self.ue_version.split(".")
+ if int(ue_version[0]) == 5 and int(ue_version[1]) >= 4:
+ # Use PySide6 6.6.3 because 6.7.0 had a bug
+ # - 'QPushButton' can't be added to 'QBoxLayout'
+ pyside_version = "PySide6==6.6.3"
+
+ site_packages_prefix = python_path.parent.as_posix()
+
+ pyside_cmd = [
+ python_path.as_posix(),
+ "-m", "pip",
+ "install",
+ "--ignore-installed",
+ pyside_version,
+ ]
+
+ if platform.system().lower() == "windows":
+ pyside_cmd += ["--target", site_packages_prefix]
+
+ print(f"--- Installing {pyside_version} ...")
+ print(" ".join(pyside_cmd))
pyside_install = subprocess.Popen(pyside_cmd,
stdout=subprocess.PIPE,
@@ -306,8 +325,8 @@ class UEProjectGenerationWorker(UEWorker):
return_code = pyside_install.wait()
if return_code and return_code != 0:
- msg = ("Failed to create the project! "
- "The installation of PySide2 has failed!")
+ msg = (f"Failed to create the project! {return_code} "
+ f"The installation of {pyside_version} has failed!: {pyside_install}")
self.failed.emit(msg, return_code)
raise RuntimeError(msg)
diff --git a/client/ayon_core/lib/__init__.py b/client/ayon_core/lib/__init__.py
index 408262ca42..e436396c6c 100644
--- a/client/ayon_core/lib/__init__.py
+++ b/client/ayon_core/lib/__init__.py
@@ -27,6 +27,10 @@ from .local_settings import (
get_openpype_username,
)
from .ayon_connection import initialize_ayon_connection
+from .cache import (
+ CacheItem,
+ NestedCacheItem,
+)
from .events import (
emit_event,
register_event_callback
@@ -157,6 +161,9 @@ __all__ = [
"initialize_ayon_connection",
+ "CacheItem",
+ "NestedCacheItem",
+
"emit_event",
"register_event_callback",
diff --git a/client/ayon_core/lib/cache.py b/client/ayon_core/lib/cache.py
new file mode 100644
index 0000000000..dc83520f76
--- /dev/null
+++ b/client/ayon_core/lib/cache.py
@@ -0,0 +1,250 @@
+import time
+import collections
+
+InitInfo = collections.namedtuple(
+ "InitInfo",
+ ["default_factory", "lifetime"]
+)
+
+
+def _default_factory_func():
+ return None
+
+
+class CacheItem:
+ """Simple cache item with lifetime and default factory for default value.
+
+ Default factory should return default value that is used on init
+ and on reset.
+
+ Args:
+ default_factory (Optional[callable]): Function that returns default
+ value used on init and on reset.
+ lifetime (Optional[int]): Lifetime of the cache data in seconds.
+ Default lifetime is 120 seconds.
+
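+ Example:
+ >>> cache = CacheItem(default_factory=list, lifetime=2)
+ >>> cache.is_valid
+ False
+ >>> cache.update_data([1, 2])
+ >>> cache.get_data()
+ [1, 2]
+ >>> cache.is_valid
+ True
+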
+ """
+ def __init__(self, default_factory=None, lifetime=None):
+ if lifetime is None:
+ lifetime = 120
+ self._lifetime = lifetime
+ self._last_update = None
+ if default_factory is None:
+ default_factory = _default_factory_func
+ self._default_factory = default_factory
+ self._data = default_factory()
+
+ @property
+ def is_valid(self):
+ """Is cache valid to use.
+
+ Returns:
+ bool: True if cache is valid, False otherwise.
+
+ """
+ if self._last_update is None:
+ return False
+
+ return (time.time() - self._last_update) < self._lifetime
+
+ def set_lifetime(self, lifetime):
+ """Change lifetime of cache item.
+
+ Args:
+ lifetime (int): Lifetime of the cache data in seconds.
+ """
+
+ self._lifetime = lifetime
+
+ def set_invalid(self):
+ """Set cache as invalid."""
+
+ self._last_update = None
+
+ def reset(self):
+ """Set cache as invalid and reset data."""
+
+ self._last_update = None
+ self._data = self._default_factory()
+
+ def get_data(self):
+ """Receive cached data.
+
+ Returns:
+ Any: Any data that are cached.
+
+ """
+ return self._data
+
+ def update_data(self, data):
+ """Update cache data.
+
+ Args:
+ data (Any): Any data that are cached.
+
+ """
+ self._data = data
+ self._last_update = time.time()
+
+
+class NestedCacheItem:
+ """Helper for cached items stored in nested structure.
+
+ Example:
+ >>> cache = NestedCacheItem(levels=2, default_factory=lambda: 0)
+ >>> cache["a"]["b"].is_valid
+ False
+ >>> cache["a"]["b"].get_data()
+ 0
+ >>> cache["a"]["b"] = 1
+ >>> cache["a"]["b"].is_valid
+ True
+ >>> cache["a"]["b"].get_data()
+ 1
+ >>> cache.reset()
+ >>> cache["a"]["b"].is_valid
+ False
+
+ Args:
+ levels (int): Number of nested levels under which cache items
+ are stored.
+ default_factory (Optional[callable]): Function that returns default
+ value used on init and on reset.
+ lifetime (Optional[int]): Lifetime of the cache data in seconds.
+ Default value is based on default value of 'CacheItem'.
+ _init_info (Optional[InitInfo]): Private argument. Init info used
+ when a nested cache is created from its parent item.
+
+ """
+ def __init__(
+ self, levels=1, default_factory=None, lifetime=None, _init_info=None
+ ):
+ if levels < 1:
+ raise ValueError("Nested levels must be greater than 0")
+ self._data_by_key = {}
+ if _init_info is None:
+ _init_info = InitInfo(default_factory, lifetime)
+ self._init_info = _init_info
+ self._levels = levels
+
+ def __getitem__(self, key):
+ """Get cached data.
+
+ Args:
+ key (str): Key of the cache item.
+
+ Returns:
+ Union[NestedCacheItem, CacheItem]: Cache item.
+
+ """
+ cache = self._data_by_key.get(key)
+ if cache is None:
+ if self._levels > 1:
+ cache = NestedCacheItem(
+ levels=self._levels - 1,
+ _init_info=self._init_info
+ )
+ else:
+ cache = CacheItem(
+ self._init_info.default_factory,
+ self._init_info.lifetime
+ )
+ self._data_by_key[key] = cache
+ return cache
+
+ def __setitem__(self, key, value):
+ """Update cached data.
+
+ Args:
+ key (str): Key of the cache item.
+ value (Any): Any data that are cached.
+
+ """
+ if self._levels > 1:
+ raise AttributeError((
+ "{} does not support '__setitem__' at this nesting level."
+ " Go {} level(s) deeper."
+ ).format(self.__class__.__name__, self._levels - 1))
+ cache = self[key]
+ cache.update_data(value)
+
+ def get(self, key):
+ """Get cached data.
+
+ Args:
+ key (str): Key of the cache item.
+
+ Returns:
+ Union[NestedCacheItem, CacheItem]: Cache item.
+
+ """
+ return self[key]
+
+ def cached_count(self):
+ """Amount of cached items.
+
+ Returns:
+ int: Amount of cached items.
+
+ """
+ return len(self._data_by_key)
+
+ def clear_key(self, key):
+ """Clear cached item by key.
+
+ Args:
+ key (str): Key of the cache item.
+
+ """
+ self._data_by_key.pop(key, None)
+
+ def clear_invalid(self):
+ """Clear all invalid cache items.
+
+ Note:
+ To clear all cache items use 'reset'.
+
+ """
+ changed = {}
+ children_are_nested = self._levels > 1
+ for key, cache in tuple(self._data_by_key.items()):
+ if children_are_nested:
+ output = cache.clear_invalid()
+ if output:
+ changed[key] = output
+ if not cache.cached_count():
+ self._data_by_key.pop(key)
+ elif not cache.is_valid:
+ changed[key] = cache.get_data()
+ self._data_by_key.pop(key)
+ return changed
+
+ def reset(self):
+ """Reset cache.
+
+ Note:
+ To clear only invalid cache items use 'clear_invalid'.
+
+ """
+ self._data_by_key = {}
+
+ def set_lifetime(self, lifetime):
+ """Change lifetime of all children cache items.
+
+ Args:
+ lifetime (int): Lifetime of the cache data in seconds.
+
+ """
+ # 'InitInfo' is a namedtuple (immutable), so replace it instead
+ # of assigning to the attribute, which would raise AttributeError
+ self._init_info = self._init_info._replace(lifetime=lifetime)
+ for cache in self._data_by_key.values():
+ cache.set_lifetime(lifetime)
+
+ @property
+ def is_valid(self):
+ """Raise reasonable error when called on wrong level.
+
+ Raises:
+ AttributeError: If called on nested cache item.
+
+ """
+ raise AttributeError((
+ "{} does not support 'is_valid' at this nesting level."
+ " Go {} level(s) deeper."
+ ).format(self.__class__.__name__, self._levels))
diff --git a/client/ayon_core/pipeline/__init__.py b/client/ayon_core/pipeline/__init__.py
index d1a181a353..8fd00ee6b6 100644
--- a/client/ayon_core/pipeline/__init__.py
+++ b/client/ayon_core/pipeline/__init__.py
@@ -97,6 +97,15 @@ from .context_tools import (
get_current_folder_path,
get_current_task_name
)
+
+from .workfile import (
+ discover_workfile_build_plugins,
+ register_workfile_build_plugin,
+ deregister_workfile_build_plugin,
+ register_workfile_build_plugin_path,
+ deregister_workfile_build_plugin_path,
+)
+
install = install_host
uninstall = uninstall_host
@@ -198,6 +207,13 @@ __all__ = (
"get_current_folder_path",
"get_current_task_name",
+ # Workfile templates
+ "discover_workfile_build_plugins",
+ "register_workfile_build_plugin",
+ "deregister_workfile_build_plugin",
+ "register_workfile_build_plugin_path",
+ "deregister_workfile_build_plugin_path",
+
# Backwards compatible function names
"install",
"uninstall",
diff --git a/client/ayon_core/pipeline/anatomy/anatomy.py b/client/ayon_core/pipeline/anatomy/anatomy.py
index 2aa8eeddbc..98bbaa9bdc 100644
--- a/client/ayon_core/pipeline/anatomy/anatomy.py
+++ b/client/ayon_core/pipeline/anatomy/anatomy.py
@@ -3,11 +3,16 @@ import re
import copy
import platform
import collections
-import time
import ayon_api
-from ayon_core.lib import Logger, get_local_site_id, StringTemplate
+from ayon_core.lib import (
+ Logger,
+ get_local_site_id,
+ StringTemplate,
+ CacheItem,
+ NestedCacheItem,
+)
from ayon_core.addon import AddonsManager
from .exceptions import RootCombinationError, ProjectNotSet
@@ -397,62 +402,11 @@ class BaseAnatomy(object):
)
-class CacheItem:
- """Helper to cache data.
-
- Helper does not handle refresh of data and does not mark data as outdated.
- Who uses the object should check of outdated state on his own will.
- """
-
- default_lifetime = 10
-
- def __init__(self, lifetime=None):
- self._data = None
- self._cached = None
- self._lifetime = lifetime or self.default_lifetime
-
- @property
- def data(self):
- """Cached data/object.
-
- Returns:
- Any: Whatever was cached.
- """
-
- return self._data
-
- @property
- def is_outdated(self):
- """Item has outdated cache.
-
- Lifetime of cache item expired or was not yet set.
-
- Returns:
- bool: Item is outdated.
- """
-
- if self._cached is None:
- return True
- return (time.time() - self._cached) > self._lifetime
-
- def update_data(self, data):
- """Update cache of data.
-
- Args:
- data (Any): Data to cache.
- """
-
- self._data = data
- self._cached = time.time()
-
-
class Anatomy(BaseAnatomy):
- _sitesync_addon_cache = CacheItem()
- _project_cache = collections.defaultdict(CacheItem)
- _default_site_id_cache = collections.defaultdict(CacheItem)
- _root_overrides_cache = collections.defaultdict(
- lambda: collections.defaultdict(CacheItem)
- )
+ _project_cache = NestedCacheItem(lifetime=10)
+ _sitesync_addon_cache = CacheItem(lifetime=60)
+ _default_site_id_cache = NestedCacheItem(lifetime=60)
+ _root_overrides_cache = NestedCacheItem(2, lifetime=60)
def __init__(
self, project_name=None, site_name=None, project_entity=None
@@ -477,18 +431,18 @@ class Anatomy(BaseAnatomy):
@classmethod
def get_project_entity_from_cache(cls, project_name):
project_cache = cls._project_cache[project_name]
- if project_cache.is_outdated:
+ if not project_cache.is_valid:
project_cache.update_data(ayon_api.get_project(project_name))
- return copy.deepcopy(project_cache.data)
+ return copy.deepcopy(project_cache.get_data())
@classmethod
def get_sitesync_addon(cls):
- if cls._sitesync_addon_cache.is_outdated:
+ if not cls._sitesync_addon_cache.is_valid:
manager = AddonsManager()
cls._sitesync_addon_cache.update_data(
manager.get_enabled_addon("sitesync")
)
- return cls._sitesync_addon_cache.data
+ return cls._sitesync_addon_cache.get_data()
@classmethod
def _get_studio_roots_overrides(cls, project_name):
@@ -533,14 +487,14 @@ class Anatomy(BaseAnatomy):
elif not site_name:
# Use sync server to receive active site name
project_cache = cls._default_site_id_cache[project_name]
- if project_cache.is_outdated:
+ if not project_cache.is_valid:
project_cache.update_data(
sitesync_addon.get_active_site_type(project_name)
)
- site_name = project_cache.data
+ site_name = project_cache.get_data()
site_cache = cls._root_overrides_cache[project_name][site_name]
- if site_cache.is_outdated:
+ if not site_cache.is_valid:
if site_name == "studio":
# Handle studio root overrides without sync server
# - studio root overrides can be done even without sync server
@@ -553,4 +507,4 @@ class Anatomy(BaseAnatomy):
project_name, site_name
)
site_cache.update_data(roots_overrides)
- return site_cache.data
+ return site_cache.get_data()
diff --git a/client/ayon_core/pipeline/create/context.py b/client/ayon_core/pipeline/create/context.py
index ca9896fb3f..b8618738fb 100644
--- a/client/ayon_core/pipeline/create/context.py
+++ b/client/ayon_core/pipeline/create/context.py
@@ -1790,10 +1790,10 @@ class CreateContext:
creator_identifier = creator_class.identifier
if creator_identifier in creators:
- self.log.warning((
- "Duplicated Creator identifier. "
- "Using first and skipping following"
- ))
+ self.log.warning(
+ "Duplicate Creator identifier: '%s'. Using first Creator "
+ "and skipping: %s", creator_identifier, creator_class
+ )
continue
# Filter by host name
diff --git a/client/ayon_core/pipeline/farm/pyblish_functions.py b/client/ayon_core/pipeline/farm/pyblish_functions.py
index eb6f8569d9..72deee185e 100644
--- a/client/ayon_core/pipeline/farm/pyblish_functions.py
+++ b/client/ayon_core/pipeline/farm/pyblish_functions.py
@@ -225,6 +225,7 @@ def create_skeleton_instance(
instance_skeleton_data = {
"productType": product_type,
"productName": data["productName"],
+ "task": data["task"],
"families": families,
"folderPath": data["folderPath"],
"frameStart": time_data.start,
diff --git a/client/ayon_core/pipeline/workfile/__init__.py b/client/ayon_core/pipeline/workfile/__init__.py
index 36766e3a04..05f939024c 100644
--- a/client/ayon_core/pipeline/workfile/__init__.py
+++ b/client/ayon_core/pipeline/workfile/__init__.py
@@ -21,6 +21,15 @@ from .utils import (
from .build_workfile import BuildWorkfile
+from .workfile_template_builder import (
+ discover_workfile_build_plugins,
+ register_workfile_build_plugin,
+ deregister_workfile_build_plugin,
+ register_workfile_build_plugin_path,
+ deregister_workfile_build_plugin_path,
+)
+
+
__all__ = (
"get_workfile_template_key_from_context",
"get_workfile_template_key",
@@ -39,4 +48,10 @@ __all__ = (
"should_open_workfiles_tool_on_launch",
"BuildWorkfile",
+
+ "discover_workfile_build_plugins",
+ "register_workfile_build_plugin",
+ "deregister_workfile_build_plugin",
+ "register_workfile_build_plugin_path",
+ "deregister_workfile_build_plugin_path",
)
diff --git a/client/ayon_core/pipeline/workfile/workfile_template_builder.py b/client/ayon_core/pipeline/workfile/workfile_template_builder.py
index 5e63ba444a..bb94d87483 100644
--- a/client/ayon_core/pipeline/workfile/workfile_template_builder.py
+++ b/client/ayon_core/pipeline/workfile/workfile_template_builder.py
@@ -36,6 +36,7 @@ from ayon_core.lib import (
filter_profiles,
attribute_definitions,
)
+from ayon_core.lib.events import EventSystem, EventCallback, Event
from ayon_core.lib.attribute_definitions import get_attributes_keys
from ayon_core.pipeline import Anatomy
from ayon_core.pipeline.load import (
@@ -43,6 +44,13 @@ from ayon_core.pipeline.load import (
get_representation_contexts,
load_with_repre_context,
)
+from ayon_core.pipeline.plugin_discover import (
+ discover,
+ register_plugin,
+ register_plugin_path,
+ deregister_plugin,
+ deregister_plugin_path
+)
from ayon_core.pipeline.create import (
discover_legacy_creator_plugins,
@@ -124,6 +132,8 @@ class AbstractTemplateBuilder(object):
self._current_task_entity = _NOT_SET
self._linked_folder_entities = _NOT_SET
+ self._event_system = EventSystem()
+
@property
def project_name(self):
if isinstance(self._host, HostBase):
@@ -211,10 +221,14 @@ class AbstractTemplateBuilder(object):
Returns:
List[PlaceholderPlugin]: Plugin classes available for host.
"""
+ # Backwards compatibility
if hasattr(self._host, "get_workfile_build_placeholder_plugins"):
return self._host.get_workfile_build_placeholder_plugins()
- return []
+
+ return discover(PlaceholderPlugin)
@property
def host(self):
@@ -257,6 +271,8 @@ class AbstractTemplateBuilder(object):
self._project_settings = None
+ self._event_system = EventSystem()
+
self.clear_shared_data()
self.clear_shared_populate_data()
@@ -329,7 +345,7 @@ class AbstractTemplateBuilder(object):
is good practice to check if the same value is not already stored under
different key or if the key is not already used for something else.
- Key should be self explanatory to content.
+ Key should be self-explanatory to content.
- wrong: 'folder'
- good: 'folder_name'
@@ -375,7 +391,7 @@ class AbstractTemplateBuilder(object):
is good practice to check if the same value is not already stored under
different key or if the key is not already used for something else.
- Key should be self explanatory to content.
+ Key should be self-explanatory to content.
- wrong: 'folder'
- good: 'folder_path'
@@ -395,7 +411,7 @@ class AbstractTemplateBuilder(object):
is good practice to check if the same value is not already stored under
different key or if the key is not already used for something else.
- Key should be self explanatory to content.
+ Key should be self-explanatory to content.
- wrong: 'folder'
- good: 'folder_path'
@@ -466,7 +482,7 @@ class AbstractTemplateBuilder(object):
return list(sorted(
placeholders,
- key=lambda i: i.order
+ key=lambda placeholder: placeholder.order
))
def build_template(
@@ -498,15 +514,21 @@ class AbstractTemplateBuilder(object):
process if version is created
"""
- template_preset = self.get_template_preset()
-
- if template_path is None:
- template_path = template_preset["path"]
-
- if keep_placeholders is None:
- keep_placeholders = template_preset["keep_placeholder"]
- if create_first_version is None:
- create_first_version = template_preset["create_first_version"]
+ if any(
+ value is None
+ for value in [
+ template_path,
+ keep_placeholders,
+ create_first_version,
+ ]
+ ):
+ template_preset = self.get_template_preset()
+ if template_path is None:
+ template_path = template_preset["path"]
+ if keep_placeholders is None:
+ keep_placeholders = template_preset["keep_placeholder"]
+ if create_first_version is None:
+ create_first_version = template_preset["create_first_version"]
# check if first version is created
created_version_workfile = False
@@ -685,7 +707,7 @@ class AbstractTemplateBuilder(object):
for placeholder in placeholders
}
all_processed = len(placeholders) == 0
- # Counter is checked at the ned of a loop so the loop happens at least
+ # Counter is checked at the end of a loop so the loop happens at least
# once.
iter_counter = 0
while not all_processed:
@@ -729,6 +751,16 @@ class AbstractTemplateBuilder(object):
placeholder.set_finished()
+ # Trigger on_depth_processed event
+ self.emit_event(
+ topic="template.depth_processed",
+ data={
+ "depth": iter_counter,
+ "placeholders_by_scene_id": placeholder_by_scene_id
+ },
+ source="builder"
+ )
+
# Clear shared data before getting new placeholders
self.clear_shared_populate_data()
@@ -747,6 +779,16 @@ class AbstractTemplateBuilder(object):
placeholder_by_scene_id[identifier] = placeholder
placeholders.append(placeholder)
+ # Trigger on_finished event
+ self.emit_event(
+ topic="template.finished",
+ data={
+ "depth": iter_counter,
+ "placeholders_by_scene_id": placeholder_by_scene_id,
+ },
+ source="builder"
+ )
+
self.refresh()
def _get_build_profiles(self):
@@ -772,12 +814,14 @@ class AbstractTemplateBuilder(object):
- 'project_settings/{host name}/templated_workfile_build/profiles'
Returns:
- str: Path to a template file with placeholders.
+ dict: Dictionary with `path`, `keep_placeholder` and
+ `create_first_version` settings from the template preset
+ for current context.
Raises:
TemplateProfileNotFound: When profiles are not filled.
TemplateLoadFailed: Profile was found but path is not set.
- TemplateNotFound: Path was set but file does not exists.
+ TemplateNotFound: Path was set but file does not exist.
"""
host_name = self.host_name
@@ -872,6 +916,30 @@ class AbstractTemplateBuilder(object):
"create_first_version": create_first_version
}
+ def emit_event(self, topic, data=None, source=None) -> Event:
+ return self._event_system.emit(topic, data, source)
+
+ def add_event_callback(self, topic, callback, order=None):
+ return self._event_system.add_callback(topic, callback, order=order)
+
+ def add_on_finished_callback(
+ self, callback, order=None
+ ) -> EventCallback:
+ return self.add_event_callback(
+ topic="template.finished",
+ callback=callback,
+ order=order
+ )
+
+ def add_on_depth_processed_callback(
+ self, callback, order=None
+ ) -> EventCallback:
+ return self.add_event_callback(
+ topic="template.depth_processed",
+ callback=callback,
+ order=order
+ )
+
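+ # Illustrative usage (assumes a concrete 'builder' instance):
+ # builder.add_on_depth_processed_callback(
+ # lambda event: print(event.data["depth"]))
+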
@six.add_metaclass(ABCMeta)
class PlaceholderPlugin(object):
@@ -1045,7 +1113,7 @@ class PlaceholderPlugin(object):
Using shared data from builder but stored under plugin identifier.
- Key should be self explanatory to content.
+ Key should be self-explanatory to content.
- wrong: 'folder'
- good: 'folder_path'
@@ -1085,7 +1153,7 @@ class PlaceholderPlugin(object):
Using shared data from builder but stored under plugin identifier.
- Key should be self explanatory to content.
+ Key should be self-explanatory to content.
- wrong: 'folder'
- good: 'folder_path'
@@ -1107,10 +1175,10 @@ class PlaceholderItem(object):
"""Item representing single item in scene that is a placeholder to process.
Items are always created and updated by their plugins. Each plugin can use
- modified class of 'PlacehoderItem' but only to add more options instead of
+ modified class of 'PlaceholderItem' but only to add more options instead of
new other.
- Scene identifier is used to avoid processing of the palceholder item
+ Scene identifier is used to avoid processing of the placeholder item
multiple times so must be unique across whole workfile builder.
Args:
@@ -1162,7 +1230,7 @@ class PlaceholderItem(object):
"""Placeholder data which can modify how placeholder is processed.
Possible general keys
- - order: Can define the order in which is palceholder processed.
+ - order: Can define the order in which is placeholder processed.
Lower == earlier.
Other keys are defined by placeholder and should validate them on item
@@ -1264,11 +1332,9 @@ class PlaceholderLoadMixin(object):
"""Unified attribute definitions for load placeholder.
Common function for placeholder plugins used for loading of
- repsentations. Use it in 'get_placeholder_options'.
+ representations. Use it in 'get_placeholder_options'.
Args:
- plugin (PlaceholderPlugin): Plugin used for loading of
- representations.
options (Dict[str, Any]): Already available options which are used
as defaults for attributes.
@@ -1468,7 +1534,9 @@ class PlaceholderLoadMixin(object):
product_name_regex = None
if product_name_regex_value:
product_name_regex = re.compile(product_name_regex_value)
- product_type = placeholder.data["family"]
+ product_type = placeholder.data.get("product_type")
+ if product_type is None:
+ product_type = placeholder.data["family"]
builder_type = placeholder.data["builder_type"]
folder_ids = []
@@ -1529,35 +1597,22 @@ class PlaceholderLoadMixin(object):
pass
- def _reduce_last_version_repre_entities(self, representations):
- """Reduce representations to last verison."""
+ def _reduce_last_version_repre_entities(self, repre_contexts):
+ """Reduce representations to last version."""
- mapping = {}
- # TODO use representation context with entities
- # - using 'folder', 'subset' and 'version' from context on
- # representation is danger
- for repre_entity in representations:
- repre_context = repre_entity["context"]
-
- folder_name = repre_context["asset"]
- product_name = repre_context["subset"]
- version = repre_context.get("version", -1)
-
- if folder_name not in mapping:
- mapping[folder_name] = {}
-
- product_mapping = mapping[folder_name]
- if product_name not in product_mapping:
- product_mapping[product_name] = collections.defaultdict(list)
-
- version_mapping = product_mapping[product_name]
- version_mapping[version].append(repre_entity)
+ version_mapping_by_product_id = {}
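+ # Illustrative shape: {product_id: {version: [repre_context, ...]}}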
+ for repre_context in repre_contexts:
+ product_id = repre_context["product"]["id"]
+ version = repre_context["version"]["version"]
+ version_mapping = version_mapping_by_product_id.setdefault(
+ product_id, {}
+ )
+ version_mapping.setdefault(version, []).append(repre_context)
output = []
- for product_mapping in mapping.values():
- for version_mapping in product_mapping.values():
- last_version = tuple(sorted(version_mapping.keys()))[-1]
- output.extend(version_mapping[last_version])
+ for version_mapping in version_mapping_by_product_id.values():
+ last_version = max(version_mapping.keys())
+ output.extend(version_mapping[last_version])
return output
def populate_load_placeholder(self, placeholder, ignore_repre_ids=None):
@@ -1585,32 +1640,33 @@ class PlaceholderLoadMixin(object):
loader_name = placeholder.data["loader"]
loader_args = self.parse_loader_args(placeholder.data["loader_args"])
- placeholder_representations = self._get_representations(placeholder)
+ placeholder_representations = [
+ repre_entity
+ for repre_entity in self._get_representations(placeholder)
+ if repre_entity["id"] not in ignore_repre_ids
+ ]
- filtered_representations = []
- for representation in self._reduce_last_version_repre_entities(
- placeholder_representations
- ):
- repre_id = representation["id"]
- if repre_id not in ignore_repre_ids:
- filtered_representations.append(representation)
-
- if not filtered_representations:
+ repre_load_contexts = get_representation_contexts(
+ self.project_name, placeholder_representations
+ )
+ filtered_repre_contexts = self._reduce_last_version_repre_entities(
+ repre_load_contexts.values()
+ )
+ if not filtered_repre_contexts:
self.log.info((
"There's no representation for this placeholder: {}"
).format(placeholder.scene_identifier))
+ if not placeholder.data.get("keep_placeholder", True):
+ self.delete_placeholder(placeholder)
return
- repre_load_contexts = get_representation_contexts(
- self.project_name, filtered_representations
- )
loaders_by_name = self.builder.get_loaders_by_name()
self._before_placeholder_load(
placeholder
)
failed = False
- for repre_load_context in repre_load_contexts.values():
+ for repre_load_context in filtered_repre_contexts:
folder_path = repre_load_context["folder"]["path"]
product_name = repre_load_context["product"]["name"]
representation = repre_load_context["representation"]
@@ -1695,8 +1751,6 @@ class PlaceholderCreateMixin(object):
publishable instances. Use it with 'get_placeholder_options'.
Args:
- plugin (PlaceholderPlugin): Plugin used for creating of
- publish instances.
options (Dict[str, Any]): Already available options which are used
as defaults for attributes.
@@ -1918,3 +1972,23 @@ class CreatePlaceholderItem(PlaceholderItem):
def create_failed(self, creator_data):
self._failed_created_publish_instances.append(creator_data)
+
+
+def discover_workfile_build_plugins(*args, **kwargs):
+ return discover(PlaceholderPlugin, *args, **kwargs)
+
+
+def register_workfile_build_plugin(plugin: PlaceholderPlugin):
+ register_plugin(PlaceholderPlugin, plugin)
+
+
+def deregister_workfile_build_plugin(plugin: PlaceholderPlugin):
+ deregister_plugin(PlaceholderPlugin, plugin)
+
+
+def register_workfile_build_plugin_path(path: str):
+ register_plugin_path(PlaceholderPlugin, path)
+
+
+def deregister_workfile_build_plugin_path(path: str):
+ deregister_plugin_path(PlaceholderPlugin, path)
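+
+
+# Illustrative usage from a host integration (the path is an assumption):
+# register_workfile_build_plugin_path("<host_dir>/plugins/workfile_build")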
diff --git a/client/ayon_core/plugins/load/delete_old_versions.py b/client/ayon_core/plugins/load/delete_old_versions.py
index 8e04fd9827..62302e7123 100644
--- a/client/ayon_core/plugins/load/delete_old_versions.py
+++ b/client/ayon_core/plugins/load/delete_old_versions.py
@@ -1,501 +1,426 @@
-# TODO This plugin is not converted for AYON
-#
-# import collections
-# import os
-# import uuid
-#
-# import clique
-# import ayon_api
-# from pymongo import UpdateOne
-# import qargparse
-# from qtpy import QtWidgets, QtCore
-#
-# from ayon_core import style
-# from ayon_core.addon import AddonsManager
-# from ayon_core.lib import format_file_size
-# from ayon_core.pipeline import load, Anatomy
-# from ayon_core.pipeline.load import (
-# get_representation_path_with_anatomy,
-# InvalidRepresentationContext,
-# )
-#
-#
-# class DeleteOldVersions(load.ProductLoaderPlugin):
-# """Deletes specific number of old version"""
-#
-# is_multiple_contexts_compatible = True
-# sequence_splitter = "__sequence_splitter__"
-#
-# representations = {"*"}
-# product_types = {"*"}
-# tool_names = ["library_loader"]
-#
-# label = "Delete Old Versions"
-# order = 35
-# icon = "trash"
-# color = "#d8d8d8"
-#
-# options = [
-# qargparse.Integer(
-# "versions_to_keep", default=2, min=0, help="Versions to keep:"
-# ),
-# qargparse.Boolean(
-# "remove_publish_folder", help="Remove publish folder:"
-# )
-# ]
-#
-# def delete_whole_dir_paths(self, dir_paths, delete=True):
-# size = 0
-#
-# for dir_path in dir_paths:
-# # Delete all files and fodlers in dir path
-# for root, dirs, files in os.walk(dir_path, topdown=False):
-# for name in files:
-# file_path = os.path.join(root, name)
-# size += os.path.getsize(file_path)
-# if delete:
-# os.remove(file_path)
-# self.log.debug("Removed file: {}".format(file_path))
-#
-# for name in dirs:
-# if delete:
-# os.rmdir(os.path.join(root, name))
-#
-# if not delete:
-# continue
-#
-# # Delete even the folder and it's parents folders if they are empty
-# while True:
-# if not os.path.exists(dir_path):
-# dir_path = os.path.dirname(dir_path)
-# continue
-#
-# if len(os.listdir(dir_path)) != 0:
-# break
-#
-# os.rmdir(os.path.join(dir_path))
-#
-# return size
-#
-# def path_from_representation(self, representation, anatomy):
-# try:
-# context = representation["context"]
-# except KeyError:
-# return (None, None)
-#
-# try:
-# path = get_representation_path_with_anatomy(
-# representation, anatomy
-# )
-# except InvalidRepresentationContext:
-# return (None, None)
-#
-# sequence_path = None
-# if "frame" in context:
-# context["frame"] = self.sequence_splitter
-# sequence_path = get_representation_path_with_anatomy(
-# representation, anatomy
-# )
-#
-# if sequence_path:
-# sequence_path = sequence_path.normalized()
-#
-# return (path.normalized(), sequence_path)
-#
-# def delete_only_repre_files(self, dir_paths, file_paths, delete=True):
-# size = 0
-#
-# for dir_id, dir_path in dir_paths.items():
-# dir_files = os.listdir(dir_path)
-# collections, remainders = clique.assemble(dir_files)
-# for file_path, seq_path in file_paths[dir_id]:
-# file_path_base = os.path.split(file_path)[1]
-# # Just remove file if `frame` key was not in context or
-# # filled path is in remainders (single file sequence)
-# if not seq_path or file_path_base in remainders:
-# if not os.path.exists(file_path):
-# self.log.debug(
-# "File was not found: {}".format(file_path)
-# )
-# continue
-#
-# size += os.path.getsize(file_path)
-#
-# if delete:
-# os.remove(file_path)
-# self.log.debug("Removed file: {}".format(file_path))
-#
-# if file_path_base in remainders:
-# remainders.remove(file_path_base)
-# continue
-#
-# seq_path_base = os.path.split(seq_path)[1]
-# head, tail = seq_path_base.split(self.sequence_splitter)
-#
-# final_col = None
-# for collection in collections:
-# if head != collection.head or tail != collection.tail:
-# continue
-# final_col = collection
-# break
-#
-# if final_col is not None:
-# # Fill full path to head
-# final_col.head = os.path.join(dir_path, final_col.head)
-# for _file_path in final_col:
-# if os.path.exists(_file_path):
-#
-# size += os.path.getsize(_file_path)
-#
-# if delete:
-# os.remove(_file_path)
-# self.log.debug(
-# "Removed file: {}".format(_file_path)
-# )
-#
-# _seq_path = final_col.format("{head}{padding}{tail}")
-# self.log.debug("Removed files: {}".format(_seq_path))
-# collections.remove(final_col)
-#
-# elif os.path.exists(file_path):
-# size += os.path.getsize(file_path)
-#
-# if delete:
-# os.remove(file_path)
-# self.log.debug("Removed file: {}".format(file_path))
-# else:
-# self.log.debug(
-# "File was not found: {}".format(file_path)
-# )
-#
-# # Delete as much as possible parent folders
-# if not delete:
-# return size
-#
-# for dir_path in dir_paths.values():
-# while True:
-# if not os.path.exists(dir_path):
-# dir_path = os.path.dirname(dir_path)
-# continue
-#
-# if len(os.listdir(dir_path)) != 0:
-# break
-#
-# self.log.debug("Removed folder: {}".format(dir_path))
-# os.rmdir(dir_path)
-#
-# return size
-#
-# def message(self, text):
-# msgBox = QtWidgets.QMessageBox()
-# msgBox.setText(text)
-# msgBox.setStyleSheet(style.load_stylesheet())
-# msgBox.setWindowFlags(
-# msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint
-# )
-# msgBox.exec_()
-#
-# def get_data(self, context, versions_count):
-# product_entity = context["product"]
-# folder_entity = context["folder"]
-# project_name = context["project"]["name"]
-# anatomy = Anatomy(project_name)
-#
-# versions = list(ayon_api.get_versions(
-# project_name, product_ids=[product_entity["id"]]
-# ))
-#
-# versions_by_parent = collections.defaultdict(list)
-# for ent in versions:
-# versions_by_parent[ent["productId"]].append(ent)
-#
-# def sort_func(ent):
-# return int(ent["version"])
-#
-# all_last_versions = []
-# for _parent_id, _versions in versions_by_parent.items():
-# for idx, version in enumerate(
-# sorted(_versions, key=sort_func, reverse=True)
-# ):
-# if idx >= versions_count:
-# break
-# all_last_versions.append(version)
-#
-# self.log.debug("Collected versions ({})".format(len(versions)))
-#
-# # Filter latest versions
-# for version in all_last_versions:
-# versions.remove(version)
-#
-# # Update versions_by_parent without filtered versions
-# versions_by_parent = collections.defaultdict(list)
-# for ent in versions:
-# versions_by_parent[ent["productId"]].append(ent)
-#
-# # Filter already deleted versions
-# versions_to_pop = []
-# for version in versions:
-# version_tags = version["data"].get("tags")
-# if version_tags and "deleted" in version_tags:
-# versions_to_pop.append(version)
-#
-# for version in versions_to_pop:
-# msg = "Folder: \"{}\" | Product: \"{}\" | Version: \"{}\"".format(
-# folder_entity["path"],
-# product_entity["name"],
-# version["version"]
-# )
-# self.log.debug((
-# "Skipping version. Already tagged as `deleted`. < {} >"
-# ).format(msg))
-# versions.remove(version)
-#
-# version_ids = [ent["id"] for ent in versions]
-#
-# self.log.debug(
-# "Filtered versions to delete ({})".format(len(version_ids))
-# )
-#
-# if not version_ids:
-# msg = "Skipping processing. Nothing to delete on {}/{}".format(
-# folder_entity["path"], product_entity["name"]
-# )
-# self.log.info(msg)
-# print(msg)
-# return
-#
-# repres = list(ayon_api.get_representations(
-# project_name, version_ids=version_ids
-# ))
-#
-# self.log.debug(
-# "Collected representations to remove ({})".format(len(repres))
-# )
-#
-# dir_paths = {}
-# file_paths_by_dir = collections.defaultdict(list)
-# for repre in repres:
-# file_path, seq_path = self.path_from_representation(
-# repre, anatomy
-# )
-# if file_path is None:
-# self.log.debug((
-# "Could not format path for represenation \"{}\""
-# ).format(str(repre)))
-# continue
-#
-# dir_path = os.path.dirname(file_path)
-# dir_id = None
-# for _dir_id, _dir_path in dir_paths.items():
-# if _dir_path == dir_path:
-# dir_id = _dir_id
-# break
-#
-# if dir_id is None:
-# dir_id = uuid.uuid4()
-# dir_paths[dir_id] = dir_path
-#
-# file_paths_by_dir[dir_id].append([file_path, seq_path])
-#
-# dir_ids_to_pop = []
-# for dir_id, dir_path in dir_paths.items():
-# if os.path.exists(dir_path):
-# continue
-#
-# dir_ids_to_pop.append(dir_id)
-#
-# # Pop dirs from both dictionaries
-# for dir_id in dir_ids_to_pop:
-# dir_paths.pop(dir_id)
-# paths = file_paths_by_dir.pop(dir_id)
-# # TODO report of missing directories?
-# paths_msg = ", ".join([
-# "'{}'".format(path[0].replace("\\", "/")) for path in paths
-# ])
-# self.log.debug((
-# "Folder does not exist. Deleting it's files skipped: {}"
-# ).format(paths_msg))
-#
-# return {
-# "dir_paths": dir_paths,
-# "file_paths_by_dir": file_paths_by_dir,
-# "versions": versions,
-# "folder": folder_entity,
-# "product": product_entity,
-# "archive_product": versions_count == 0
-# }
-#
-# def main(self, project_name, data, remove_publish_folder):
-# # Size of files.
-# size = 0
-# if not data:
-# return size
-#
-# if remove_publish_folder:
-# size = self.delete_whole_dir_paths(data["dir_paths"].values())
-# else:
-# size = self.delete_only_repre_files(
-# data["dir_paths"], data["file_paths_by_dir"]
-# )
-#
-# mongo_changes_bulk = []
-# for version in data["versions"]:
-# orig_version_tags = version["data"].get("tags") or []
-# version_tags = [tag for tag in orig_version_tags]
-# if "deleted" not in version_tags:
-# version_tags.append("deleted")
-#
-# if version_tags == orig_version_tags:
-# continue
-#
-# update_query = {"id": version["id"]}
-# update_data = {"$set": {"data.tags": version_tags}}
-# mongo_changes_bulk.append(UpdateOne(update_query, update_data))
-#
-# if data["archive_product"]:
-# mongo_changes_bulk.append(UpdateOne(
-# {
-# "id": data["product"]["id"],
-# "type": "subset"
-# },
-# {"$set": {"type": "archived_subset"}}
-# ))
-#
-# if mongo_changes_bulk:
-# dbcon = AvalonMongoDB()
-# dbcon.Session["AYON_PROJECT_NAME"] = project_name
-# dbcon.install()
-# dbcon.bulk_write(mongo_changes_bulk)
-# dbcon.uninstall()
-#
-# self._ftrack_delete_versions(data)
-#
-# return size
-#
-# def _ftrack_delete_versions(self, data):
-# """Delete version on ftrack.
-#
-# Handling of ftrack logic in this plugin is not ideal. But in OP3 it is
-# almost impossible to solve the issue other way.
-#
-# Note:
-# Asset versions on ftrack are not deleted but marked as
-# "not published" which cause that they're invisible.
-#
-# Args:
-# data (dict): Data sent to product loader with full context.
-# """
-#
-# # First check for ftrack id on folder entity
-# # - skip if ther is none
-# ftrack_id = data["folder"]["attrib"].get("ftrackId")
-# if not ftrack_id:
-# self.log.info((
-# "Folder does not have filled ftrack id. Skipped delete"
-# " of ftrack version."
-# ))
-# return
-#
-# # Check if ftrack module is enabled
-# addons_manager = AddonsManager()
-# ftrack_addon = addons_manager.get("ftrack")
-# if not ftrack_addon or not ftrack_addon.enabled:
-# return
-#
-# import ftrack_api
-#
-# session = ftrack_api.Session()
-# product_name = data["product"]["name"]
-# versions = {
-# '"{}"'.format(version_doc["name"])
-# for version_doc in data["versions"]
-# }
-# asset_versions = session.query(
-# (
-# "select id, is_published from AssetVersion where"
-# " asset.parent.id is \"{}\""
-# " and asset.name is \"{}\""
-# " and version in ({})"
-# ).format(
-# ftrack_id,
-# product_name,
-# ",".join(versions)
-# )
-# ).all()
-#
-# # Set attribute `is_published` to `False` on ftrack AssetVersions
-# for asset_version in asset_versions:
-# asset_version["is_published"] = False
-#
-# try:
-# session.commit()
-#
-# except Exception:
-# msg = (
-# "Could not set `is_published` attribute to `False`"
-# " for selected AssetVersions."
-# )
-# self.log.error(msg)
-# self.message(msg)
-#
-# def load(self, contexts, name=None, namespace=None, options=None):
-# try:
-# size = 0
-# for count, context in enumerate(contexts):
-# versions_to_keep = 2
-# remove_publish_folder = False
-# if options:
-# versions_to_keep = options.get(
-# "versions_to_keep", versions_to_keep
-# )
-# remove_publish_folder = options.get(
-# "remove_publish_folder", remove_publish_folder
-# )
-#
-# data = self.get_data(context, versions_to_keep)
-# if not data:
-# continue
-#
-# project_name = context["project"]["name"]
-# size += self.main(project_name, data, remove_publish_folder)
-# print("Progressing {}/{}".format(count + 1, len(contexts)))
-#
-# msg = "Total size of files: {}".format(format_file_size(size))
-# self.log.info(msg)
-# self.message(msg)
-#
-# except Exception:
-# self.log.error("Failed to delete versions.", exc_info=True)
-#
-#
-# class CalculateOldVersions(DeleteOldVersions):
-# """Calculate file size of old versions"""
-# label = "Calculate Old Versions"
-# order = 30
-# tool_names = ["library_loader"]
-#
-# options = [
-# qargparse.Integer(
-# "versions_to_keep", default=2, min=0, help="Versions to keep:"
-# ),
-# qargparse.Boolean(
-# "remove_publish_folder", help="Remove publish folder:"
-# )
-# ]
-#
-# def main(self, project_name, data, remove_publish_folder):
-# size = 0
-#
-# if not data:
-# return size
-#
-# if remove_publish_folder:
-# size = self.delete_whole_dir_paths(
-# data["dir_paths"].values(), delete=False
-# )
-# else:
-# size = self.delete_only_repre_files(
-# data["dir_paths"], data["file_paths_by_dir"], delete=False
-# )
-#
-# return size
+import collections
+import os
+import uuid
+
+import clique
+import ayon_api
+from ayon_api.operations import OperationsSession
+import qargparse
+from qtpy import QtWidgets, QtCore
+
+from ayon_core import style
+from ayon_core.lib import format_file_size
+from ayon_core.pipeline import load, Anatomy
+from ayon_core.pipeline.load import (
+ get_representation_path_with_anatomy,
+ InvalidRepresentationContext,
+)
+
+
+class DeleteOldVersions(load.ProductLoaderPlugin):
+ """Deletes specific number of old version"""
+
+ is_multiple_contexts_compatible = True
+ sequence_splitter = "__sequence_splitter__"
+
+ representations = ["*"]
+ product_types = {"*"}
+ tool_names = ["library_loader"]
+
+ label = "Delete Old Versions"
+ order = 35
+ icon = "trash"
+ color = "#d8d8d8"
+
+ options = [
+ qargparse.Integer(
+ "versions_to_keep", default=2, min=0, help="Versions to keep:"
+ ),
+ qargparse.Boolean(
+ "remove_publish_folder", help="Remove publish folder:"
+ )
+ ]
+
+ def delete_whole_dir_paths(self, dir_paths, delete=True):
+ size = 0
+
+ for dir_path in dir_paths:
+ # Delete all files and folders in dir path
+ for root, dirs, files in os.walk(dir_path, topdown=False):
+ for name in files:
+ file_path = os.path.join(root, name)
+ size += os.path.getsize(file_path)
+ if delete:
+ os.remove(file_path)
+ self.log.debug("Removed file: {}".format(file_path))
+
+ for name in dirs:
+ if delete:
+ os.rmdir(os.path.join(root, name))
+
+ if not delete:
+ continue
+
+ # Delete even the folder and its parent folders if they are empty
+ while True:
+ if not os.path.exists(dir_path):
+ dir_path = os.path.dirname(dir_path)
+ continue
+
+ if len(os.listdir(dir_path)) != 0:
+ break
+
+ os.rmdir(os.path.join(dir_path))
+
+ return size
+
+ def path_from_representation(self, representation, anatomy):
+ try:
+ context = representation["context"]
+ except KeyError:
+ return (None, None)
+
+ try:
+ path = get_representation_path_with_anatomy(
+ representation, anatomy
+ )
+ except InvalidRepresentationContext:
+ return (None, None)
+
+ sequence_path = None
+ if "frame" in context:
+ context["frame"] = self.sequence_splitter
+ sequence_path = get_representation_path_with_anatomy(
+ representation, anatomy
+ )
+
+ if sequence_path:
+ sequence_path = sequence_path.normalized()
+
+ return (path.normalized(), sequence_path)
+
+ def delete_only_repre_files(self, dir_paths, file_paths, delete=True):
+ size = 0
+
+ for dir_id, dir_path in dir_paths.items():
+ dir_files = os.listdir(dir_path)
+ collections, remainders = clique.assemble(dir_files)
+ for file_path, seq_path in file_paths[dir_id]:
+ file_path_base = os.path.split(file_path)[1]
+ # Just remove file if `frame` key was not in context or
+ # filled path is in remainders (single file sequence)
+ if not seq_path or file_path_base in remainders:
+ if not os.path.exists(file_path):
+ self.log.debug(
+ "File was not found: {}".format(file_path)
+ )
+ continue
+
+ size += os.path.getsize(file_path)
+
+ if delete:
+ os.remove(file_path)
+ self.log.debug("Removed file: {}".format(file_path))
+
+ if file_path_base in remainders:
+ remainders.remove(file_path_base)
+ continue
+
+ seq_path_base = os.path.split(seq_path)[1]
+ head, tail = seq_path_base.split(self.sequence_splitter)
+
+ final_col = None
+ for collection in collections:
+ if head != collection.head or tail != collection.tail:
+ continue
+ final_col = collection
+ break
+
+ if final_col is not None:
+ # Fill full path to head
+ final_col.head = os.path.join(dir_path, final_col.head)
+ for _file_path in final_col:
+ if os.path.exists(_file_path):
+
+ size += os.path.getsize(_file_path)
+
+ if delete:
+ os.remove(_file_path)
+ self.log.debug(
+ "Removed file: {}".format(_file_path)
+ )
+
+ _seq_path = final_col.format("{head}{padding}{tail}")
+ self.log.debug("Removed files: {}".format(_seq_path))
+ collections.remove(final_col)
+
+ elif os.path.exists(file_path):
+ size += os.path.getsize(file_path)
+
+ if delete:
+ os.remove(file_path)
+ self.log.debug("Removed file: {}".format(file_path))
+ else:
+ self.log.debug(
+ "File was not found: {}".format(file_path)
+ )
+
+ # Delete as many parent folders as possible
+ if not delete:
+ return size
+
+ for dir_path in dir_paths.values():
+ while True:
+ if not os.path.exists(dir_path):
+ dir_path = os.path.dirname(dir_path)
+ continue
+
+ if len(os.listdir(dir_path)) != 0:
+ break
+
+ self.log.debug("Removed folder: {}".format(dir_path))
+ os.rmdir(dir_path)
+
+ return size
+
+ def message(self, text):
+ msgBox = QtWidgets.QMessageBox()
+ msgBox.setText(text)
+ msgBox.setStyleSheet(style.load_stylesheet())
+ msgBox.setWindowFlags(
+ msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint
+ )
+ msgBox.exec_()
+
+ def get_data(self, context, versions_count):
+ product_entity = context["product"]
+ folder_entity = context["folder"]
+ project_name = context["project"]["name"]
+ anatomy = Anatomy(project_name, project_entity=context["project"])
+
+ version_fields = ayon_api.get_default_fields_for_type("version")
+ version_fields.add("tags")
+ versions = list(ayon_api.get_versions(
+ project_name,
+ product_ids=[product_entity["id"]],
+ active=None,
+ hero=False,
+ fields=version_fields
+ ))
+ self.log.debug(
+ "Queried versions ({})".format(len(versions))
+ )
+ versions_by_parent = collections.defaultdict(list)
+ for ent in versions:
+ versions_by_parent[ent["productId"]].append(ent)
+
+ def sort_func(ent):
+ return int(ent["version"])
+
+ all_last_versions = []
+ for _parent_id, _versions in versions_by_parent.items():
+ for idx, version in enumerate(
+ sorted(_versions, key=sort_func, reverse=True)
+ ):
+ if idx >= versions_count:
+ break
+ all_last_versions.append(version)
+
+ self.log.debug("Collected versions ({})".format(len(versions)))
+
+ # Filter latest versions
+ for version in all_last_versions:
+ versions.remove(version)
+
+ # Update versions_by_parent without filtered versions
+ versions_by_parent = collections.defaultdict(list)
+ for ent in versions:
+ versions_by_parent[ent["productId"]].append(ent)
+
+ # Filter already deleted versions
+ versions_to_pop = []
+ for version in versions:
+ if "deleted" in version["tags"]:
+ versions_to_pop.append(version)
+
+ for version in versions_to_pop:
+ msg = "Folder: \"{}\" | Product: \"{}\" | Version: \"{}\"".format(
+ folder_entity["path"],
+ product_entity["name"],
+ version["version"]
+ )
+ self.log.debug((
+ "Skipping version. Already tagged as 'deleted'. < {} >"
+ ).format(msg))
+ versions.remove(version)
+
+ version_ids = [ent["id"] for ent in versions]
+
+ self.log.debug(
+ "Filtered versions to delete ({})".format(len(version_ids))
+ )
+
+ if not version_ids:
+ msg = "Skipping processing. Nothing to delete on {}/{}".format(
+ folder_entity["path"], product_entity["name"]
+ )
+ self.log.info(msg)
+ print(msg)
+ return
+
+ repres = list(ayon_api.get_representations(
+ project_name, version_ids=version_ids
+ ))
+
+ self.log.debug(
+ "Collected representations to remove ({})".format(len(repres))
+ )
+
+ dir_paths = {}
+ file_paths_by_dir = collections.defaultdict(list)
+ for repre in repres:
+ file_path, seq_path = self.path_from_representation(
+ repre, anatomy
+ )
+ if file_path is None:
+ self.log.debug((
+ "Could not format path for represenation \"{}\""
+ ).format(str(repre)))
+ continue
+
+ dir_path = os.path.dirname(file_path)
+ dir_id = None
+ for _dir_id, _dir_path in dir_paths.items():
+ if _dir_path == dir_path:
+ dir_id = _dir_id
+ break
+
+ if dir_id is None:
+ dir_id = uuid.uuid4()
+ dir_paths[dir_id] = dir_path
+
+ file_paths_by_dir[dir_id].append([file_path, seq_path])
+
+ dir_ids_to_pop = []
+ for dir_id, dir_path in dir_paths.items():
+ if os.path.exists(dir_path):
+ continue
+
+ dir_ids_to_pop.append(dir_id)
+
+ # Pop dirs from both dictionaries
+ for dir_id in dir_ids_to_pop:
+ dir_paths.pop(dir_id)
+ paths = file_paths_by_dir.pop(dir_id)
+ # TODO report of missing directories?
+ paths_msg = ", ".join([
+ "'{}'".format(path[0].replace("\\", "/")) for path in paths
+ ])
+ self.log.debug((
+ "Folder does not exist. Deleting its files skipped: {}"
+ ).format(paths_msg))
+
+ return {
+ "dir_paths": dir_paths,
+ "file_paths_by_dir": file_paths_by_dir,
+ "versions": versions,
+ "folder": folder_entity,
+ "product": product_entity,
+ "archive_product": versions_count == 0
+ }
+
+ def main(self, project_name, data, remove_publish_folder):
+ # Total size of deleted files in bytes.
+ size = 0
+ if not data:
+ return size
+
+ if remove_publish_folder:
+ size = self.delete_whole_dir_paths(data["dir_paths"].values())
+ else:
+ size = self.delete_only_repre_files(
+ data["dir_paths"], data["file_paths_by_dir"]
+ )
+
+ op_session = OperationsSession()
+ for version in data["versions"]:
+ orig_version_tags = version["tags"]
+ version_tags = list(orig_version_tags)
+ changes = {}
+ if "deleted" not in version_tags:
+ version_tags.append("deleted")
+ changes["tags"] = version_tags
+
+ if version["active"]:
+ changes["active"] = False
+
+ if not changes:
+ continue
+ op_session.update_entity(
+ project_name, "version", version["id"], changes
+ )
+
+ op_session.commit()
+
+ return size
+
+ def load(self, contexts, name=None, namespace=None, options=None):
+ try:
+ size = 0
+ for count, context in enumerate(contexts):
+ versions_to_keep = 2
+ remove_publish_folder = False
+ if options:
+ versions_to_keep = options.get(
+ "versions_to_keep", versions_to_keep
+ )
+ remove_publish_folder = options.get(
+ "remove_publish_folder", remove_publish_folder
+ )
+
+ data = self.get_data(context, versions_to_keep)
+ if not data:
+ continue
+ project_name = context["project"]["name"]
+ size += self.main(project_name, data, remove_publish_folder)
+ print("Progressing {}/{}".format(count + 1, len(contexts)))
+
+ msg = "Total size of files: {}".format(format_file_size(size))
+ self.log.info(msg)
+ self.message(msg)
+
+ except Exception:
+ self.log.error("Failed to delete versions.", exc_info=True)
+
+
+class CalculateOldVersions(DeleteOldVersions):
+ """Calculate file size of old versions"""
+ label = "Calculate Old Versions"
+ order = 30
+ tool_names = ["library_loader"]
+
+ options = [
+ qargparse.Integer(
+ "versions_to_keep", default=2, min=0, help="Versions to keep:"
+ ),
+ qargparse.Boolean(
+ "remove_publish_folder", help="Remove publish folder:"
+ )
+ ]
+
+ def main(self, project_name, data, remove_publish_folder):
+ size = 0
+
+ if not data:
+ return size
+
+ if remove_publish_folder:
+ size = self.delete_whole_dir_paths(
+ data["dir_paths"].values(), delete=False
+ )
+ else:
+ size = self.delete_only_repre_files(
+ data["dir_paths"], data["file_paths_by_dir"], delete=False
+ )
+
+ return size
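# A minimal, runnable sketch of the version-trimming logic used by
# `get_data` above: group versions by product, sort them from newest to
# oldest, keep the last `versions_count`, and skip anything already tagged
# as deleted. The entity dicts are simplified stand-ins for ayon_api data.
import collections


def filter_versions_to_delete(versions, versions_count):
    versions_by_product = collections.defaultdict(list)
    for version in versions:
        versions_by_product[version["productId"]].append(version)

    to_delete = []
    for product_versions in versions_by_product.values():
        ordered = sorted(
            product_versions, key=lambda v: int(v["version"]), reverse=True
        )
        # Everything past the first `versions_count` entries is old
        for version in ordered[versions_count:]:
            if "deleted" not in version["tags"]:
                to_delete.append(version)
    return to_delete


if __name__ == "__main__":
    versions = [
        {"productId": "p1", "version": idx, "tags": []}
        for idx in range(1, 6)
    ]
    # Keeps versions 5 and 4, marks 1-3 for deletion
    print([v["version"] for v in filter_versions_to_delete(versions, 2)])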
diff --git a/client/ayon_core/plugins/publish/integrate.py b/client/ayon_core/plugins/publish/integrate.py
index ce34f2e88b..764168edd3 100644
--- a/client/ayon_core/plugins/publish/integrate.py
+++ b/client/ayon_core/plugins/publish/integrate.py
@@ -167,7 +167,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"uasset",
"blendScene",
"yeticacheUE",
- "tycache"
+ "tycache",
+ "csv_ingest_file",
]
default_template_name = "publish"
diff --git a/client/ayon_core/tools/common_models/cache.py b/client/ayon_core/tools/common_models/cache.py
index 221a14160c..59b727728f 100644
--- a/client/ayon_core/tools/common_models/cache.py
+++ b/client/ayon_core/tools/common_models/cache.py
@@ -1,239 +1,31 @@
-import time
-import collections
+import warnings
-InitInfo = collections.namedtuple(
- "InitInfo",
- ["default_factory", "lifetime"]
+from ayon_core.lib import CacheItem as _CacheItem
+from ayon_core.lib import NestedCacheItem as _NestedCacheItem
+
+
+# Cache classes were moved to `ayon_core.lib.cache`
+class CacheItem(_CacheItem):
+ def __init__(self, *args, **kwargs):
+ warnings.warn(
+ "Used 'CacheItem' from deprecated location "
+ "'ayon_core.tools.common_models', use 'ayon_core.lib' instead.",
+ DeprecationWarning,
+ )
+ super().__init__(*args, **kwargs)
+
+
+class NestedCacheItem(_NestedCacheItem):
+ def __init__(self, *args, **kwargs):
+ warnings.warn(
+ "Used 'NestedCacheItem' from deprecated location "
+ "'ayon_core.tools.common_models', use 'ayon_core.lib' instead.",
+ DeprecationWarning,
+ )
+ super().__init__(*args, **kwargs)
+
+
+__all__ = (
+ "CacheItem",
+ "NestedCacheItem",
)
-
-
-def _default_factory_func():
- return None
-
-
-class CacheItem:
- """Simple cache item with lifetime and default value.
-
- Args:
- default_factory (Optional[callable]): Function that returns default
- value used on init and on reset.
- lifetime (Optional[int]): Lifetime of the cache data in seconds.
- """
-
- def __init__(self, default_factory=None, lifetime=None):
- if lifetime is None:
- lifetime = 120
- self._lifetime = lifetime
- self._last_update = None
- if default_factory is None:
- default_factory = _default_factory_func
- self._default_factory = default_factory
- self._data = default_factory()
-
- @property
- def is_valid(self):
- """Is cache valid to use.
-
- Return:
- bool: True if cache is valid, False otherwise.
- """
-
- if self._last_update is None:
- return False
-
- return (time.time() - self._last_update) < self._lifetime
-
- def set_lifetime(self, lifetime):
- """Change lifetime of cache item.
-
- Args:
- lifetime (int): Lifetime of the cache data in seconds.
- """
-
- self._lifetime = lifetime
-
- def set_invalid(self):
- """Set cache as invalid."""
-
- self._last_update = None
-
- def reset(self):
- """Set cache as invalid and reset data."""
-
- self._last_update = None
- self._data = self._default_factory()
-
- def get_data(self):
- """Receive cached data.
-
- Returns:
- Any: Any data that are cached.
- """
-
- return self._data
-
- def update_data(self, data):
- self._data = data
- self._last_update = time.time()
-
-
-class NestedCacheItem:
- """Helper for cached items stored in nested structure.
-
- Example:
- >>> cache = NestedCacheItem(levels=2, default_factory=lambda: 0)
- >>> cache["a"]["b"].is_valid
- False
- >>> cache["a"]["b"].get_data()
- 0
- >>> cache["a"]["b"] = 1
- >>> cache["a"]["b"].is_valid
- True
- >>> cache["a"]["b"].get_data()
- 1
- >>> cache.reset()
- >>> cache["a"]["b"].is_valid
- False
-
- Args:
- levels (int): Number of nested levels where read cache is stored.
- default_factory (Optional[callable]): Function that returns default
- value used on init and on reset.
- lifetime (Optional[int]): Lifetime of the cache data in seconds.
- _init_info (Optional[InitInfo]): Private argument. Init info for
- nested cache where created from parent item.
- """
-
- def __init__(
- self, levels=1, default_factory=None, lifetime=None, _init_info=None
- ):
- if levels < 1:
- raise ValueError("Nested levels must be greater than 0")
- self._data_by_key = {}
- if _init_info is None:
- _init_info = InitInfo(default_factory, lifetime)
- self._init_info = _init_info
- self._levels = levels
-
- def __getitem__(self, key):
- """Get cached data.
-
- Args:
- key (str): Key of the cache item.
-
- Returns:
- Union[NestedCacheItem, CacheItem]: Cache item.
- """
-
- cache = self._data_by_key.get(key)
- if cache is None:
- if self._levels > 1:
- cache = NestedCacheItem(
- levels=self._levels - 1,
- _init_info=self._init_info
- )
- else:
- cache = CacheItem(
- self._init_info.default_factory,
- self._init_info.lifetime
- )
- self._data_by_key[key] = cache
- return cache
-
- def __setitem__(self, key, value):
- """Update cached data.
-
- Args:
- key (str): Key of the cache item.
- value (Any): Any data that are cached.
- """
-
- if self._levels > 1:
- raise AttributeError((
- "{} does not support '__setitem__'. Lower nested level by {}"
- ).format(self.__class__.__name__, self._levels - 1))
- cache = self[key]
- cache.update_data(value)
-
- def get(self, key):
- """Get cached data.
-
- Args:
- key (str): Key of the cache item.
-
- Returns:
- Union[NestedCacheItem, CacheItem]: Cache item.
- """
-
- return self[key]
-
- def cached_count(self):
- """Amount of cached items.
-
- Returns:
- int: Amount of cached items.
- """
-
- return len(self._data_by_key)
-
- def clear_key(self, key):
- """Clear cached item by key.
-
- Args:
- key (str): Key of the cache item.
- """
-
- self._data_by_key.pop(key, None)
-
- def clear_invalid(self):
- """Clear all invalid cache items.
-
- Note:
- To clear all cache items use 'reset'.
- """
-
- changed = {}
- children_are_nested = self._levels > 1
- for key, cache in tuple(self._data_by_key.items()):
- if children_are_nested:
- output = cache.clear_invalid()
- if output:
- changed[key] = output
- if not cache.cached_count():
- self._data_by_key.pop(key)
- elif not cache.is_valid:
- changed[key] = cache.get_data()
- self._data_by_key.pop(key)
- return changed
-
- def reset(self):
- """Reset cache.
-
- Note:
- To clear only invalid cache items use 'clear_invalid'.
- """
-
- self._data_by_key = {}
-
- def set_lifetime(self, lifetime):
- """Change lifetime of all children cache items.
-
- Args:
- lifetime (int): Lifetime of the cache data in seconds.
- """
-
- self._init_info.lifetime = lifetime
- for cache in self._data_by_key.values():
- cache.set_lifetime(lifetime)
-
- @property
- def is_valid(self):
- """Raise reasonable error when called on wront level.
-
- Raises:
- AttributeError: If called on nested cache item.
- """
-
- raise AttributeError((
- "{} does not support 'is_valid'. Lower nested level by '{}'"
- ).format(self.__class__.__name__, self._levels))
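# The cache.py rewrite above keeps backwards compatibility with a thin
# deprecation shim. A self-contained sketch of the pattern; the stand-in
# class below only illustrates the moved `ayon_core.lib.CacheItem`. Note
# that passing `stacklevel=2` (not used in the patch) points the warning
# at the caller instead of at the shim itself.
import warnings


class _MovedCacheItem:  # stand-in for the class at its new location
    def __init__(self, lifetime=120):
        self.lifetime = lifetime


class CacheItem(_MovedCacheItem):
    """Alias kept at the old import location for backwards compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "Used 'CacheItem' from deprecated location,"
            " import it from the new module instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)


if __name__ == "__main__":
    warnings.simplefilter("always", DeprecationWarning)
    CacheItem()  # emits the DeprecationWarning at the call site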
diff --git a/client/ayon_core/tools/common_models/hierarchy.py b/client/ayon_core/tools/common_models/hierarchy.py
index d8b28f020d..78b8a7f492 100644
--- a/client/ayon_core/tools/common_models/hierarchy.py
+++ b/client/ayon_core/tools/common_models/hierarchy.py
@@ -6,8 +6,7 @@ import ayon_api
import six
from ayon_core.style import get_default_entity_icon_color
-
-from .cache import NestedCacheItem
+from ayon_core.lib import NestedCacheItem
HIERARCHY_MODEL_SENDER = "hierarchy.model"
diff --git a/client/ayon_core/tools/common_models/projects.py b/client/ayon_core/tools/common_models/projects.py
index e30561000e..19a38bee21 100644
--- a/client/ayon_core/tools/common_models/projects.py
+++ b/client/ayon_core/tools/common_models/projects.py
@@ -5,8 +5,7 @@ import ayon_api
import six
from ayon_core.style import get_default_entity_icon_color
-
-from .cache import CacheItem
+from ayon_core.lib import CacheItem
PROJECTS_MODEL_SENDER = "projects.model"
diff --git a/client/ayon_core/tools/common_models/thumbnails.py b/client/ayon_core/tools/common_models/thumbnails.py
index 1c3aadc49f..6d14783b9a 100644
--- a/client/ayon_core/tools/common_models/thumbnails.py
+++ b/client/ayon_core/tools/common_models/thumbnails.py
@@ -5,7 +5,7 @@ import collections
import ayon_api
import appdirs
-from .cache import NestedCacheItem
+from ayon_core.lib import NestedCacheItem
FileInfo = collections.namedtuple(
"FileInfo",
diff --git a/client/ayon_core/tools/loader/models/actions.py b/client/ayon_core/tools/loader/models/actions.py
index ad2993af50..cfe91cadab 100644
--- a/client/ayon_core/tools/loader/models/actions.py
+++ b/client/ayon_core/tools/loader/models/actions.py
@@ -6,6 +6,7 @@ import uuid
import ayon_api
+from ayon_core.lib import NestedCacheItem
from ayon_core.pipeline.load import (
discover_loader_plugins,
ProductLoaderPlugin,
@@ -17,7 +18,6 @@ from ayon_core.pipeline.load import (
LoadError,
IncompatibleLoaderError,
)
-from ayon_core.tools.common_models import NestedCacheItem
from ayon_core.tools.loader.abstract import ActionItem
ACTIONS_MODEL_SENDER = "actions.model"
diff --git a/client/ayon_core/tools/loader/models/products.py b/client/ayon_core/tools/loader/models/products.py
index 812446a012..a3bbc30a09 100644
--- a/client/ayon_core/tools/loader/models/products.py
+++ b/client/ayon_core/tools/loader/models/products.py
@@ -5,8 +5,8 @@ import arrow
import ayon_api
from ayon_api.operations import OperationsSession
+from ayon_core.lib import NestedCacheItem
from ayon_core.style import get_default_entity_icon_color
-from ayon_core.tools.common_models import NestedCacheItem
from ayon_core.tools.loader.abstract import (
ProductTypeItem,
ProductItem,
diff --git a/client/ayon_core/tools/loader/models/sitesync.py b/client/ayon_core/tools/loader/models/sitesync.py
index 987510905b..02504c2ad3 100644
--- a/client/ayon_core/tools/loader/models/sitesync.py
+++ b/client/ayon_core/tools/loader/models/sitesync.py
@@ -2,9 +2,8 @@ import collections
from ayon_api import get_representations, get_versions_links
-from ayon_core.lib import Logger
+from ayon_core.lib import Logger, NestedCacheItem
from ayon_core.addon import AddonsManager
-from ayon_core.tools.common_models import NestedCacheItem
from ayon_core.tools.loader.abstract import ActionItem
DOWNLOAD_IDENTIFIER = "sitesync.download"
diff --git a/client/ayon_core/tools/workfile_template_build/lib.py b/client/ayon_core/tools/workfile_template_build/lib.py
index de3a0d0084..ffd6fefc38 100644
--- a/client/ayon_core/tools/workfile_template_build/lib.py
+++ b/client/ayon_core/tools/workfile_template_build/lib.py
@@ -8,12 +8,12 @@ from ayon_core.tools.utils.dialogs import show_message_dialog
def open_template_ui(builder, main_window):
"""Open template from `builder`
- Asks user about overwriting current scene and feedsback exceptions.
+ Asks user about overwriting current scene and reports exceptions.
"""
result = QtWidgets.QMessageBox.question(
main_window,
"Opening template",
- "Caution! You will loose unsaved changes.\nDo you want to continue?",
+ "Caution! You will lose unsaved changes.\nDo you want to continue?",
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
)
if result == QtWidgets.QMessageBox.Yes:
diff --git a/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py b/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py
index 6a1572deb2..fe6abee951 100644
--- a/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py
+++ b/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py
@@ -20,6 +20,8 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel):
controller (AbstractWorkfilesFrontend): The control object.
"""
+ refreshed = QtCore.Signal()
+
def __init__(self, controller):
super(WorkAreaFilesModel, self).__init__()
@@ -163,6 +165,12 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel):
self._fill_items()
def _fill_items(self):
+ try:
+ self._fill_items_impl()
+ finally:
+ self.refreshed.emit()
+
+ def _fill_items_impl(self):
folder_id = self._selected_folder_id
task_id = self._selected_task_id
if not folder_id or not task_id:
@@ -285,6 +293,7 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
selection_model.selectionChanged.connect(self._on_selection_change)
view.double_clicked.connect(self._on_mouse_double_click)
view.customContextMenuRequested.connect(self._on_context_menu)
+ model.refreshed.connect(self._on_model_refresh)
controller.register_event_callback(
"expected_selection_changed",
@@ -298,6 +307,7 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
self._controller = controller
self._published_mode = False
+ self._change_selection_on_refresh = True
def set_published_mode(self, published_mode):
"""Set the published mode.
@@ -379,7 +389,9 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
if not workfile_info["current"]:
return
+ self._change_selection_on_refresh = False
self._model.refresh()
+ self._change_selection_on_refresh = True
workfile_name = workfile_info["name"]
if (
@@ -394,3 +406,30 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
self._controller.expected_workfile_selected(
event["folder"]["id"], event["task"]["name"], workfile_name
)
+
+ def _on_model_refresh(self):
+ if (
+ not self._change_selection_on_refresh
+ or self._proxy_model.rowCount() < 1
+ ):
+ return
+
+ # Find the row with latest date modified
+ latest_index = max(
+ (
+ self._proxy_model.index(idx, 0)
+ for idx in range(self._proxy_model.rowCount())
+ ),
+ key=lambda model_index: model_index.data(DATE_MODIFIED_ROLE)
+ )
+
+ # Select row of latest modified
+ selection_model = self._view.selectionModel()
+ selection_model.select(
+ latest_index,
+ (
+ QtCore.QItemSelectionModel.ClearAndSelect
+ | QtCore.QItemSelectionModel.Current
+ | QtCore.QItemSelectionModel.Rows
+ )
+ )
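# A sketch of the two patterns added above: the model emits `refreshed`
# from a try/finally so listeners always run, and the caller selects the
# row with the newest modification date. Assumes PySide6 is installed;
# DATE_MODIFIED_ROLE is a hypothetical custom item role.
from PySide6 import QtCore, QtGui

DATE_MODIFIED_ROLE = QtCore.Qt.UserRole + 1


class FilesModel(QtGui.QStandardItemModel):
    refreshed = QtCore.Signal()

    def refresh(self):
        try:
            self._fill_items()
        finally:
            # Emitted even if filling raised, so views never wait forever
            self.refreshed.emit()

    def _fill_items(self):
        self.clear()
        for name, mtime in (("a.ma", 10), ("b.ma", 30), ("c.ma", 20)):
            item = QtGui.QStandardItem(name)
            item.setData(mtime, DATE_MODIFIED_ROLE)
            self.appendRow(item)


if __name__ == "__main__":
    app = QtCore.QCoreApplication([])
    model = FilesModel()
    model.refreshed.connect(lambda: print("model refreshed"))
    model.refresh()
    latest = max(
        (model.index(row, 0) for row in range(model.rowCount())),
        key=lambda index: index.data(DATE_MODIFIED_ROLE),
    )
    print("latest modified:", latest.data())  # -> b.ma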
diff --git a/client/ayon_core/tools/workfiles/widgets/window.py b/client/ayon_core/tools/workfiles/widgets/window.py
index 8a2617d270..1cfae7ec90 100644
--- a/client/ayon_core/tools/workfiles/widgets/window.py
+++ b/client/ayon_core/tools/workfiles/widgets/window.py
@@ -118,11 +118,11 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
overlay_invalid_host = InvalidHostOverlay(self)
overlay_invalid_host.setVisible(False)
- first_show_timer = QtCore.QTimer()
- first_show_timer.setSingleShot(True)
- first_show_timer.setInterval(50)
+ show_timer = QtCore.QTimer()
+ show_timer.setSingleShot(True)
+ show_timer.setInterval(50)
- first_show_timer.timeout.connect(self._on_first_show)
+ show_timer.timeout.connect(self._on_show)
controller.register_event_callback(
"save_as.finished",
@@ -159,7 +159,7 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
self._tasks_widget = tasks_widget
self._side_panel = side_panel
- self._first_show_timer = first_show_timer
+ self._show_timer = show_timer
self._post_init()
@@ -287,9 +287,9 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
def showEvent(self, event):
super(WorkfilesToolWindow, self).showEvent(event)
+ self._show_timer.start()
if self._first_show:
self._first_show = False
- self._first_show_timer.start()
self.setStyleSheet(style.load_stylesheet())
def keyPressEvent(self, event):
@@ -303,9 +303,8 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
pass
- def _on_first_show(self):
- if not self._controller_refreshed:
- self.refresh()
+ def _on_show(self):
+ self.refresh()
def _on_file_text_filter_change(self, text):
self._files_widget.set_text_filter(text)
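# The window.py change renames the timer so a refresh is scheduled on
# every show, not only the first one. A hedged sketch of the deferred
# refresh pattern, again assuming PySide6:
from PySide6 import QtCore, QtWidgets


class ToolWindow(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        show_timer = QtCore.QTimer(self)
        show_timer.setSingleShot(True)
        show_timer.setInterval(50)  # let the window paint first
        show_timer.timeout.connect(self._on_show)
        self._show_timer = show_timer

    def showEvent(self, event):
        super().showEvent(event)
        # Restarting the single-shot timer defers the (possibly slow)
        # refresh until after the show event finished processing.
        self._show_timer.start()

    def _on_show(self):
        print("refresh triggered")


if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    window = ToolWindow()
    window.show()
    QtCore.QTimer.singleShot(200, app.quit)
    app.exec()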
diff --git a/pyproject.toml b/pyproject.toml
index dc8b312364..4726bef41a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -77,6 +77,20 @@ unfixable = []
# Allow unused variables when underscore-prefixed.
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+exclude = [
+ "client/ayon_core/hosts/unreal/integration/*",
+ "client/ayon_core/hosts/aftereffects/api/extension/js/libs/*",
+ "client/ayon_core/hosts/hiero/api/startup/*",
+ "client/ayon_core/modules/deadline/repository/custom/plugins/CelAction/*",
+ "client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/*",
+ "client/ayon_core/modules/click_wrap.py",
+ "client/ayon_core/scripts/slates/__init__.py"
+]
+
+[tool.ruff.lint.per-file-ignores]
+"client/ayon_core/lib/__init__.py" = ["E402"]
+"client/ayon_core/hosts/max/startup/startup.py" = ["E402"]
+
[tool.ruff.format]
# Like Black, use double quotes for strings.
quote-style = "double"
@@ -92,7 +106,7 @@ line-ending = "auto"
[tool.codespell]
# Ignore words that are not in the dictionary.
-ignore-words-list = "ayon,ynput,parms,parm,hda,developpement"
+ignore-words-list = "ayon,ynput,parms,parm,hda,developpement,ue"
skip = "./.*,./package/*,*/vendor/*,*/unreal/integration/*,*/aftereffects/api/extension/js/libs/*"
count = true
diff --git a/server_addon/aftereffects/package.py b/server_addon/aftereffects/package.py
new file mode 100644
index 0000000000..a680b37602
--- /dev/null
+++ b/server_addon/aftereffects/package.py
@@ -0,0 +1,3 @@
+name = "aftereffects"
+title = "AfterEffects"
+version = "0.1.3"
diff --git a/server_addon/aftereffects/server/__init__.py b/server_addon/aftereffects/server/__init__.py
index e14e76e9db..76e6d5b2eb 100644
--- a/server_addon/aftereffects/server/__init__.py
+++ b/server_addon/aftereffects/server/__init__.py
@@ -1,14 +1,9 @@
from ayon_server.addons import BaseServerAddon
from .settings import AfterEffectsSettings, DEFAULT_AFTEREFFECTS_SETTING
-from .version import __version__
class AfterEffects(BaseServerAddon):
- name = "aftereffects"
- title = "AfterEffects"
- version = __version__
-
settings_model = AfterEffectsSettings
async def get_default_settings(self):
diff --git a/server_addon/aftereffects/server/version.py b/server_addon/aftereffects/server/version.py
deleted file mode 100644
index e57ad00718..0000000000
--- a/server_addon/aftereffects/server/version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Package declaring addon version."""
-__version__ = "0.1.3"
diff --git a/server_addon/applications/client/ayon_applications/addon.py b/server_addon/applications/client/ayon_applications/addon.py
index 0f1b68af0e..a8eaa46cad 100644
--- a/server_addon/applications/client/ayon_applications/addon.py
+++ b/server_addon/applications/client/ayon_applications/addon.py
@@ -110,6 +110,26 @@ class ApplicationsAddon(AYONAddon, IPluginPaths):
]
}
+ def launch_application(
+ self, app_name, project_name, folder_path, task_name
+ ):
+ """Launch application.
+
+ Args:
+ app_name (str): Full application name, e.g. 'maya/2024'.
+ project_name (str): Project name.
+ folder_path (str): Folder path.
+ task_name (str): Task name.
+
+ """
+ app_manager = self.get_applications_manager()
+ return app_manager.launch(
+ app_name,
+ project_name=project_name,
+ folder_path=folder_path,
+ task_name=task_name,
+ )
+
# --- CLI ---
def cli(self, addon_click_group):
main_group = click_wrap.group(
@@ -134,6 +154,17 @@ class ApplicationsAddon(AYONAddon, IPluginPaths):
default=None
)
)
+ (
+ main_group.command(
+ self._cli_launch_applications,
+ name="launch",
+ help="Launch application"
+ )
+ .option("--app", required=True, help="Application name")
+ .option("--project", required=True, help="Project name")
+ .option("--folder", required=True, help="Folder path")
+ .option("--task", required=True, help="Task name")
+ )
# Convert main command to click object and add it to parent group
addon_click_group.add_command(
main_group.to_click_obj()
@@ -171,3 +202,15 @@ class ApplicationsAddon(AYONAddon, IPluginPaths):
with open(output_json_path, "w") as file_stream:
json.dump(env, file_stream, indent=4)
+
+ def _cli_launch_applications(self, project, folder, task, app):
+ """Launch application.
+
+ Args:
+ project (str): Project name.
+ folder (str): Folder path.
+ task (str): Task name.
+ app (str): Full application name, e.g. 'maya/2024'.
+
+ """
+ self.launch_application(app, project, folder, task)
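# The CLI above is built with AYON's `click_wrap` helper. As an analogy
# only (plain `click`, not the actual ayon_core API), the added `launch`
# command corresponds to:
import click


@click.group(name="applications")
def main_group():
    """Applications addon commands."""


@main_group.command(name="launch", help="Launch application")
@click.option("--app", required=True, help="Application name")
@click.option("--project", required=True, help="Project name")
@click.option("--folder", required=True, help="Folder path")
@click.option("--task", required=True, help="Task name")
def launch(app, project, folder, task):
    # Would resolve `app` through the applications manager and launch it
    click.echo(f"launching {app} in {project} / {folder} / {task}")


if __name__ == "__main__":
    # e.g. python cli.py launch --app maya/2024 --project demo \
    #      --folder /assets/hero --task modeling
    main_group()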
diff --git a/server_addon/applications/package.py b/server_addon/applications/package.py
index ce312ed662..500f609fc6 100644
--- a/server_addon/applications/package.py
+++ b/server_addon/applications/package.py
@@ -1,3 +1,10 @@
name = "applications"
title = "Applications"
-version = "0.2.0"
+version = "0.2.1"
+
+ayon_server_version = ">=1.0.7"
+ayon_launcher_version = ">=1.0.2"
+ayon_required_addons = {
+ "core": ">0.3.0",
+}
+ayon_compatible_addons = {}
diff --git a/server_addon/blender/package.py b/server_addon/blender/package.py
new file mode 100644
index 0000000000..667076e533
--- /dev/null
+++ b/server_addon/blender/package.py
@@ -0,0 +1,3 @@
+name = "blender"
+title = "Blender"
+version = "0.1.8"
diff --git a/server_addon/blender/server/__init__.py b/server_addon/blender/server/__init__.py
index a7d6cb4400..b274e3bc29 100644
--- a/server_addon/blender/server/__init__.py
+++ b/server_addon/blender/server/__init__.py
@@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import BlenderSettings, DEFAULT_VALUES
class BlenderAddon(BaseServerAddon):
- name = "blender"
- title = "Blender"
- version = __version__
settings_model: Type[BlenderSettings] = BlenderSettings
- frontend_scopes = {}
- services = {}
async def get_default_settings(self):
settings_model_cls = self.get_settings_model()
diff --git a/server_addon/blender/server/version.py b/server_addon/blender/server/version.py
deleted file mode 100644
index 9cb17e7976..0000000000
--- a/server_addon/blender/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.8"
diff --git a/server_addon/celaction/package.py b/server_addon/celaction/package.py
new file mode 100644
index 0000000000..2b11a8630f
--- /dev/null
+++ b/server_addon/celaction/package.py
@@ -0,0 +1,3 @@
+name = "celaction"
+title = "CelAction"
+version = "0.1.0"
diff --git a/server_addon/celaction/server/__init__.py b/server_addon/celaction/server/__init__.py
index 90d3dbaa01..e3769a4b7f 100644
--- a/server_addon/celaction/server/__init__.py
+++ b/server_addon/celaction/server/__init__.py
@@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import CelActionSettings, DEFAULT_VALUES
class CelActionAddon(BaseServerAddon):
- name = "celaction"
- title = "CelAction"
- version = __version__
settings_model: Type[CelActionSettings] = CelActionSettings
- frontend_scopes = {}
- services = {}
async def get_default_settings(self):
settings_model_cls = self.get_settings_model()
diff --git a/server_addon/celaction/server/version.py b/server_addon/celaction/server/version.py
deleted file mode 100644
index 3dc1f76bc6..0000000000
--- a/server_addon/celaction/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.0"
diff --git a/server_addon/clockify/package.py b/server_addon/clockify/package.py
new file mode 100644
index 0000000000..bcf9425b3f
--- /dev/null
+++ b/server_addon/clockify/package.py
@@ -0,0 +1,3 @@
+name = "clockify"
+title = "Clockify"
+version = "0.1.1"
diff --git a/server_addon/clockify/server/__init__.py b/server_addon/clockify/server/__init__.py
index 0fa453fdf4..11bbfed261 100644
--- a/server_addon/clockify/server/__init__.py
+++ b/server_addon/clockify/server/__init__.py
@@ -2,14 +2,8 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import ClockifySettings
class ClockifyAddon(BaseServerAddon):
- name = "clockify"
- title = "Clockify"
- version = __version__
settings_model: Type[ClockifySettings] = ClockifySettings
- frontend_scopes = {}
- services = {}
diff --git a/server_addon/clockify/server/version.py b/server_addon/clockify/server/version.py
deleted file mode 100644
index 485f44ac21..0000000000
--- a/server_addon/clockify/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.1"
diff --git a/server_addon/create_ayon_addons.py b/server_addon/create_ayon_addons.py
index bfd601af07..f0a36d4740 100644
--- a/server_addon/create_ayon_addons.py
+++ b/server_addon/create_ayon_addons.py
@@ -5,7 +5,7 @@ import shutil
import argparse
import zipfile
import types
-import importlib
+import importlib.machinery
import platform
import collections
from pathlib import Path
@@ -245,12 +245,8 @@ def create_addon_package(
keep_source: bool,
):
src_package_py = addon_dir / "package.py"
- package = None
- if src_package_py.exists():
- package = import_filepath(src_package_py)
- addon_version = package.version
- else:
- addon_version = get_addon_version(addon_dir)
+ package = import_filepath(src_package_py)
+ addon_version = package.version
addon_output_dir = output_dir / addon_dir.name / addon_version
if addon_output_dir.exists():
@@ -259,18 +255,7 @@ def create_addon_package(
# Copy server content
dst_package_py = addon_output_dir / "package.py"
- if package is not None:
- shutil.copy(src_package_py, dst_package_py)
- else:
- addon_name = addon_dir.name
- if addon_name == "royal_render":
- addon_name = "royalrender"
- package_py_content = PACKAGE_PY_TEMPLATE.format(
- addon_name=addon_name, addon_version=addon_version
- )
-
- with open(dst_package_py, "w+") as pkg_py:
- pkg_py.write(package_py_content)
+ shutil.copy(src_package_py, dst_package_py)
server_dir = addon_dir / "server"
shutil.copytree(
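# create_ayon_addons.py now requires every addon to ship a `package.py`
# and loads it by file path. A minimal stdlib sketch of such an
# `import_filepath` helper (the real helper in the repo may differ):
import importlib.util
from pathlib import Path


def import_filepath(path, module_name="package"):
    """Import a python file as a standalone module object."""
    spec = importlib.util.spec_from_file_location(module_name, str(path))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module


if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        package_py = Path(tmp) / "package.py"
        package_py.write_text('name = "maya"\nversion = "0.1.17"\n')
        package = import_filepath(package_py)
        print(package.name, package.version)  # -> maya 0.1.17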
diff --git a/server_addon/deadline/package.py b/server_addon/deadline/package.py
new file mode 100644
index 0000000000..944797fea6
--- /dev/null
+++ b/server_addon/deadline/package.py
@@ -0,0 +1,3 @@
+name = "deadline"
+title = "Deadline"
+version = "0.1.10"
diff --git a/server_addon/deadline/server/__init__.py b/server_addon/deadline/server/__init__.py
index 36d04189a9..e7dcb7d347 100644
--- a/server_addon/deadline/server/__init__.py
+++ b/server_addon/deadline/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import DeadlineSettings, DEFAULT_VALUES
class Deadline(BaseServerAddon):
- name = "deadline"
- title = "Deadline"
- version = __version__
settings_model: Type[DeadlineSettings] = DeadlineSettings
async def get_default_settings(self):
diff --git a/server_addon/deadline/server/settings/main.py b/server_addon/deadline/server/settings/main.py
index 83c7567c0d..21a314cd2f 100644
--- a/server_addon/deadline/server/settings/main.py
+++ b/server_addon/deadline/server/settings/main.py
@@ -22,7 +22,7 @@ class ServerListSubmodel(BaseSettingsModel):
async def defined_deadline_ws_name_enum_resolver(
- addon: BaseServerAddon,
+ addon: "BaseServerAddon",
settings_variant: str = "production",
project_name: str | None = None,
) -> list[str]:
diff --git a/server_addon/deadline/server/version.py b/server_addon/deadline/server/version.py
deleted file mode 100644
index 569b1212f7..0000000000
--- a/server_addon/deadline/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.10"
diff --git a/server_addon/flame/package.py b/server_addon/flame/package.py
new file mode 100644
index 0000000000..8c077ed91d
--- /dev/null
+++ b/server_addon/flame/package.py
@@ -0,0 +1,3 @@
+name = "flame"
+title = "Flame"
+version = "0.1.0"
diff --git a/server_addon/flame/server/__init__.py b/server_addon/flame/server/__init__.py
index 7d5eb3960f..4aa46617ee 100644
--- a/server_addon/flame/server/__init__.py
+++ b/server_addon/flame/server/__init__.py
@@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import FlameSettings, DEFAULT_VALUES
class FlameAddon(BaseServerAddon):
- name = "flame"
- title = "Flame"
- version = __version__
settings_model: Type[FlameSettings] = FlameSettings
- frontend_scopes = {}
- services = {}
async def get_default_settings(self):
settings_model_cls = self.get_settings_model()
diff --git a/server_addon/flame/server/version.py b/server_addon/flame/server/version.py
deleted file mode 100644
index 3dc1f76bc6..0000000000
--- a/server_addon/flame/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.0"
diff --git a/server_addon/fusion/package.py b/server_addon/fusion/package.py
new file mode 100644
index 0000000000..9e7a46df2c
--- /dev/null
+++ b/server_addon/fusion/package.py
@@ -0,0 +1,3 @@
+name = "fusion"
+title = "Fusion"
+version = "0.1.5"
diff --git a/server_addon/fusion/server/__init__.py b/server_addon/fusion/server/__init__.py
index 4d43f28812..0456cfd5ee 100644
--- a/server_addon/fusion/server/__init__.py
+++ b/server_addon/fusion/server/__init__.py
@@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import FusionSettings, DEFAULT_VALUES
class FusionAddon(BaseServerAddon):
- name = "fusion"
- title = "Fusion"
- version = __version__
settings_model: Type[FusionSettings] = FusionSettings
- frontend_scopes = {}
- services = {}
async def get_default_settings(self):
settings_model_cls = self.get_settings_model()
diff --git a/server_addon/fusion/server/version.py b/server_addon/fusion/server/version.py
deleted file mode 100644
index 1276d0254f..0000000000
--- a/server_addon/fusion/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.5"
diff --git a/server_addon/harmony/package.py b/server_addon/harmony/package.py
new file mode 100644
index 0000000000..83e88e7d57
--- /dev/null
+++ b/server_addon/harmony/package.py
@@ -0,0 +1,3 @@
+name = "harmony"
+title = "Harmony"
+version = "0.1.2"
diff --git a/server_addon/harmony/server/__init__.py b/server_addon/harmony/server/__init__.py
index 4ecda1989e..154618241e 100644
--- a/server_addon/harmony/server/__init__.py
+++ b/server_addon/harmony/server/__init__.py
@@ -1,14 +1,9 @@
from ayon_server.addons import BaseServerAddon
from .settings import HarmonySettings, DEFAULT_HARMONY_SETTING
-from .version import __version__
class Harmony(BaseServerAddon):
- name = "harmony"
- title = "Harmony"
- version = __version__
-
settings_model = HarmonySettings
async def get_default_settings(self):
diff --git a/server_addon/harmony/server/version.py b/server_addon/harmony/server/version.py
deleted file mode 100644
index df0c92f1e2..0000000000
--- a/server_addon/harmony/server/version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Package declaring addon version."""
-__version__ = "0.1.2"
diff --git a/server_addon/hiero/package.py b/server_addon/hiero/package.py
new file mode 100644
index 0000000000..54c2f74fa7
--- /dev/null
+++ b/server_addon/hiero/package.py
@@ -0,0 +1,3 @@
+name = "hiero"
+title = "Hiero"
+version = "0.1.3"
diff --git a/server_addon/hiero/server/__init__.py b/server_addon/hiero/server/__init__.py
index d0f9bcefc3..3db78eafd7 100644
--- a/server_addon/hiero/server/__init__.py
+++ b/server_addon/hiero/server/__init__.py
@@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import HieroSettings, DEFAULT_VALUES
class HieroAddon(BaseServerAddon):
- name = "hiero"
- title = "Hiero"
- version = __version__
settings_model: Type[HieroSettings] = HieroSettings
- frontend_scopes = {}
- services = {}
async def get_default_settings(self):
settings_model_cls = self.get_settings_model()
diff --git a/server_addon/hiero/server/settings/imageio.py b/server_addon/hiero/server/settings/imageio.py
index f2bc71ac33..9e15e15597 100644
--- a/server_addon/hiero/server/settings/imageio.py
+++ b/server_addon/hiero/server/settings/imageio.py
@@ -149,15 +149,15 @@ class ImageIOSettings(BaseSettingsModel):
DEFAULT_IMAGEIO_SETTINGS = {
"workfile": {
- "ocioConfigName": "nuke-default",
- "workingSpace": "linear",
- "viewerLut": "sRGB",
- "eightBitLut": "sRGB",
- "sixteenBitLut": "sRGB",
- "logLut": "Cineon",
- "floatLut": "linear",
- "thumbnailLut": "sRGB",
- "monitorOutLut": "sRGB"
+ "ocioConfigName": "aces_1.2",
+ "workingSpace": "role_scene_linear",
+ "viewerLut": "ACES/sRGB",
+ "eightBitLut": "role_matte_paint",
+ "sixteenBitLut": "role_texture_paint",
+ "logLut": "role_compositing_log",
+ "floatLut": "role_scene_linear",
+ "thumbnailLut": "ACES/sRGB",
+ "monitorOutLut": "ACES/sRGB"
},
"regexInputs": {
"inputs": [
diff --git a/server_addon/hiero/server/version.py b/server_addon/hiero/server/version.py
deleted file mode 100644
index b3f4756216..0000000000
--- a/server_addon/hiero/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.2"
diff --git a/server_addon/houdini/package.py b/server_addon/houdini/package.py
new file mode 100644
index 0000000000..4e441c76ae
--- /dev/null
+++ b/server_addon/houdini/package.py
@@ -0,0 +1,3 @@
+name = "houdini"
+title = "Houdini"
+version = "0.2.13"
diff --git a/server_addon/houdini/server/__init__.py b/server_addon/houdini/server/__init__.py
index 870ec2d0b7..8c1ffcb0b3 100644
--- a/server_addon/houdini/server/__init__.py
+++ b/server_addon/houdini/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import HoudiniSettings, DEFAULT_VALUES
class Houdini(BaseServerAddon):
- name = "houdini"
- title = "Houdini"
- version = __version__
settings_model: Type[HoudiniSettings] = HoudiniSettings
async def get_default_settings(self):
diff --git a/server_addon/houdini/server/settings/imageio.py b/server_addon/houdini/server/settings/imageio.py
index f4850c5df7..c4f4813d51 100644
--- a/server_addon/houdini/server/settings/imageio.py
+++ b/server_addon/houdini/server/settings/imageio.py
@@ -34,6 +34,34 @@ class ImageIOFileRulesModel(BaseSettingsModel):
return value
+class WorkfileImageIOModel(BaseSettingsModel):
+ """Workfile settings help.
+
+ Empty values will be skipped, allowing any existing env vars to
+ pass through as defined.
+
+ Note: The render space in Houdini is
+ always set to the 'scene_linear' role."""
+
+ enabled: bool = SettingsField(False, title="Enabled")
+ default_display: str = SettingsField(
+ title="Default active displays",
+ description="It behaves like the 'OCIO_ACTIVE_DISPLAYS' env var,"
+ " Colon-separated list of displays, e.g ACES:P3"
+ )
+ default_view: str = SettingsField(
+ title="Default active views",
+ description="It behaves like the 'OCIO_ACTIVE_VIEWS' env var,"
+ " Colon-separated list of views, e.g sRGB:DCDM"
+ )
+ review_color_space: str = SettingsField(
+ title="Review colorspace",
+ description="It exposes OCIO Colorspace parameter in opengl nodes."
+ "if left empty, Ayon will figure out the default "
+ "colorspace using your default display and default view."
+ )
+
+
class HoudiniImageIOModel(BaseSettingsModel):
activate_host_color_management: bool = SettingsField(
True, title="Enable Color Management"
@@ -46,3 +74,26 @@ class HoudiniImageIOModel(BaseSettingsModel):
default_factory=ImageIOFileRulesModel,
title="File Rules"
)
+ workfile: WorkfileImageIOModel = SettingsField(
+ default_factory=WorkfileImageIOModel,
+ title="Workfile"
+ )
+
+
+DEFAULT_IMAGEIO_SETTINGS = {
+ "activate_host_color_management": False,
+ "ocio_config": {
+ "override_global_config": False,
+ "filepath": []
+ },
+ "file_rules": {
+ "activate_host_rules": False,
+ "rules": []
+ },
+ "workfile": {
+ "enabled": False,
+ "default_display": "ACES",
+ "default_view": "sRGB",
+ "review_color_space": ""
+ }
+}
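# A sketch of how the workfile imageio settings above could translate to
# the OCIO environment, matching the docstring's "empty values will be
# skipped" behavior. The helper name and mapping are assumptions for
# illustration, not the actual Houdini host code.
import os


def apply_workfile_imageio(settings, env):
    if not settings.get("enabled"):
        return
    mapping = {
        "default_display": "OCIO_ACTIVE_DISPLAYS",  # e.g. "ACES:P3"
        "default_view": "OCIO_ACTIVE_VIEWS",        # e.g. "sRGB:DCDM"
    }
    for key, env_key in mapping.items():
        value = settings.get(key)
        if value:  # empty string -> keep whatever is already set
            env[env_key] = value


if __name__ == "__main__":
    env = dict(os.environ)
    apply_workfile_imageio(
        {"enabled": True, "default_display": "ACES", "default_view": "sRGB"},
        env,
    )
    print(env["OCIO_ACTIVE_DISPLAYS"], env["OCIO_ACTIVE_VIEWS"])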
diff --git a/server_addon/houdini/server/settings/main.py b/server_addon/houdini/server/settings/main.py
index cbb19d15b7..3acab0ce74 100644
--- a/server_addon/houdini/server/settings/main.py
+++ b/server_addon/houdini/server/settings/main.py
@@ -3,7 +3,10 @@ from .general import (
GeneralSettingsModel,
DEFAULT_GENERAL_SETTINGS
)
-from .imageio import HoudiniImageIOModel
+from .imageio import (
+ HoudiniImageIOModel,
+ DEFAULT_IMAGEIO_SETTINGS
+)
from .shelves import ShelvesModel
from .create import (
CreatePluginsModel,
@@ -40,6 +43,7 @@ class HoudiniSettings(BaseSettingsModel):
DEFAULT_VALUES = {
"general": DEFAULT_GENERAL_SETTINGS,
+ "imageio": DEFAULT_IMAGEIO_SETTINGS,
"shelves": [],
"create": DEFAULT_HOUDINI_CREATE_SETTINGS,
"publish": DEFAULT_HOUDINI_PUBLISH_SETTINGS
diff --git a/server_addon/houdini/server/version.py b/server_addon/houdini/server/version.py
deleted file mode 100644
index b5c9b6cb71..0000000000
--- a/server_addon/houdini/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.2.12"
diff --git a/server_addon/max/package.py b/server_addon/max/package.py
new file mode 100644
index 0000000000..fb1f1b3050
--- /dev/null
+++ b/server_addon/max/package.py
@@ -0,0 +1,3 @@
+name = "max"
+title = "Max"
+version = "0.1.7"
diff --git a/server_addon/max/server/__init__.py b/server_addon/max/server/__init__.py
index 31c694a084..d03b29d249 100644
--- a/server_addon/max/server/__init__.py
+++ b/server_addon/max/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import MaxSettings, DEFAULT_VALUES
class MaxAddon(BaseServerAddon):
- name = "max"
- title = "Max"
- version = __version__
settings_model: Type[MaxSettings] = MaxSettings
async def get_default_settings(self):
diff --git a/server_addon/max/server/version.py b/server_addon/max/server/version.py
deleted file mode 100644
index f1380eede2..0000000000
--- a/server_addon/max/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.7"
diff --git a/server_addon/maya/package.py b/server_addon/maya/package.py
new file mode 100644
index 0000000000..5c6ce923aa
--- /dev/null
+++ b/server_addon/maya/package.py
@@ -0,0 +1,3 @@
+name = "maya"
+title = "Maya"
+version = "0.1.17"
diff --git a/server_addon/maya/server/__init__.py b/server_addon/maya/server/__init__.py
index 8784427dcf..6dda2cdd77 100644
--- a/server_addon/maya/server/__init__.py
+++ b/server_addon/maya/server/__init__.py
@@ -2,13 +2,9 @@
from ayon_server.addons import BaseServerAddon
from .settings.main import MayaSettings, DEFAULT_MAYA_SETTING
-from .version import __version__
class MayaAddon(BaseServerAddon):
- name = "maya"
- title = "Maya"
- version = __version__
settings_model = MayaSettings
async def get_default_settings(self):
diff --git a/server_addon/maya/server/settings/loaders.py b/server_addon/maya/server/settings/loaders.py
index f59711b1e6..2f104d2858 100644
--- a/server_addon/maya/server/settings/loaders.py
+++ b/server_addon/maya/server/settings/loaders.py
@@ -103,6 +103,17 @@ class ImportLoaderModel(BaseSettingsModel):
group_name: str = SettingsField(title="Group name")
+class YetiRigLoaderModel(LoaderEnabledModel):
+ create_cache_instance_on_load: bool = SettingsField(
+ title="Create Yeti Cache instance on load",
+ description=(
+ "When enabled, upon loading a Yeti Rig product a new Yeti cache "
+ "instance is automatically created as preparation to publishing "
+ "the output directly."
+ )
+ )
+
+
class LoadersModel(BaseSettingsModel):
colors: ColorsSetting = SettingsField(
default_factory=ColorsSetting,
@@ -195,8 +206,8 @@ class LoadersModel(BaseSettingsModel):
default_factory=LoaderEnabledModel,
title="Yeti Cache Loader"
)
- YetiRigLoader: LoaderEnabledModel = SettingsField(
- default_factory=LoaderEnabledModel,
+ YetiRigLoader: YetiRigLoaderModel = SettingsField(
+ default_factory=YetiRigLoaderModel,
title="Yeti Rig Loader"
)
@@ -266,5 +277,8 @@ DEFAULT_LOADERS_SETTING = {
"VRaySceneLoader": {"enabled": True},
"XgenLoader": {"enabled": True},
"YetiCacheLoader": {"enabled": True},
- "YetiRigLoader": {"enabled": True},
+ "YetiRigLoader": {
+ "enabled": True,
+ "create_cache_instance_on_load": True
+ },
}
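# ayon_server settings models are pydantic-based. A rough plain-pydantic
# (v2) analogue of the new YetiRigLoaderModel and its defaults;
# BaseSettingsModel/SettingsField are approximated with pydantic
# primitives, so this is a sketch rather than the server API.
from pydantic import BaseModel, Field


class LoaderEnabledModel(BaseModel):
    enabled: bool = Field(True, title="Enabled")


class YetiRigLoaderModel(LoaderEnabledModel):
    create_cache_instance_on_load: bool = Field(
        True, title="Create Yeti Cache instance on load"
    )


if __name__ == "__main__":
    # Defaults as they would serialize for the settings frontend
    print(YetiRigLoaderModel().model_dump())
    # -> {'enabled': True, 'create_cache_instance_on_load': True}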
diff --git a/server_addon/maya/server/settings/publishers.py b/server_addon/maya/server/settings/publishers.py
index 27288053a2..8dcffbb59a 100644
--- a/server_addon/maya/server/settings/publishers.py
+++ b/server_addon/maya/server/settings/publishers.py
@@ -35,6 +35,51 @@ def angular_unit_enum():
]
+def extract_alembic_data_format_enum():
+ return [
+ {"label": "ogawa", "value": "ogawa"},
+ {"label": "HDF", "value": "HDF"}
+ ]
+
+
+def extract_alembic_overrides_enum():
+ return [
+ {"label": "Custom Attributes", "value": "attr"},
+ {"label": "Custom Attributes Prefix", "value": "attrPrefix"},
+ {"label": "Auto Subd", "value": "autoSubd"},
+ {"label": "Data Format", "value": "dataFormat"},
+ {"label": "Euler Filter", "value": "eulerFilter"},
+ {"label": "Mel Per Frame Callback", "value": "melPerFrameCallback"},
+ {"label": "Mel Post Job Callback", "value": "melPostJobCallback"},
+ {"label": "Pre Roll", "value": "preRoll"},
+ {"label": "Pre Roll Start Frame", "value": "preRollStartFrame"},
+ {
+ "label": "Python Per Frame Callback",
+ "value": "pythonPerFrameCallback"
+ },
+ {
+ "label": "Python Post Job Callback",
+ "value": "pythonPostJobCallback"
+ },
+ {"label": "Renderable Only", "value": "renderableOnly"},
+ {"label": "Strip Namespaces", "value": "stripNamespaces"},
+ {"label": "User Attr", "value": "userAttr"},
+ {"label": "User Attr Prefix", "value": "userAttrPrefix"},
+ {"label": "UV Write", "value": "uvWrite"},
+ {"label": "UVs Only", "value": "uvsOnly"},
+ {"label": "Verbose", "value": "verbose"},
+ {"label": "Visible Only", "value": "visibleOnly"},
+ {"label": "Whole Frame Geo", "value": "wholeFrameGeo"},
+ {"label": "World Space", "value": "worldSpace"},
+ {"label": "Write Color Sets", "value": "writeColorSets"},
+ {"label": "Write Creases", "value": "writeCreases"},
+ {"label": "Write Face Sets", "value": "writeFaceSets"},
+ {"label": "Write Normals", "value": "writeNormals"},
+ {"label": "Write UV Sets", "value": "writeUVSets"},
+ {"label": "Write Visibility", "value": "writeVisibility"}
+ ]
+
+
class BasicValidateModel(BaseSettingsModel):
enabled: bool = SettingsField(title="Enabled")
optional: bool = SettingsField(title="Optional")
@@ -299,6 +344,115 @@ class ExtractAlembicModel(BaseSettingsModel):
families: list[str] = SettingsField(
default_factory=list,
title="Families")
+ autoSubd: bool = SettingsField(
+ title="Auto Subd",
+ description=(
+ "If this flag is present and the mesh has crease edges, crease "
+ "vertices or holes, the mesh (OPolyMesh) would now be written out "
+ "as an OSubD and crease info will be stored in the Alembic file. "
+ "Otherwise, creases info won't be preserved in Alembic file unless"
+ " a custom Boolean attribute SubDivisionMesh has been added to "
+ "mesh node and its value is true."
+ )
+ )
+ eulerFilter: bool = SettingsField(
+ title="Euler Filter",
+ description="Apply Euler filter while sampling rotations."
+ )
+ renderableOnly: bool = SettingsField(
+ title="Renderable Only",
+ description="Only export renderable visible shapes."
+ )
+ stripNamespaces: bool = SettingsField(
+ title="Strip Namespaces",
+ description=(
+ "Namespaces will be stripped off of the node before being written "
+ "to Alembic."
+ )
+ )
+ uvsOnly: bool = SettingsField(
+ title="UVs Only",
+ description=(
+ "If this flag is present, only uv data for PolyMesh and SubD "
+ "shapes will be written to the Alembic file."
+ )
+ )
+ uvWrite: bool = SettingsField(
+ title="UV Write",
+ description=(
+ "Uv data for PolyMesh and SubD shapes will be written to the "
+ "Alembic file."
+ )
+ )
+ verbose: bool = SettingsField(
+ title="Verbose",
+ description="Prints the current frame that is being evaluated."
+ )
+ visibleOnly: bool = SettingsField(
+ title="Visible Only",
+ description="Only export dag objects visible during frame range."
+ )
+ wholeFrameGeo: bool = SettingsField(
+ title="Whole Frame Geo",
+ description=(
+ "Data for geometry will only be written out on whole frames."
+ )
+ )
+ worldSpace: bool = SettingsField(
+ title="World Space",
+ description="Any root nodes will be stored in world space."
+ )
+ writeColorSets: bool = SettingsField(
+ title="Write Color Sets",
+ description="Write vertex colors with the geometry."
+ )
+ writeFaceSets: bool = SettingsField(
+ title="Write Face Sets",
+ description="Write face sets with the geometry."
+ )
+ writeNormals: bool = SettingsField(
+ title="Write Normals",
+ description="Write normals with the deforming geometry."
+ )
+ writeUVSets: bool = SettingsField(
+ title="Write UV Sets",
+ description=(
+ "Write all uv sets on MFnMeshes as vector 2 indexed geometry "
+ "parameters with face varying scope."
+ )
+ )
+ writeVisibility: bool = SettingsField(
+ title="Write Visibility",
+ description=(
+ "Visibility state will be stored in the Alembic file. Otherwise "
+ "everything written out is treated as visible."
+ )
+ )
+ preRoll: bool = SettingsField(
+ title="Pre Roll",
+ description=(
+ "When enabled, the pre roll start frame is used to pre roll the "
+ "When enabled, the pre roll start frame is used to being the "
+ "evaluation of the mesh. From the pre roll start frame to the "
+ "alembic start frame, will not be written to disk. This can be "
+ "used for simulation run up."
+ )
+ )
+ preRollStartFrame: int = SettingsField(
+ title="Pre Roll Start Frame",
+ description=(
+ "The frame to start scene evaluation at. This is used to set the "
+ "starting frame for time dependent translations and can be used to"
+ " evaluate run-up that isn't actually translated.\n"
+ "NOTE: Pre Roll needs to be enabled for this start frame "
+ "to be considered."
+ )
+ )
+ dataFormat: str = SettingsField(
+ enum_resolver=extract_alembic_data_format_enum,
+ title="Data Format",
+ description="The data format to use to write the file."
+ )
bake_attributes: list[str] = SettingsField(
default_factory=list, title="Bake Attributes",
description="List of attributes that will be included in the alembic "
@@ -309,6 +463,73 @@ class ExtractAlembicModel(BaseSettingsModel):
description="List of attribute prefixes for attributes that will be "
"included in the alembic export.",
)
+ attr: str = SettingsField(
+ title="Custom Attributes",
+ placeholder="attr1;attr2",
+ description=(
+ "Attributes matching by name will be included in the Alembic "
+ "export. Attributes should be separated by semi-colon `;`"
+ )
+ )
+ attrPrefix: str = SettingsField(
+ title="Custom Attributes Prefix",
+ placeholder="prefix1;prefix2",
+ description=(
+ "Attributes starting with these prefixes will be included in the "
+ "Alembic export. Attributes should be separated by semi-colon `;`"
+ )
+ )
+ userAttr: str = SettingsField(
+ title="User Attr",
+ placeholder="attr1;attr2",
+ description=(
+ "Attributes matching by name will be included in the Alembic "
+ "export. Attributes should be separated by semi-colon `;`"
+ )
+ )
+ userAttrPrefix: str = SettingsField(
+ title="User Attr Prefix",
+ placeholder="prefix1;prefix2",
+ description=(
+ "Attributes starting with these prefixes will be included in the "
+ "Alembic export. Attributes should be separated by semi-colon `;`"
+ )
+ )
+ melPerFrameCallback: str = SettingsField(
+ title="Mel Per Frame Callback",
+ description=(
+ "When each frame (and the static frame) is evaluated the string "
+ "specified is evaluated as a Mel command."
+ )
+ )
+ melPostJobCallback: str = SettingsField(
+ title="Mel Post Job Callback",
+ description=(
+ "When the translation has finished the string specified is "
+ "evaluated as a Mel command."
+ )
+ )
+ pythonPerFrameCallback: str = SettingsField(
+ title="Python Per Frame Callback",
+ description=(
+ "When each frame (and the static frame) is evaluated the string "
+ "specified is evaluated as a python command."
+ )
+ )
+ pythonPostJobCallback: str = SettingsField(
+ title="Python Post Job Callback",
+ description=(
+ "When the translation has finished the string specified is "
+ "evaluated as a python command."
+ )
+ )
+ overrides: list[str] = SettingsField(
+ enum_resolver=extract_alembic_overrides_enum,
+ title="Exposed Overrides",
+ description=(
+ "Expose the attribute in this list to the user when publishing."
+ )
+ )
class ExtractObjModel(BaseSettingsModel):
@@ -668,15 +889,19 @@ class PublishersModel(BaseSettingsModel):
default_factory=BasicValidateModel,
title="Validate Alembic Visible Node",
)
+ ValidateAlembicDefaultsPointcache: BasicValidateModel = SettingsField(
+ default_factory=BasicValidateModel,
+ title="Validate Alembic Defaults Pointcache"
+ )
+ ValidateAlembicDefaultsAnimation: BasicValidateModel = SettingsField(
+ default_factory=BasicValidateModel,
+ title="Validate Alembic Defaults Animation"
+ )
ExtractProxyAlembic: ExtractProxyAlembicModel = SettingsField(
default_factory=ExtractProxyAlembicModel,
title="Extract Proxy Alembic",
section="Model Extractors",
)
- ExtractAlembic: ExtractAlembicModel = SettingsField(
- default_factory=ExtractAlembicModel,
- title="Extract Alembic",
- )
ExtractObj: ExtractObjModel = SettingsField(
default_factory=ExtractObjModel,
title="Extract OBJ"
@@ -811,6 +1036,10 @@ class PublishersModel(BaseSettingsModel):
default_factory=ExtractModelModel,
title="Extract Model (Maya Scene)"
)
+ ExtractAlembic: ExtractAlembicModel = SettingsField(
+ default_factory=ExtractAlembicModel,
+ title="Extract Alembic"
+ )
DEFAULT_SUFFIX_NAMING = {
@@ -1200,16 +1429,6 @@ DEFAULT_PUBLISH_SETTINGS = {
"proxyAbc"
]
},
- "ExtractAlembic": {
- "enabled": True,
- "families": [
- "pointcache",
- "model",
- "vrayproxy.alembic"
- ],
- "bake_attributes": [],
- "bake_attribute_prefixes": []
- },
"ExtractObj": {
"enabled": False,
"optional": True,
@@ -1330,6 +1549,16 @@ DEFAULT_PUBLISH_SETTINGS = {
"optional": False,
"validate_shapes": True
},
+ "ValidateAlembicDefaultsPointcache": {
+ "enabled": True,
+ "optional": True,
+ "active": True
+ },
+ "ValidateAlembicDefaultsAnimation": {
+ "enabled": True,
+ "optional": True,
+ "active": True
+ },
"ExtractPlayblast": DEFAULT_PLAYBLAST_SETTING,
"ExtractMayaSceneRaw": {
"enabled": True,
@@ -1371,6 +1600,52 @@ DEFAULT_PUBLISH_SETTINGS = {
"ExtractModel": {
"enabled": True,
"optional": True,
- "active": True,
+ "active": True
+ },
+ "ExtractAlembic": {
+ "enabled": True,
+ "families": [
+ "pointcache",
+ "model",
+ "vrayproxy.alembic"
+ ],
+ "attr": "",
+ "attrPrefix": "",
+ "autoSubd": False,
+ "bake_attributes": [],
+ "bake_attribute_prefixes": [],
+ "dataFormat": "ogawa",
+ "eulerFilter": False,
+ "melPerFrameCallback": "",
+ "melPostJobCallback": "",
+ "overrides": [
+ "attr",
+ "attrPrefix",
+ "renderableOnly",
+ "visibleOnly",
+ "worldSpace",
+ "writeColorSets",
+ "writeFaceSets",
+ "writeNormals"
+ ],
+ "preRoll": False,
+ "preRollStartFrame": 0,
+ "pythonPerFrameCallback": "",
+ "pythonPostJobCallback": "",
+ "renderableOnly": False,
+ "stripNamespaces": True,
+ "uvsOnly": False,
+ "uvWrite": False,
+ "userAttr": "",
+ "userAttrPrefix": "",
+ "verbose": False,
+ "visibleOnly": False,
+ "wholeFrameGeo": False,
+ "worldSpace": True,
+ "writeColorSets": False,
+ "writeFaceSets": False,
+ "writeNormals": True,
+ "writeUVSets": False,
+ "writeVisibility": False
}
}
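# The new ExtractAlembic fields mirror flags of Maya's AbcExport job
# string. A sketch of assembling such flags from the settings dict; the
# flag spellings simply follow the field names above, and the actual
# extractor logic in the addon may differ.
def build_abc_job_args(settings):
    args = []
    bool_flags = (
        "eulerFilter", "renderableOnly", "stripNamespaces", "uvWrite",
        "worldSpace", "writeColorSets", "writeFaceSets", "verbose",
        "visibleOnly", "wholeFrameGeo", "writeVisibility",
    )
    for flag in bool_flags:
        if settings.get(flag):
            args.append("-{}".format(flag))

    args.append("-dataFormat {}".format(settings.get("dataFormat", "ogawa")))

    # Semicolon-separated attribute lists become repeated flags
    for flag in ("attr", "attrPrefix", "userAttr", "userAttrPrefix"):
        for attr in filter(None, settings.get(flag, "").split(";")):
            args.append("-{} {}".format(flag, attr))
    return " ".join(args)


if __name__ == "__main__":
    print(build_abc_job_args({
        "stripNamespaces": True,
        "worldSpace": True,
        "dataFormat": "ogawa",
        "attr": "cbId;myAttr",
    }))
    # -> -stripNamespaces -worldSpace -dataFormat ogawa -attr cbId -attr myAttr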
diff --git a/server_addon/maya/server/version.py b/server_addon/maya/server/version.py
deleted file mode 100644
index 75b463f198..0000000000
--- a/server_addon/maya/server/version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Package declaring addon version."""
-__version__ = "0.1.15"
diff --git a/server_addon/nuke/package.py b/server_addon/nuke/package.py
new file mode 100644
index 0000000000..bf03c4e7e7
--- /dev/null
+++ b/server_addon/nuke/package.py
@@ -0,0 +1,3 @@
+name = "nuke"
+title = "Nuke"
+version = "0.1.11"
diff --git a/server_addon/nuke/server/__init__.py b/server_addon/nuke/server/__init__.py
index 032ceea5fb..aeb5e36675 100644
--- a/server_addon/nuke/server/__init__.py
+++ b/server_addon/nuke/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import NukeSettings, DEFAULT_VALUES
class NukeAddon(BaseServerAddon):
- name = "nuke"
- title = "Nuke"
- version = __version__
settings_model: Type[NukeSettings] = NukeSettings
async def get_default_settings(self):
diff --git a/server_addon/nuke/server/settings/imageio.py b/server_addon/nuke/server/settings/imageio.py
index 1b84457133..9cdb0bf1d7 100644
--- a/server_addon/nuke/server/settings/imageio.py
+++ b/server_addon/nuke/server/settings/imageio.py
@@ -97,8 +97,23 @@ class WorkfileColorspaceSettings(BaseSettingsModel):
working_space: str = SettingsField(
title="Working Space"
)
- thumbnail_space: str = SettingsField(
- title="Thumbnail Space"
+ monitor_lut: str = SettingsField(
+ title="Thumbnails"
+ )
+ monitor_out_lut: str = SettingsField(
+ title="Monitor Out"
+ )
+ int_8_lut: str = SettingsField(
+ title="8-bit Files"
+ )
+ int_16_lut: str = SettingsField(
+ title="16-bit Files"
+ )
+ log_lut: str = SettingsField(
+ title="Log Files"
+ )
+ float_lut: str = SettingsField(
+ title="Float Files"
)
@@ -120,6 +135,9 @@ class ViewProcessModel(BaseSettingsModel):
viewerProcess: str = SettingsField(
title="Viewer Process Name"
)
+ output_transform: str = SettingsField(
+ title="Output Transform"
+ )
class ImageIOConfigModel(BaseSettingsModel):
@@ -214,16 +232,23 @@ class ImageIOSettings(BaseSettingsModel):
DEFAULT_IMAGEIO_SETTINGS = {
"viewer": {
- "viewerProcess": "sRGB (default)"
+ "viewerProcess": "ACES/sRGB",
+ "output_transform": "ACES/sRGB"
},
"baking": {
- "viewerProcess": "rec709 (default)"
+ "viewerProcess": "ACES/Rec.709",
+ "output_transform": "ACES/Rec.709"
},
"workfile": {
"color_management": "OCIO",
- "native_ocio_config": "nuke-default",
- "working_space": "scene_linear",
- "thumbnail_space": "sRGB (default)",
+ "native_ocio_config": "aces_1.2",
+ "working_space": "role_scene_linear",
+ "monitor_lut": "ACES/sRGB",
+ "monitor_out_lut": "ACES/sRGB",
+ "int_8_lut": "role_matte_paint",
+ "int_16_lut": "role_texture_paint",
+ "log_lut": "role_compositing_log",
+ "float_lut": "role_scene_linear"
},
"nodes": {
"required_nodes": [
diff --git a/server_addon/nuke/server/version.py b/server_addon/nuke/server/version.py
deleted file mode 100644
index 569b1212f7..0000000000
--- a/server_addon/nuke/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.10"
diff --git a/server_addon/photoshop/package.py b/server_addon/photoshop/package.py
new file mode 100644
index 0000000000..25615529d1
--- /dev/null
+++ b/server_addon/photoshop/package.py
@@ -0,0 +1,3 @@
+name = "photoshop"
+title = "Photoshop"
+version = "0.1.2"
diff --git a/server_addon/photoshop/server/__init__.py b/server_addon/photoshop/server/__init__.py
index 3a45f7a809..86d1025a2d 100644
--- a/server_addon/photoshop/server/__init__.py
+++ b/server_addon/photoshop/server/__init__.py
@@ -1,14 +1,9 @@
from ayon_server.addons import BaseServerAddon
from .settings import PhotoshopSettings, DEFAULT_PHOTOSHOP_SETTING
-from .version import __version__
class Photoshop(BaseServerAddon):
- name = "photoshop"
- title = "Photoshop"
- version = __version__
-
settings_model = PhotoshopSettings
async def get_default_settings(self):
diff --git a/server_addon/photoshop/server/version.py b/server_addon/photoshop/server/version.py
deleted file mode 100644
index df0c92f1e2..0000000000
--- a/server_addon/photoshop/server/version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Package declaring addon version."""
-__version__ = "0.1.2"
diff --git a/server_addon/resolve/package.py b/server_addon/resolve/package.py
new file mode 100644
index 0000000000..cf92413bce
--- /dev/null
+++ b/server_addon/resolve/package.py
@@ -0,0 +1,3 @@
+name = "resolve"
+title = "DaVinci Resolve"
+version = "0.1.0"
diff --git a/server_addon/resolve/server/__init__.py b/server_addon/resolve/server/__init__.py
index a84180d0f5..35d2db19e4 100644
--- a/server_addon/resolve/server/__init__.py
+++ b/server_addon/resolve/server/__init__.py
@@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import ResolveSettings, DEFAULT_VALUES
class ResolveAddon(BaseServerAddon):
- name = "resolve"
- title = "DaVinci Resolve"
- version = __version__
settings_model: Type[ResolveSettings] = ResolveSettings
- frontend_scopes = {}
- services = {}
async def get_default_settings(self):
settings_model_cls = self.get_settings_model()
diff --git a/server_addon/resolve/server/version.py b/server_addon/resolve/server/version.py
deleted file mode 100644
index 3dc1f76bc6..0000000000
--- a/server_addon/resolve/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.0"
diff --git a/server_addon/royal_render/server/version.py b/server_addon/royal_render/server/version.py
deleted file mode 100644
index 485f44ac21..0000000000
--- a/server_addon/royal_render/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.1"
diff --git a/server_addon/royalrender/package.py b/server_addon/royalrender/package.py
new file mode 100644
index 0000000000..1fdea4abbb
--- /dev/null
+++ b/server_addon/royalrender/package.py
@@ -0,0 +1,3 @@
+name = "royalrender"
+title = "Royal Render"
+version = "0.1.1"
diff --git a/server_addon/royal_render/server/__init__.py b/server_addon/royalrender/server/__init__.py
similarity index 77%
rename from server_addon/royal_render/server/__init__.py
rename to server_addon/royalrender/server/__init__.py
index c5f0aafa00..5b10678136 100644
--- a/server_addon/royal_render/server/__init__.py
+++ b/server_addon/royalrender/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import RoyalRenderSettings, DEFAULT_VALUES
class RoyalRenderAddon(BaseServerAddon):
- name = "royalrender"
- version = __version__
- title = "Royal Render"
settings_model: Type[RoyalRenderSettings] = RoyalRenderSettings
async def get_default_settings(self):
diff --git a/server_addon/royal_render/server/settings.py b/server_addon/royalrender/server/settings.py
similarity index 100%
rename from server_addon/royal_render/server/settings.py
rename to server_addon/royalrender/server/settings.py
diff --git a/server_addon/substancepainter/package.py b/server_addon/substancepainter/package.py
new file mode 100644
index 0000000000..d445b0059f
--- /dev/null
+++ b/server_addon/substancepainter/package.py
@@ -0,0 +1,3 @@
+name = "substancepainter"
+title = "Substance Painter"
+version = "0.1.1"
diff --git a/server_addon/substancepainter/server/__init__.py b/server_addon/substancepainter/server/__init__.py
index 2bf808d508..f6cd51e610 100644
--- a/server_addon/substancepainter/server/__init__.py
+++ b/server_addon/substancepainter/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import SubstancePainterSettings, DEFAULT_SPAINTER_SETTINGS
class SubstancePainterAddon(BaseServerAddon):
- name = "substancepainter"
- title = "Substance Painter"
- version = __version__
settings_model: Type[SubstancePainterSettings] = SubstancePainterSettings
async def get_default_settings(self):
diff --git a/server_addon/substancepainter/server/settings/load_plugins.py b/server_addon/substancepainter/server/settings/load_plugins.py
new file mode 100644
index 0000000000..e6b2fd86c3
--- /dev/null
+++ b/server_addon/substancepainter/server/settings/load_plugins.py
@@ -0,0 +1,122 @@
+from ayon_server.settings import BaseSettingsModel, SettingsField
+
+
+def normal_map_format_enum():
+ return [
+ {"label": "DirectX", "value": "NormalMapFormat.DirectX"},
+ {"label": "OpenGL", "value": "NormalMapFormat.OpenGL"},
+ ]
+
+
+def tangent_space_enum():
+ return [
+ {"label": "Per Fragment", "value": "TangentSpace.PerFragment"},
+ {"label": "Per Vertex", "value": "TangentSpace.PerVertex"},
+ ]
+
+
+def uv_workflow_enum():
+ return [
+ {"label": "Default", "value": "ProjectWorkflow.Default"},
+ {"label": "UV Tile", "value": "ProjectWorkflow.UVTile"},
+ {"label": "Texture Set Per UV Tile",
+ "value": "ProjectWorkflow.TextureSetPerUVTile"}
+ ]
+
+
+def document_resolution_enum():
+ return [
+ {"label": "128", "value": 128},
+ {"label": "256", "value": 256},
+ {"label": "512", "value": 512},
+ {"label": "1024", "value": 1024},
+ {"label": "2048", "value": 2048},
+ {"label": "4096", "value": 4096}
+ ]
+
+
+class ProjectTemplatesModel(BaseSettingsModel):
+ _layout = "expanded"
+ name: str = SettingsField("default", title="Template Name")
+    default_texture_resolution: int = SettingsField(
+        1024, enum_resolver=document_resolution_enum,
+        title="Document Resolution",
+        description=("Set texture resolution when "
+                     "creating a new project.")
+    )
+    import_cameras: bool = SettingsField(
+        True, title="Import Cameras",
+        description="Import cameras from the mesh file.")
+    normal_map_format: str = SettingsField(
+        "NormalMapFormat.DirectX", enum_resolver=normal_map_format_enum,
+        title="Normal Map Format",
+        description=("Set normal map format when "
+                     "creating a new project.")
+    )
+    project_workflow: str = SettingsField(
+        "ProjectWorkflow.Default", enum_resolver=uv_workflow_enum,
+        title="UV Workflow",
+        description=("Set UV workflow when "
+                     "creating a new project.")
+    )
+    tangent_space_mode: str = SettingsField(
+        "TangentSpace.PerFragment", enum_resolver=tangent_space_enum,
+        title="Tangent Space",
+        description=("An option to compute tangent space "
+                     "when creating a new project.")
+    )
+    preserve_strokes: bool = SettingsField(
+        True, title="Preserve Strokes",
+        description=("Preserve stroke positions on the mesh.\n"
+                     "(only relevant when loading into an "
+                     "existing project)")
+    )
+
+
+class ProjectTemplateSettingModel(BaseSettingsModel):
+ project_templates: list[ProjectTemplatesModel] = SettingsField(
+        default_factory=list,
+ title="Project Templates"
+ )
+
+
+class LoadersModel(BaseSettingsModel):
+ SubstanceLoadProjectMesh: ProjectTemplateSettingModel = SettingsField(
+ default_factory=ProjectTemplateSettingModel,
+ title="Load Mesh"
+ )
+
+
+DEFAULT_LOADER_SETTINGS = {
+ "SubstanceLoadProjectMesh": {
+ "project_templates": [
+ {
+ "name": "2K(Default)",
+ "default_texture_resolution": 2048,
+ "import_cameras": True,
+ "normal_map_format": "NormalMapFormat.DirectX",
+ "project_workflow": "ProjectWorkflow.Default",
+ "tangent_space_mode": "TangentSpace.PerFragment",
+ "preserve_strokes": True
+ },
+ {
+ "name": "2K(UV tile)",
+ "default_texture_resolution": 2048,
+ "import_cameras": True,
+ "normal_map_format": "NormalMapFormat.DirectX",
+ "project_workflow": "ProjectWorkflow.UVTile",
+ "tangent_space_mode": "TangentSpace.PerFragment",
+ "preserve_strokes": True
+ },
+ {
+ "name": "4K(Custom)",
+ "default_texture_resolution": 4096,
+ "import_cameras": True,
+ "normal_map_format": "NormalMapFormat.OpenGL",
+ "project_workflow": "ProjectWorkflow.UVTile",
+ "tangent_space_mode": "TangentSpace.PerFragment",
+ "preserve_strokes": True
+ }
+ ]
+ }
+}
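The template values stored here are string forms of Substance Painter's Python enums. Below is a minimal sketch of resolving one stored template into project settings, assuming substance_painter.project.Settings accepts these keyword arguments; resolve_template is a hypothetical helper, not part of the addon.

```python
# Sketch only: converts a stored template dict into Substance Painter
# project settings. Assumes the enum strings mirror the Python API
# ("NormalMapFormat.DirectX" -> NormalMapFormat.DirectX).
import substance_painter.project as sp_project


def resolve_template(template: dict) -> sp_project.Settings:
    def to_enum(value: str):
        # Split "NormalMapFormat.DirectX" into the enum and its member.
        enum_name, member = value.split(".")
        return getattr(getattr(sp_project, enum_name), member)

    return sp_project.Settings(
        import_cameras=template["import_cameras"],
        normal_map_format=to_enum(template["normal_map_format"]),
        project_workflow=to_enum(template["project_workflow"]),
        tangent_space_mode=to_enum(template["tangent_space_mode"]),
        default_texture_resolution=template["default_texture_resolution"],
    )


# e.g. sp_project.create("asset.fbx", settings=resolve_template(template))
```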
diff --git a/server_addon/substancepainter/server/settings/main.py b/server_addon/substancepainter/server/settings/main.py
index f80fa9fe1e..93523fd650 100644
--- a/server_addon/substancepainter/server/settings/main.py
+++ b/server_addon/substancepainter/server/settings/main.py
@@ -1,5 +1,6 @@
from ayon_server.settings import BaseSettingsModel, SettingsField
from .imageio import ImageIOSettings, DEFAULT_IMAGEIO_SETTINGS
+from .load_plugins import LoadersModel, DEFAULT_LOADER_SETTINGS
class ShelvesSettingsModel(BaseSettingsModel):
@@ -17,9 +18,12 @@ class SubstancePainterSettings(BaseSettingsModel):
default_factory=list,
title="Shelves"
)
+ load: LoadersModel = SettingsField(
+        default_factory=LoadersModel, title="Loaders")
DEFAULT_SPAINTER_SETTINGS = {
"imageio": DEFAULT_IMAGEIO_SETTINGS,
- "shelves": []
+ "shelves": [],
+ "load": DEFAULT_LOADER_SETTINGS,
}
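Because the defaults dict and the pydantic models are maintained side by side, they can silently drift apart. A quick illustrative round-trip check follows, assuming the settings modules are importable directly; this snippet is not part of the addon.

```python
# Illustrative check only: pydantic validates the defaults dict on
# instantiation, so drift between DEFAULT_SPAINTER_SETTINGS and the
# models surfaces as a ValidationError. The flat import path is an
# assumption made for the example.
from settings.main import (
    SubstancePainterSettings,
    DEFAULT_SPAINTER_SETTINGS,
)

settings = SubstancePainterSettings(**DEFAULT_SPAINTER_SETTINGS)
templates = settings.load.SubstanceLoadProjectMesh.project_templates
assert templates[0].name == "2K(Default)"
assert templates[0].default_texture_resolution == 2048
```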
diff --git a/server_addon/substancepainter/server/version.py b/server_addon/substancepainter/server/version.py
deleted file mode 100644
index 3dc1f76bc6..0000000000
--- a/server_addon/substancepainter/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.0"
diff --git a/server_addon/timers_manager/package.py b/server_addon/timers_manager/package.py
new file mode 100644
index 0000000000..bd6b81b4b7
--- /dev/null
+++ b/server_addon/timers_manager/package.py
@@ -0,0 +1,3 @@
+name = "timers_manager"
+title = "Timers Manager"
+version = "0.1.1"
diff --git a/server_addon/timers_manager/server/__init__.py b/server_addon/timers_manager/server/__init__.py
index 29f9d47370..32e83d295c 100644
--- a/server_addon/timers_manager/server/__init__.py
+++ b/server_addon/timers_manager/server/__init__.py
@@ -2,12 +2,8 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import TimersManagerSettings
class TimersManagerAddon(BaseServerAddon):
- name = "timers_manager"
- version = __version__
- title = "Timers Manager"
settings_model: Type[TimersManagerSettings] = TimersManagerSettings
diff --git a/server_addon/timers_manager/server/version.py b/server_addon/timers_manager/server/version.py
deleted file mode 100644
index 485f44ac21..0000000000
--- a/server_addon/timers_manager/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.1"
diff --git a/server_addon/traypublisher/package.py b/server_addon/traypublisher/package.py
new file mode 100644
index 0000000000..4ca8ae9fd3
--- /dev/null
+++ b/server_addon/traypublisher/package.py
@@ -0,0 +1,3 @@
+name = "traypublisher"
+title = "TrayPublisher"
+version = "0.1.4"
diff --git a/server_addon/traypublisher/server/__init__.py b/server_addon/traypublisher/server/__init__.py
index e6f079609f..830f325ac0 100644
--- a/server_addon/traypublisher/server/__init__.py
+++ b/server_addon/traypublisher/server/__init__.py
@@ -1,14 +1,9 @@
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import TraypublisherSettings, DEFAULT_TRAYPUBLISHER_SETTING
class Traypublisher(BaseServerAddon):
- name = "traypublisher"
- title = "TrayPublisher"
- version = __version__
-
settings_model = TraypublisherSettings
async def get_default_settings(self):
diff --git a/server_addon/traypublisher/server/settings/creator_plugins.py b/server_addon/traypublisher/server/settings/creator_plugins.py
index bf66d9a088..1ff14002aa 100644
--- a/server_addon/traypublisher/server/settings/creator_plugins.py
+++ b/server_addon/traypublisher/server/settings/creator_plugins.py
@@ -1,4 +1,7 @@
+from pydantic import validator
from ayon_server.settings import BaseSettingsModel, SettingsField
+from ayon_server.settings.validators import ensure_unique_names
+from ayon_server.exceptions import BadRequestException
class BatchMovieCreatorPlugin(BaseSettingsModel):
@@ -22,11 +25,139 @@ class BatchMovieCreatorPlugin(BaseSettingsModel):
)
+class ColumnItemModel(BaseSettingsModel):
+ """Allows to publish multiple video files in one go.
Name of matching
+ asset is parsed from file names ('asset.mov', 'asset_v001.mov',
+ 'my_asset_to_publish.mov')"""
+
+ name: str = SettingsField(
+ title="Name",
+ default=""
+ )
+
+ type: str = SettingsField(
+ title="Type",
+ default=""
+ )
+
+ default: str = SettingsField(
+ title="Default",
+ default=""
+ )
+
+ required_column: bool = SettingsField(
+ title="Required Column",
+ default=False
+ )
+
+ validation_pattern: str = SettingsField(
+ title="Validation Regex Pattern",
+ default="^(.*)$"
+ )
+
+
+class ColumnConfigModel(BaseSettingsModel):
+ """Allows to publish multiple video files in one go.
Name of matching
+ asset is parsed from file names ('asset.mov', 'asset_v001.mov',
+ 'my_asset_to_publish.mov')"""
+
+ csv_delimiter: str = SettingsField(
+ title="CSV delimiter",
+ default=","
+ )
+
+ columns: list[ColumnItemModel] = SettingsField(
+ title="Columns",
+ default_factory=list
+ )
+
+ @validator("columns")
+ def validate_unique_outputs(cls, value):
+ ensure_unique_names(value)
+ return value
+
+
+class RepresentationItemModel(BaseSettingsModel):
+ """Allows to publish multiple video files in one go.
+
+ Name of matching asset is parsed from file names
+ ('asset.mov', 'asset_v001.mov', 'my_asset_to_publish.mov')
+ """
+
+ name: str = SettingsField(
+ title="Name",
+ default=""
+ )
+
+ extensions: list[str] = SettingsField(
+ title="Extensions",
+ default_factory=list
+ )
+
+ @validator("extensions")
+ def validate_extension(cls, value):
+ for ext in value:
+ if not ext.startswith("."):
+ raise BadRequestException(f"Extension must start with '.': {ext}")
+ return value
+
+
+class RepresentationConfigModel(BaseSettingsModel):
+ """Allows to publish multiple video files in one go.
Name of matching
+ asset is parsed from file names ('asset.mov', 'asset_v001.mov',
+ 'my_asset_to_publish.mov')"""
+
+ tags_delimiter: str = SettingsField(
+ title="Tags delimiter",
+ default=";"
+ )
+
+ default_tags: list[str] = SettingsField(
+ title="Default tags",
+ default_factory=list
+ )
+
+ representations: list[RepresentationItemModel] = SettingsField(
+ title="Representations",
+ default_factory=list
+ )
+
+ @validator("representations")
+ def validate_unique_outputs(cls, value):
+ ensure_unique_names(value)
+ return value
+
+
+class IngestCSVPluginModel(BaseSettingsModel):
+ """Allows to publish multiple video files in one go.
Name of matching
+ asset is parsed from file names ('asset.mov', 'asset_v001.mov',
+ 'my_asset_to_publish.mov')"""
+
+ enabled: bool = SettingsField(
+ title="Enabled",
+ default=False
+ )
+
+ columns_config: ColumnConfigModel = SettingsField(
+ title="Columns config",
+ default_factory=ColumnConfigModel
+ )
+
+ representations_config: RepresentationConfigModel = SettingsField(
+ title="Representations config",
+ default_factory=RepresentationConfigModel
+ )
+
+
class TrayPublisherCreatePluginsModel(BaseSettingsModel):
BatchMovieCreator: BatchMovieCreatorPlugin = SettingsField(
title="Batch Movie Creator",
default_factory=BatchMovieCreatorPlugin
)
+ IngestCSV: IngestCSVPluginModel = SettingsField(
+ title="Ingest CSV",
+ default_factory=IngestCSVPluginModel
+ )
DEFAULT_CREATORS = {
@@ -41,4 +172,170 @@ DEFAULT_CREATORS = {
".mov"
]
},
+ "IngestCSV": {
+ "enabled": True,
+ "columns_config": {
+ "csv_delimiter": ",",
+ "columns": [
+ {
+ "name": "File Path",
+ "type": "text",
+ "default": "",
+ "required_column": True,
+ "validation_pattern": "^([a-z0-9#._\\/]*)$"
+ },
+ {
+ "name": "Folder Path",
+ "type": "text",
+ "default": "",
+ "required_column": True,
+ "validation_pattern": "^([a-zA-Z0-9_\\/]*)$"
+ },
+ {
+ "name": "Task Name",
+ "type": "text",
+ "default": "",
+ "required_column": True,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Product Type",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Variant",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Version",
+ "type": "number",
+ "default": 1,
+ "required_column": True,
+ "validation_pattern": "^(\\d{1,3})$"
+ },
+ {
+ "name": "Version Comment",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Version Thumbnail",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^([a-zA-Z0-9#._\\/]*)$"
+ },
+ {
+ "name": "Frame Start",
+ "type": "number",
+ "default": 0,
+ "required_column": True,
+ "validation_pattern": "^(\\d{1,8})$"
+ },
+ {
+ "name": "Frame End",
+ "type": "number",
+ "default": 0,
+ "required_column": True,
+ "validation_pattern": "^(\\d{1,8})$"
+ },
+ {
+ "name": "Handle Start",
+ "type": "number",
+ "default": 0,
+ "required_column": True,
+ "validation_pattern": "^(\\d)$"
+ },
+ {
+ "name": "Handle End",
+ "type": "number",
+ "default": 0,
+ "required_column": True,
+ "validation_pattern": "^(\\d)$"
+ },
+ {
+ "name": "FPS",
+ "type": "decimal",
+ "default": 0.0,
+ "required_column": True,
+ "validation_pattern": "^[0-9]*\\.[0-9]+$|^[0-9]+$"
+ },
+ {
+ "name": "Slate Exists",
+ "type": "bool",
+ "default": True,
+ "required_column": False,
+ "validation_pattern": "(True|False)"
+ },
+ {
+ "name": "Representation",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Representation Colorspace",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Representation Tags",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ }
+ ]
+ },
+ "representations_config": {
+ "tags_delimiter": ";",
+ "default_tags": [
+ "review"
+ ],
+ "representations": [
+ {
+ "name": "preview",
+ "extensions": [
+ ".mp4",
+ ".mov"
+ ]
+ },
+ {
+ "name": "exr",
+ "extensions": [
+ ".exr"
+ ]
+ },
+ {
+ "name": "edit",
+ "extensions": [
+ ".mov"
+ ]
+ },
+ {
+ "name": "review",
+ "extensions": [
+ ".mov"
+ ]
+ },
+ {
+ "name": "nuke",
+ "extensions": [
+ ".nk"
+ ]
+ }
+ ]
+ }
+ }
}
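A minimal sketch of how the columns config above could drive validation of an ingested CSV file; read_and_validate is a hypothetical helper, while csv_delimiter, required_column and validation_pattern keep the semantics defined in the settings.

```python
# Sketch only: reads a CSV with the configured delimiter and checks each
# cell against its column's validation_pattern; required columns must
# not be empty. Returns human-readable error messages.
import csv
import re


def read_and_validate(csv_path: str, columns_config: dict) -> list[str]:
    errors = []
    with open(csv_path, newline="") as f:
        reader = csv.DictReader(
            f, delimiter=columns_config["csv_delimiter"])
        # Start at 2: line 1 of the file is the header row.
        for line_nr, row in enumerate(reader, start=2):
            for column in columns_config["columns"]:
                value = row.get(column["name"], "")
                if not value:
                    if column["required_column"]:
                        errors.append(
                            f"Line {line_nr}: missing required "
                            f"column '{column['name']}'")
                    continue
                if not re.match(column["validation_pattern"], value):
                    errors.append(
                        f"Line {line_nr}: '{value}' does not match "
                        f"'{column['validation_pattern']}' "
                        f"({column['name']})")
    return errors
```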
diff --git a/server_addon/traypublisher/server/version.py b/server_addon/traypublisher/server/version.py
deleted file mode 100644
index de699158fd..0000000000
--- a/server_addon/traypublisher/server/version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Package declaring addon version."""
-__version__ = "0.1.4"
diff --git a/server_addon/tvpaint/package.py b/server_addon/tvpaint/package.py
new file mode 100644
index 0000000000..2be3164f4a
--- /dev/null
+++ b/server_addon/tvpaint/package.py
@@ -0,0 +1,3 @@
+name = "tvpaint"
+title = "TVPaint"
+version = "0.1.2"
diff --git a/server_addon/tvpaint/server/__init__.py b/server_addon/tvpaint/server/__init__.py
index 033d7d3792..658dcf0bb6 100644
--- a/server_addon/tvpaint/server/__init__.py
+++ b/server_addon/tvpaint/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import TvpaintSettings, DEFAULT_VALUES
class TvpaintAddon(BaseServerAddon):
- name = "tvpaint"
- title = "TVPaint"
- version = __version__
settings_model: Type[TvpaintSettings] = TvpaintSettings
async def get_default_settings(self):
diff --git a/server_addon/tvpaint/server/version.py b/server_addon/tvpaint/server/version.py
deleted file mode 100644
index b3f4756216..0000000000
--- a/server_addon/tvpaint/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.2"
diff --git a/server_addon/unreal/package.py b/server_addon/unreal/package.py
new file mode 100644
index 0000000000..cab89ca873
--- /dev/null
+++ b/server_addon/unreal/package.py
@@ -0,0 +1,3 @@
+name = "unreal"
+title = "Unreal"
+version = "0.1.0"
diff --git a/server_addon/unreal/server/__init__.py b/server_addon/unreal/server/__init__.py
index a5f3e9597d..751560b623 100644
--- a/server_addon/unreal/server/__init__.py
+++ b/server_addon/unreal/server/__init__.py
@@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .version import __version__
from .settings import UnrealSettings, DEFAULT_VALUES
class UnrealAddon(BaseServerAddon):
- name = "unreal"
- title = "Unreal"
- version = __version__
settings_model: Type[UnrealSettings] = UnrealSettings
- frontend_scopes = {}
- services = {}
async def get_default_settings(self):
settings_model_cls = self.get_settings_model()
diff --git a/server_addon/unreal/server/version.py b/server_addon/unreal/server/version.py
deleted file mode 100644
index 3dc1f76bc6..0000000000
--- a/server_addon/unreal/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.0"