diff --git a/client/ayon_core/hosts/houdini/api/lib.py b/client/ayon_core/hosts/houdini/api/lib.py
index da1b21ad95..7ca8f7f8f0 100644
--- a/client/ayon_core/hosts/houdini/api/lib.py
+++ b/client/ayon_core/hosts/houdini/api/lib.py
@@ -811,6 +811,43 @@ def get_current_context_template_data_with_folder_attrs():
return template_data
+def set_review_color_space(opengl_node, review_color_space="", log=None):
+ """Set ociocolorspace parameter for the given OpenGL node.
+
+    Set the `ociocolorspace` parameter of the given OpenGL node
+    to the given review_color_space value.
+ If review_color_space is empty, a default colorspace corresponding to
+ the display & view of the current Houdini session will be used.
+
+ Args:
+ opengl_node (hou.Node): ROP node to set its ociocolorspace parm.
+ review_color_space (str): Colorspace value for ociocolorspace parm.
+ log (logging.Logger): Logger to log to.
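+
+    Example:
+        >>> # Illustrative sketch only; the node path and colorspace name
+        >>> # below are assumptions, not values from this project.
+        >>> rop = hou.node("/out/opengl1")
+        >>> set_review_color_space(rop, "ACES - ACEScg")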
+ """
+
+    if log is None:
+        # Fall back to a module logger (assumes `logging` is imported
+        # at module level, as `self` does not exist in this function).
+        log = logging.getLogger(__name__)
+
+ # Set Color Correction parameter to OpenColorIO
+ colorcorrect_parm = opengl_node.parm("colorcorrect")
+ if colorcorrect_parm.eval() != 2:
+ colorcorrect_parm.set(2)
+ log.debug(
+ "'Color Correction' parm on '{}' has been set to"
+ " 'OpenColorIO'".format(opengl_node.path())
+ )
+
+ opengl_node.setParms(
+ {"ociocolorspace": review_color_space}
+ )
+
+ log.debug(
+ "'OCIO Colorspace' parm on '{}' has been set to "
+ "the view color space '{}'"
+ .format(opengl_node, review_color_space)
+ )
+
+
def get_context_var_changes():
"""get context var changes."""
diff --git a/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py b/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py
new file mode 100644
index 0000000000..2e97c06bff
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py
@@ -0,0 +1,58 @@
+from ayon_applications import PreLaunchHook, LaunchTypes
+
+
+class SetDefaultDisplayView(PreLaunchHook):
+ """Set default view and default display for houdini via OpenColorIO.
+
+ Houdini's defaultDisplay and defaultView are set by
+ setting 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS'
+ environment variables respectively.
+
+ More info: https://www.sidefx.com/docs/houdini/io/ocio.html#set-up
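+
+    Example (illustrative values, not project defaults):
+        With default_display "ACES" and a pre-existing
+        OCIO_ACTIVE_DISPLAYS value "sRGB", the hook sets
+        OCIO_ACTIVE_DISPLAYS to "ACES:sRGB", so the settings value takes
+        precedence while the existing value is preserved.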
+ """
+
+ app_groups = {"houdini"}
+ launch_types = {LaunchTypes.local}
+
+ def execute(self):
+
+ OCIO = self.launch_context.env.get("OCIO")
+
+ # This is a cheap way to skip this hook if either global color
+ # management or houdini color management was disabled because the
+ # OCIO var would be set by the global OCIOEnvHook
+ if not OCIO:
+ return
+
+ houdini_color_settings = \
+ self.data["project_settings"]["houdini"]["imageio"]["workfile"]
+
+ if not houdini_color_settings["enabled"]:
+ self.log.info(
+ "Houdini workfile color management is disabled."
+ )
+ return
+
+        # Existing 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS' values
+        # are preserved, as admins can set them via AYON env vars or tools.
+
+ default_display = houdini_color_settings["default_display"]
+ if default_display:
+            # prepend to 'OCIO_ACTIVE_DISPLAYS' if it already exists.
+ self._set_context_env("OCIO_ACTIVE_DISPLAYS", default_display)
+
+ default_view = houdini_color_settings["default_view"]
+ if default_view:
+            # prepend to 'OCIO_ACTIVE_VIEWS' if it already exists.
+ self._set_context_env("OCIO_ACTIVE_VIEWS", default_view)
+
+ def _set_context_env(self, env_var, default_value):
+ env_value = self.launch_context.env.get(env_var, "")
+ new_value = ":".join(
+ key for key in [default_value, env_value] if key
+ )
+ self.log.info(
+ "Setting {} environment to: {}"
+ .format(env_var, new_value)
+ )
+ self.launch_context.env[env_var] = new_value
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_review.py b/client/ayon_core/hosts/houdini/plugins/create/create_review.py
index 18f7ce498d..94dcf23181 100644
--- a/client/ayon_core/hosts/houdini/plugins/create/create_review.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_review.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating openGL reviews."""
-from ayon_core.hosts.houdini.api import plugin
+from ayon_core.hosts.houdini.api import lib, plugin
from ayon_core.lib import EnumDef, BoolDef, NumberDef
import os
@@ -14,6 +14,13 @@ class CreateReview(plugin.HoudiniCreator):
label = "Review"
product_type = "review"
icon = "video-camera"
+ review_color_space = ""
+
+ def apply_settings(self, project_settings):
+ super(CreateReview, self).apply_settings(project_settings)
+ color_settings = project_settings["houdini"]["imageio"]["workfile"]
+ if color_settings["enabled"]:
+ self.review_color_space = color_settings.get("review_color_space")
def create(self, product_name, instance_data, pre_create_data):
@@ -85,10 +92,20 @@ class CreateReview(plugin.HoudiniCreator):
instance_node.setParms(parms)
- # Set OCIO Colorspace to the default output colorspace
+ # Set OCIO Colorspace to the default colorspace
# if there's OCIO
if os.getenv("OCIO"):
- self.set_colorcorrect_to_default_view_space(instance_node)
+            # Fall back to the default value if review_color_space is empty.
+            if not self.review_color_space:
+                # self.review_color_space is an empty string
+                # when the imageio/workfile setting is disabled or
+                # when the Review colorspace setting is empty.
+ from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
+ self.review_color_space = get_default_display_view_colorspace()
+
+ lib.set_review_color_space(instance_node,
+ self.review_color_space,
+ self.log)
to_lock = ["id", "productType"]
@@ -131,23 +148,3 @@ class CreateReview(plugin.HoudiniCreator):
minimum=0.0001,
decimals=3)
]
-
- def set_colorcorrect_to_default_view_space(self,
- instance_node):
- """Set ociocolorspace to the default output space."""
- from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
-
- # set Color Correction parameter to OpenColorIO
- instance_node.setParms({"colorcorrect": 2})
-
- # Get default view space for ociocolorspace parm.
- default_view_space = get_default_display_view_colorspace()
- instance_node.setParms(
- {"ociocolorspace": default_view_space}
- )
-
- self.log.debug(
- "'OCIO Colorspace' parm on '{}' has been set to "
- "the default view color space '{}'"
- .format(instance_node, default_view_space)
- )
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py
index c6dfb4332d..0fab69ef4a 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py
@@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop, splitext
import hou
-class ExtractComposite(publish.Extractor):
+class ExtractComposite(publish.Extractor,
+ publish.ColormanagedPyblishPluginMixin):
order = pyblish.api.ExtractorOrder
label = "Extract Composite (Image Sequence)"
@@ -45,8 +46,14 @@ class ExtractComposite(publish.Extractor):
"frameEnd": instance.data["frameEndHandle"],
}
- from pprint import pformat
-
- self.log.info(pformat(representation))
+ if ext.lower() == "exr":
+ # Inject colorspace with 'scene_linear' as that's the
+ # default Houdini working colorspace and all extracted
+ # OpenEXR images should be in that colorspace.
+ # https://www.sidefx.com/docs/houdini/render/linear.html#image-formats
+ self.set_representation_colorspace(
+ representation, instance.context,
+ colorspace="scene_linear"
+ )
instance.data["representations"].append(representation)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
index d3b4b094b2..26a216e335 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
@@ -8,7 +8,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop
import hou
-class ExtractOpenGL(publish.Extractor):
+class ExtractOpenGL(publish.Extractor,
+ publish.ColormanagedPyblishPluginMixin):
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract OpenGL"
@@ -56,6 +57,14 @@ class ExtractOpenGL(publish.Extractor):
"camera_name": instance.data.get("review_camera")
}
+ if ropnode.evalParm("colorcorrect") == 2: # OpenColorIO enabled
+ colorspace = ropnode.evalParm("ociocolorspace")
+ # inject colorspace data
+ self.set_representation_colorspace(
+ representation, instance.context,
+ colorspace=colorspace
+ )
+
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(representation)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
index 691b54ac05..e8b726e2eb 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
@@ -4,15 +4,19 @@ from ayon_core.pipeline import (
PublishValidationError,
OptionalPyblishPluginMixin
)
-from ayon_core.pipeline.publish import RepairAction
+from ayon_core.pipeline.publish import (
+ RepairAction,
+ get_plugin_settings,
+ apply_plugin_settings_automatically
+)
from ayon_core.hosts.houdini.api.action import SelectROPAction
import os
import hou
-class SetDefaultViewSpaceAction(RepairAction):
- label = "Set default view colorspace"
+class ResetViewSpaceAction(RepairAction):
+ label = "Reset OCIO colorspace parm"
icon = "mdi.monitor"
@@ -27,9 +31,25 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
families = ["review"]
hosts = ["houdini"]
label = "Validate Review Colorspace"
- actions = [SetDefaultViewSpaceAction, SelectROPAction]
+ actions = [ResetViewSpaceAction, SelectROPAction]
optional = True
+ review_color_space = ""
+
+ @classmethod
+ def apply_settings(cls, project_settings):
+ # Preserve automatic settings applying logic
+ settings = get_plugin_settings(plugin=cls,
+ project_settings=project_settings,
+ log=cls.log,
+ category="houdini")
+ apply_plugin_settings_automatically(cls, settings, logger=cls.log)
+
+ # Add review color settings
+ color_settings = project_settings["houdini"]["imageio"]["workfile"]
+ if color_settings["enabled"]:
+ cls.review_color_space = color_settings.get("review_color_space")
+
def process(self, instance):
@@ -63,39 +83,54 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
" 'OpenColorIO'".format(rop_node.path())
)
- if rop_node.evalParm("ociocolorspace") not in \
- hou.Color.ocio_spaces():
-
+ current_color_space = rop_node.evalParm("ociocolorspace")
+ if current_color_space not in hou.Color.ocio_spaces():
raise PublishValidationError(
"Invalid value: Colorspace name doesn't exist.\n"
"Check 'OCIO Colorspace' parameter on '{}' ROP"
.format(rop_node.path())
)
- @classmethod
- def repair(cls, instance):
- """Set Default View Space Action.
+        # If houdini/imageio/workfile is enabled and the Review colorspace
+        # setting is empty, this check should ideally compare
+        # current_color_space against the default display/view colorspace.
+        # That is skipped because computing the default opens the black
+        # cmd screen more often, which is very annoying.
+ if self.review_color_space and \
+ self.review_color_space != current_color_space:
- It is a helper action more than a repair action,
- used to set colorspace on opengl node to the default view.
- """
- from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
-
- rop_node = hou.node(instance.data["instance_node"])
-
- if rop_node.evalParm("colorcorrect") != 2:
- rop_node.setParms({"colorcorrect": 2})
- cls.log.debug(
- "'Color Correction' parm on '{}' has been set to"
- " 'OpenColorIO'".format(rop_node.path())
+ raise PublishValidationError(
+ "Invalid value: Colorspace name doesn't match"
+ "the Colorspace specified in settings."
)
- # Get default view colorspace name
- default_view_space = get_default_display_view_colorspace()
+ @classmethod
+ def repair(cls, instance):
+ """Reset view colorspace.
- rop_node.setParms({"ociocolorspace": default_view_space})
- cls.log.info(
- "'OCIO Colorspace' parm on '{}' has been set to "
- "the default view color space '{}'"
- .format(rop_node, default_view_space)
- )
+        It sets the colorspace on the OpenGL ROP node.
+
+        It uses the colorspace value specified in the Houdini addon settings.
+        If that value is empty, it falls back to the default
+        display and view colorspace.
+
+        Note:
+            This repair action assumes that OCIO is enabled,
+            because if OCIO is disabled the whole validation is skipped
+            and this repair action won't show up.
+ """
+ from ayon_core.hosts.houdini.api.lib import set_review_color_space
+
+        # Fall back to the default value if cls.review_color_space is empty.
+ if not cls.review_color_space:
+ # cls.review_color_space is an empty string
+ # when the imageio/workfile setting is disabled or
+ # when the Review colorspace setting is empty.
+ from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
+ cls.review_color_space = get_default_display_view_colorspace()
+
+ rop_node = hou.node(instance.data["instance_node"])
+ set_review_color_space(rop_node,
+ cls.review_color_space,
+ cls.log)
diff --git a/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py b/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py
index 67b5174200..67cec23ecc 100644
--- a/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py
+++ b/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py
@@ -53,6 +53,7 @@ class ExtractAlembic(publish.Extractor,
hosts = ["max"]
families = ["pointcache"]
optional = True
+ active = True
def process(self, instance):
if not self.is_active(instance.data):
@@ -102,24 +103,27 @@ class ExtractAlembic(publish.Extractor,
@classmethod
def get_attribute_defs(cls):
- return [
+ defs = super(ExtractAlembic, cls).get_attribute_defs()
+ defs.extend([
BoolDef("custom_attrs",
label="Custom Attributes",
default=False),
- ]
+ ])
+ return defs
class ExtractCameraAlembic(ExtractAlembic):
"""Extract Camera with AlembicExport."""
-
label = "Extract Alembic Camera"
families = ["camera"]
+ optional = True
-class ExtractModel(ExtractAlembic):
+class ExtractModelAlembic(ExtractAlembic):
"""Extract Geometry in Alembic Format"""
label = "Extract Geometry (Alembic)"
families = ["model"]
+ optional = True
def _set_abc_attributes(self, instance):
attr_values = self.get_attr_values_from_data(instance.data)
diff --git a/client/ayon_core/hosts/maya/api/lib_renderproducts.py b/client/ayon_core/hosts/maya/api/lib_renderproducts.py
index 832d1c21c2..52c282c6de 100644
--- a/client/ayon_core/hosts/maya/api/lib_renderproducts.py
+++ b/client/ayon_core/hosts/maya/api/lib_renderproducts.py
@@ -720,7 +720,8 @@ class RenderProductsArnold(ARenderProducts):
# AOVs > Legacy > Maya Render View > Mode
aovs_enabled = bool(
- self._get_attr("defaultArnoldRenderOptions.aovMode")
+ self._get_attr(
+ "defaultArnoldRenderOptions.aovMode", as_string=False)
)
if not aovs_enabled:
return beauty_products
diff --git a/client/ayon_core/hosts/maya/api/workfile_template_builder.py b/client/ayon_core/hosts/maya/api/workfile_template_builder.py
index 75386d7e64..ddf19125e3 100644
--- a/client/ayon_core/hosts/maya/api/workfile_template_builder.py
+++ b/client/ayon_core/hosts/maya/api/workfile_template_builder.py
@@ -331,7 +331,8 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
if scene_parent:
cmds.parent(node, scene_parent)
else:
- cmds.parent(node, world=True)
+ if cmds.listRelatives(node, parent=True):
+ cmds.parent(node, world=True)
holding_sets = cmds.listSets(object=placeholder.scene_identifier)
if not holding_sets:
diff --git a/client/ayon_core/hosts/maya/api/yeti.py b/client/ayon_core/hosts/maya/api/yeti.py
new file mode 100644
index 0000000000..1526c3a2f3
--- /dev/null
+++ b/client/ayon_core/hosts/maya/api/yeti.py
@@ -0,0 +1,101 @@
+from typing import List
+
+from maya import cmds
+
+
+def get_yeti_user_variables(yeti_shape_node: str) -> List[str]:
+ """Get user defined yeti user variables for a `pgYetiMaya` shape node.
+
+ Arguments:
+ yeti_shape_node (str): The `pgYetiMaya` shape node.
+
+ Returns:
+ list: Attribute names (for a vector attribute it only lists the top
+ parent attribute, not the attribute per axis)
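+
+    Example:
+        >>> # Illustrative only; the node and variable names are assumptions.
+        >>> get_yeti_user_variables("pgYetiMayaShape1")
+        ['yetiVariableF_density', 'yetiVariableV_wind']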
+ """
+
+ attrs = cmds.listAttr(yeti_shape_node,
+ userDefined=True,
+ string=("yetiVariableV_*",
+ "yetiVariableF_*")) or []
+ valid_attrs = []
+ for attr in attrs:
+ attr_type = cmds.attributeQuery(attr, node=yeti_shape_node,
+ attributeType=True)
+ if attr.startswith("yetiVariableV_") and attr_type == "double3":
+ # vector
+ valid_attrs.append(attr)
+ elif attr.startswith("yetiVariableF_") and attr_type == "double":
+ valid_attrs.append(attr)
+
+ return valid_attrs
+
+
+def create_yeti_variable(yeti_shape_node: str,
+ attr_name: str,
+ value=None,
+ force_value: bool = False) -> bool:
+ """Get user defined yeti user variables for a `pgYetiMaya` shape node.
+
+ Arguments:
+ yeti_shape_node (str): The `pgYetiMaya` shape node.
+ attr_name (str): The fully qualified yeti variable name, e.g.
+ "yetiVariableF_myfloat" or "yetiVariableV_myvector"
+        value (object): The value to set (must match the type of
+            the attribute). When value is None it will be ignored
+            and not set.
+ force_value (bool): Whether to set the value if the attribute already
+ exists or not.
+
+ Returns:
+ bool: Whether the attribute value was set or not.
+
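+    Example:
+        >>> # Illustrative only; node and variable names are assumptions.
+        >>> create_yeti_variable("pgYetiMayaShape1",
+        ...                      "yetiVariableF_density",
+        ...                      value=1.0)
+        True
+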
+ """
+ exists = cmds.attributeQuery(attr_name, node=yeti_shape_node, exists=True)
+ if not exists:
+ if attr_name.startswith("yetiVariableV_"):
+ _create_vector_yeti_user_variable(yeti_shape_node, attr_name)
+ if attr_name.startswith("yetiVariableF_"):
+ _create_float_yeti_user_variable(yeti_shape_node, attr_name)
+
+ if value is not None and (not exists or force_value):
+ plug = "{}.{}".format(yeti_shape_node, attr_name)
+ if (
+ isinstance(value, (list, tuple))
+ and attr_name.startswith("yetiVariableV_")
+ ):
+ cmds.setAttr(plug, *value, type="double3")
+ else:
+ cmds.setAttr(plug, value)
+
+ return True
+ return False
+
+
+def _create_vector_yeti_user_variable(yeti_shape_node: str, attr_name: str):
+ if not attr_name.startswith("yetiVariableV_"):
+ raise ValueError("Must start with yetiVariableV_")
+ cmds.addAttr(yeti_shape_node,
+ longName=attr_name,
+ attributeType="double3",
+ cachedInternally=True,
+ keyable=True)
+ for axis in "XYZ":
+ cmds.addAttr(yeti_shape_node,
+ longName="{}{}".format(attr_name, axis),
+ attributeType="double",
+ parent=attr_name,
+ cachedInternally=True,
+ keyable=True)
+
+
+def _create_float_yeti_user_variable(yeti_node: str, attr_name: str):
+ if not attr_name.startswith("yetiVariableF_"):
+ raise ValueError("Must start with yetiVariableF_")
+
+ cmds.addAttr(yeti_node,
+ longName=attr_name,
+ attributeType="double",
+ cachedInternally=True,
+ softMinValue=0,
+ softMaxValue=100,
+ keyable=True)
diff --git a/client/ayon_core/hosts/maya/plugins/load/load_as_template.py b/client/ayon_core/hosts/maya/plugins/load/load_as_template.py
new file mode 100644
index 0000000000..f696d369e3
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/load/load_as_template.py
@@ -0,0 +1,39 @@
+from ayon_core.lib import (
+ BoolDef
+)
+from ayon_core.pipeline import (
+ load,
+ registered_host
+)
+from ayon_core.hosts.maya.api.workfile_template_builder import (
+ MayaTemplateBuilder
+)
+
+
+class LoadAsTemplate(load.LoaderPlugin):
+ """Load workfile as a template """
+
+ product_types = {"workfile", "mayaScene"}
+ label = "Load as template"
+ representations = ["ma", "mb"]
+ icon = "wrench"
+ color = "#775555"
+ order = 10
+
+ options = [
+ BoolDef("keep_placeholders",
+ label="Keep Placeholders",
+ default=False),
+ BoolDef("create_first_version",
+ label="Create First Version",
+ default=False),
+ ]
+
+ def load(self, context, name, namespace, data):
+ keep_placeholders = data.get("keep_placeholders", False)
+ create_first_version = data.get("create_first_version", False)
+ path = self.filepath_from_context(context)
+ builder = MayaTemplateBuilder(registered_host())
+ builder.build_template(template_path=path,
+ keep_placeholders=keep_placeholders,
+ create_first_version=create_first_version)
diff --git a/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py
index caea6b7a72..4ca9ae9d03 100644
--- a/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py
+++ b/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py
@@ -12,6 +12,7 @@ from ayon_core.pipeline import (
get_representation_path
)
from ayon_core.hosts.maya.api import lib
+from ayon_core.hosts.maya.api.yeti import create_yeti_variable
from ayon_core.hosts.maya.api.pipeline import containerise
from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type
@@ -23,8 +24,19 @@ SKIP_UPDATE_ATTRS = {
"viewportDensity",
"viewportWidth",
"viewportLength",
+ "renderDensity",
+ "renderWidth",
+ "renderLength",
+ "increaseRenderBounds"
}
+SKIP_ATTR_MESSAGE = (
+ "Skipping updating %s.%s to %s because it "
+ "is considered a local overridable attribute. "
+ "Either set manually or the load the cache "
+ "anew."
+)
+
def set_attribute(node, attr, value):
"""Wrapper of set attribute which ignores None values"""
@@ -209,9 +221,31 @@ class YetiCacheLoader(load.LoaderPlugin):
for attr, value in node_settings["attrs"].items():
if attr in SKIP_UPDATE_ATTRS:
+ self.log.info(
+ SKIP_ATTR_MESSAGE, yeti_node, attr, value
+ )
continue
set_attribute(attr, value, yeti_node)
+ # Set up user defined attributes
+ user_variables = node_settings.get("user_variables", {})
+ for attr, value in user_variables.items():
+ was_value_set = create_yeti_variable(
+ yeti_shape_node=yeti_node,
+ attr_name=attr,
+ value=value,
+ # We do not want to update the
+ # value if it already exists so
+ # that any local overrides that
+ # may have been applied still
+ # persist
+ force_value=False
+ )
+ if not was_value_set:
+ self.log.info(
+ SKIP_ATTR_MESSAGE, yeti_node, attr, value
+ )
+
cmds.setAttr("{}.representation".format(container_node),
repre_entity["id"],
typ="string")
@@ -332,6 +366,13 @@ class YetiCacheLoader(load.LoaderPlugin):
for attr, value in attributes.items():
set_attribute(attr, value, yeti_node)
+ # Set up user defined attributes
+ user_variables = node_settings.get("user_variables", {})
+ for attr, value in user_variables.items():
+ create_yeti_variable(yeti_shape_node=yeti_node,
+ attr_name=attr,
+ value=value)
+
# Connect to the time node
cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node)
diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py
index 067a7bc532..e1755e4212 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py
@@ -3,6 +3,7 @@ from maya import cmds
import pyblish.api
from ayon_core.hosts.maya.api import lib
+from ayon_core.hosts.maya.api.yeti import get_yeti_user_variables
SETTINGS = {
@@ -34,7 +35,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
- "increaseRenderBounds"
- "imageSearchPath"
- Other information is the name of the transform and it's Colorbleed ID
+ Other information is the name of the transform and its `cbId`
"""
order = pyblish.api.CollectorOrder + 0.45
@@ -54,6 +55,16 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
# Get specific node attributes
attr_data = {}
for attr in SETTINGS:
+ # Ignore non-existing attributes with a warning, e.g. cbId
+ # if they have not been generated yet
+ if not cmds.attributeQuery(attr, node=shape, exists=True):
+ self.log.warning(
+ "Attribute '{}' not found on Yeti node: {}".format(
+ attr, shape
+ )
+ )
+ continue
+
current = cmds.getAttr("%s.%s" % (shape, attr))
# change None to empty string as Maya doesn't support
# NoneType in attributes
@@ -61,6 +72,12 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
current = ""
attr_data[attr] = current
+ # Get user variable attributes
+ user_variable_attrs = {
+ attr: lib.get_attribute("{}.{}".format(shape, attr))
+ for attr in get_yeti_user_variables(shape)
+ }
+
# Get transform data
parent = cmds.listRelatives(shape, parent=True)[0]
transform_data = {"name": parent, "cbId": lib.get_id(parent)}
@@ -70,6 +87,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
"name": shape,
"cbId": lib.get_id(shape),
"attrs": attr_data,
+ "user_variables": user_variable_attrs
}
settings["nodes"].append(shape_data)
diff --git a/client/ayon_core/hosts/traypublisher/addon.py b/client/ayon_core/hosts/traypublisher/addon.py
index 70bdfe9a64..3dd275f223 100644
--- a/client/ayon_core/hosts/traypublisher/addon.py
+++ b/client/ayon_core/hosts/traypublisher/addon.py
@@ -1,5 +1,6 @@
import os
+from pathlib import Path
from ayon_core.lib import get_ayon_launcher_args
from ayon_core.lib.execute import run_detached_process
from ayon_core.addon import (
@@ -57,3 +58,62 @@ def launch():
from ayon_core.tools import traypublisher
traypublisher.main()
+
+
+@cli_main.command()
+@click_wrap.option(
+ "--filepath",
+ help="Full path to CSV file with data",
+ type=str,
+ required=True
+)
+@click_wrap.option(
+ "--project",
+ help="Project name in which the context will be used",
+ type=str,
+ required=True
+)
+@click_wrap.option(
+ "--folder-path",
+ help="Asset name in which the context will be used",
+ type=str,
+ required=True
+)
+@click_wrap.option(
+ "--task",
+ help="Task name under Asset in which the context will be used",
+ type=str,
+ required=False
+)
+@click_wrap.option(
+ "--ignore-validators",
+ help="Option to ignore validators",
+ type=bool,
+ is_flag=True,
+ required=False
+)
+def ingestcsv(
+ filepath,
+ project,
+ folder_path,
+ task,
+ ignore_validators
+):
+ """Ingest CSV file into project.
+
+    This command ingests a CSV file into the project. The CSV file must
+    follow a specific format. See documentation for more information.
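+
+    Example (illustrative values; the exact launcher invocation may
+    differ per deployment)::
+
+        ayon addon traypublisher ingestcsv \
+            --filepath /path/to/ingest.csv \
+            --project my_project \
+            --folder-path /shots/sh010 \
+            --task compositing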
+ """
+ from .csv_publish import csvpublish
+
+    # use Path to check if the given CSV filepath exists
+ if not Path(filepath).exists():
+ raise FileNotFoundError(f"File {filepath} does not exist.")
+
+ csvpublish(
+ filepath,
+ project,
+ folder_path,
+ task,
+ ignore_validators
+ )
diff --git a/client/ayon_core/hosts/traypublisher/csv_publish.py b/client/ayon_core/hosts/traypublisher/csv_publish.py
new file mode 100644
index 0000000000..b43792a357
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/csv_publish.py
@@ -0,0 +1,86 @@
+import os
+
+import pyblish.api
+import pyblish.util
+
+from ayon_api import get_folder_by_path, get_task_by_name
+from ayon_core.lib.attribute_definitions import FileDefItem
+from ayon_core.pipeline import install_host
+from ayon_core.pipeline.create import CreateContext
+
+from ayon_core.hosts.traypublisher.api import TrayPublisherHost
+
+
+def csvpublish(
+ filepath,
+ project_name,
+ folder_path,
+ task_name=None,
+ ignore_validators=False
+):
+ """Publish CSV file.
+
+ Args:
+ filepath (str): Path to CSV file.
+ project_name (str): Project name.
+ folder_path (str): Folder path.
+ task_name (Optional[str]): Task name.
+ ignore_validators (Optional[bool]): Option to ignore validators.
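+
+    Example:
+        >>> # Illustrative only; project, folder and task names are
+        >>> # assumptions.
+        >>> csvpublish(
+        ...     "/path/to/ingest.csv",
+        ...     project_name="my_project",
+        ...     folder_path="/shots/sh010",
+        ...     task_name="compositing",
+        ... )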
+ """
+
+ # initialization of host
+ host = TrayPublisherHost()
+ install_host(host)
+
+ # setting host context into project
+ host.set_project_name(project_name)
+
+ # form precreate data with field values
+ file_field = FileDefItem.from_paths([filepath], False).pop().to_dict()
+ precreate_data = {
+ "csv_filepath_data": file_field,
+ }
+
+ # create context initialization
+ create_context = CreateContext(host, headless=True)
+ folder_entity = get_folder_by_path(
+ project_name,
+ folder_path=folder_path,
+ )
+
+ if not folder_entity:
+        raise ValueError(
+            f"Folder path '{folder_path}' doesn't "
+            f"exist in project '{project_name}'."
+ )
+
+ task_entity = get_task_by_name(
+ project_name,
+ folder_entity["id"],
+ task_name,
+ )
+
+ if not task_entity:
+        raise ValueError(
+            f"Task name '{task_name}' doesn't "
+            f"exist in folder '{folder_path}'."
+ )
+
+ create_context.create(
+ "io.ayon.creators.traypublisher.csv_ingest",
+ "Main",
+ folder_entity=folder_entity,
+ task_entity=task_entity,
+ pre_create_data=precreate_data,
+ )
+
+ # publishing context initialization
+ pyblish_context = pyblish.api.Context()
+ pyblish_context.data["create_context"] = create_context
+
+    # redefine targets (skip 'local' to disable validators)
+    # NOTE: the default target list is an assumption added to avoid an
+    # undefined `targets` when validators are not ignored.
+    targets = ["default", "ingest", "local"]
+    if ignore_validators:
+        targets = ["default", "ingest"]
+
+ # publishing
+ pyblish.util.publish(context=pyblish_context, targets=targets)
diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py
new file mode 100644
index 0000000000..8143e8b45b
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py
@@ -0,0 +1,741 @@
+import os
+import re
+import csv
+import clique
+from io import StringIO
+from copy import deepcopy, copy
+
+from ayon_api import get_folder_by_path, get_task_by_name
+from ayon_core.pipeline.create import get_product_name
+from ayon_core.pipeline import CreatedInstance
+from ayon_core.lib import FileDef, BoolDef
+from ayon_core.lib.transcoding import (
+ VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
+)
+from ayon_core.pipeline.create import CreatorError
+from ayon_core.hosts.traypublisher.api.plugin import (
+ TrayPublishCreator
+)
+
+
+class IngestCSV(TrayPublishCreator):
+ """CSV ingest creator class"""
+
+ icon = "fa.file"
+
+ label = "CSV Ingest"
+ product_type = "csv_ingest_file"
+ identifier = "io.ayon.creators.traypublisher.csv_ingest"
+
+ default_variants = ["Main"]
+
+ description = "Ingest products' data from CSV file"
+ detailed_description = """
+Ingest products' data from CSV file following column and representation
+configuration in project settings.
+"""
+
+ # Position in the list of creators.
+ order = 10
+
+ # settings for this creator
+ columns_config = {}
+ representations_config = {}
+
+ def create(self, subset_name, instance_data, pre_create_data):
+ """Create an product from each row found in the CSV.
+
+ Args:
+ subset_name (str): The subset name.
+ instance_data (dict): The instance data.
+            pre_create_data (dict): The pre-create data.
+ """
+
+ csv_filepath_data = pre_create_data.get("csv_filepath_data", {})
+
+ folder = csv_filepath_data.get("directory", "")
+ if not os.path.exists(folder):
+ raise CreatorError(
+ f"Directory '{folder}' does not exist."
+ )
+        filenames = csv_filepath_data.get("filenames", [])
+        if not filenames:
+            raise CreatorError("No CSV file was selected.")
+        self._process_csv_file(
+            subset_name, instance_data, folder, filenames[0])
+
+ def _process_csv_file(
+ self, subset_name, instance_data, staging_dir, filename):
+ """Process CSV file.
+
+ Args:
+ subset_name (str): The subset name.
+ instance_data (dict): The instance data.
+ staging_dir (str): The staging directory.
+ filename (str): The filename.
+ """
+
+        # create a new instance for the csv file itself
+ self._pass_data_to_csv_instance(
+ instance_data,
+ staging_dir,
+ filename
+ )
+
+ csv_instance = CreatedInstance(
+ self.product_type, subset_name, instance_data, self
+ )
+ self._store_new_instance(csv_instance)
+
+ csv_instance["csvFileData"] = {
+ "filename": filename,
+ "staging_dir": staging_dir,
+ }
+
+        # get all data from the csv file and convert it
+        # to new instances
+ csv_data_for_instances = self._get_data_from_csv(
+ staging_dir, filename)
+
+        # create instances from the csv data
+ self._create_instances_from_csv_data(
+ csv_data_for_instances, staging_dir
+ )
+
+ def _create_instances_from_csv_data(
+ self,
+ csv_data_for_instances,
+ staging_dir
+ ):
+ """Create instances from csv data"""
+
+ for folder_path, prepared_data in csv_data_for_instances.items():
+ project_name = self.create_context.get_current_project_name()
+ products = prepared_data["products"]
+
+ for instance_name, product_data in products.items():
+ # get important instance variables
+ task_name = product_data["task_name"]
+ task_type = product_data["task_type"]
+ variant = product_data["variant"]
+ product_type = product_data["product_type"]
+ version = product_data["version"]
+
+ # create subset/product name
+ product_name = get_product_name(
+ project_name,
+ task_name,
+ task_type,
+ self.host_name,
+ product_type,
+ variant
+ )
+
+ # make sure frame start/end is inherited from csv columns
+ # expected frame range data are handles excluded
+ for _, repre_data in product_data["representations"].items(): # noqa: E501
+ frame_start = repre_data["frameStart"]
+ frame_end = repre_data["frameEnd"]
+ handle_start = repre_data["handleStart"]
+ handle_end = repre_data["handleEnd"]
+ fps = repre_data["fps"]
+ break
+
+ # try to find any version comment in representation data
+ version_comment = next(
+ iter(
+ repre_data["comment"]
+ for repre_data in product_data["representations"].values() # noqa: E501
+ if repre_data["comment"]
+ ),
+ None
+ )
+
+ # try to find any slate switch in representation data
+ slate_exists = any(
+ repre_data["slate"]
+ for _, repre_data in product_data["representations"].items() # noqa: E501
+ )
+
+ # get representations from product data
+ representations = product_data["representations"]
+ label = f"{folder_path}_{product_name}_v{version:>03}"
+
+ families = ["csv_ingest"]
+ if slate_exists:
+ # adding slate to families mainly for loaders to be able
+ # to filter out slates
+ families.append("slate")
+
+ # make product data
+ product_data = {
+ "name": instance_name,
+ "folderPath": folder_path,
+ "families": families,
+ "label": label,
+ "task": task_name,
+ "variant": variant,
+ "source": "csv",
+ "frameStart": frame_start,
+ "frameEnd": frame_end,
+ "handleStart": handle_start,
+ "handleEnd": handle_end,
+ "fps": fps,
+ "version": version,
+ "comment": version_comment,
+ }
+
+ # create new instance
+ new_instance = CreatedInstance(
+ product_type, product_name, product_data, self
+ )
+ self._store_new_instance(new_instance)
+
+ if not new_instance.get("prepared_data_for_repres"):
+ new_instance["prepared_data_for_repres"] = []
+
+ base_thumbnail_repre_data = {
+ "name": "thumbnail",
+ "ext": None,
+ "files": None,
+ "stagingDir": None,
+ "stagingDir_persistent": True,
+ "tags": ["thumbnail", "delete"],
+ }
+ # need to populate all thumbnails for all representations
+ # so we can check if unique thumbnail per representation
+ # is needed
+ thumbnails = [
+ repre_data["thumbnailPath"]
+ for repre_data in representations.values()
+ if repre_data["thumbnailPath"]
+ ]
+ multiple_thumbnails = len(set(thumbnails)) > 1
+ explicit_output_name = None
+ thumbnails_processed = False
+ for filepath, repre_data in representations.items():
+ # check if any review derivate tag is present
+ reviewable = any(
+ tag for tag in repre_data.get("tags", [])
+ # tag can be `ftrackreview` or `review`
+ if "review" in tag
+ )
+                # multiple unique thumbnails need to be published as
+                # separate representations with an outputName so the
+                # Ftrack instance integrator can pair them with the
+                # reviewable video representations
+ if (
+ thumbnails
+ and multiple_thumbnails
+ and reviewable
+ ):
+ # multiple unique thumbnails per representation needs
+ # grouping by outputName
+ # mainly used in Ftrack instance integrator
+ explicit_output_name = repre_data["representationName"]
+ relative_thumbnail_path = repre_data["thumbnailPath"]
+ # representation might not have thumbnail path
+ # so ignore this one
+ if not relative_thumbnail_path:
+ continue
+ thumb_dir, thumb_file = \
+ self._get_refactor_thumbnail_path(
+ staging_dir, relative_thumbnail_path)
+ filename, ext = os.path.splitext(thumb_file)
+ thumbnail_repr_data = deepcopy(
+ base_thumbnail_repre_data)
+ thumbnail_repr_data.update({
+ "name": "thumbnail_{}".format(filename),
+ "ext": ext[1:],
+ "files": thumb_file,
+ "stagingDir": thumb_dir,
+ "outputName": explicit_output_name,
+ })
+ new_instance["prepared_data_for_repres"].append({
+ "type": "thumbnail",
+ "colorspace": None,
+ "representation": thumbnail_repr_data,
+ })
+ # also add thumbnailPath for ayon to integrate
+ if not new_instance.get("thumbnailPath"):
+ new_instance["thumbnailPath"] = (
+ os.path.join(thumb_dir, thumb_file)
+ )
+ elif (
+                    (
+                        thumbnails
+                        and not multiple_thumbnails
+                        and not thumbnails_processed
+                    )
+                    or not reviewable
+                ):
+                    # Case with only one thumbnail or non-reviewable
+                    # media. This needs to be processed only once
+                    # per instance.
+ if not thumbnails:
+ continue
+ # here we will use only one thumbnail for
+ # all representations
+ relative_thumbnail_path = repre_data["thumbnailPath"]
+ # popping last thumbnail from list since it is only one
+ # and we do not need to iterate again over it
+ if not relative_thumbnail_path:
+ relative_thumbnail_path = thumbnails.pop()
+ thumb_dir, thumb_file = \
+ self._get_refactor_thumbnail_path(
+ staging_dir, relative_thumbnail_path)
+ _, ext = os.path.splitext(thumb_file)
+ thumbnail_repr_data = deepcopy(
+ base_thumbnail_repre_data)
+ thumbnail_repr_data.update({
+ "ext": ext[1:],
+ "files": thumb_file,
+ "stagingDir": thumb_dir
+ })
+ new_instance["prepared_data_for_repres"].append({
+ "type": "thumbnail",
+ "colorspace": None,
+ "representation": thumbnail_repr_data,
+ })
+ # also add thumbnailPath for ayon to integrate
+ if not new_instance.get("thumbnailPath"):
+ new_instance["thumbnailPath"] = (
+ os.path.join(thumb_dir, thumb_file)
+ )
+
+ thumbnails_processed = True
+
+ # get representation data
+ representation_data = self._get_representation_data(
+ filepath, repre_data, staging_dir,
+ explicit_output_name
+ )
+
+ new_instance["prepared_data_for_repres"].append({
+ "type": "media",
+ "colorspace": repre_data["colorspace"],
+ "representation": representation_data,
+ })
+
+ def _get_refactor_thumbnail_path(
+ self, staging_dir, relative_thumbnail_path):
+ thumbnail_abs_path = os.path.join(
+ staging_dir, relative_thumbnail_path)
+ return os.path.split(
+ thumbnail_abs_path)
+
+ def _get_representation_data(
+ self, filepath, repre_data, staging_dir, explicit_output_name=None
+ ):
+ """Get representation data
+
+ Args:
+ filepath (str): Filepath to representation file.
+ repre_data (dict): Representation data from CSV file.
+ staging_dir (str): Staging directory.
+ explicit_output_name (Optional[str]): Explicit output name.
+ For grouping purposes with reviewable components.
+ Defaults to None.
+ """
+
+ # get extension of file
+ basename = os.path.basename(filepath)
+ extension = os.path.splitext(filepath)[-1].lower()
+
+        # validate that the filepath has a correct extension for the output
+ repre_name = repre_data["representationName"]
+ repre_config_data = None
+ for repre in self.representations_config["representations"]:
+ if repre["name"] == repre_name:
+ repre_config_data = repre
+ break
+
+ if not repre_config_data:
+ raise CreatorError(
+ f"Representation '{repre_name}' not found "
+ "in config representation data."
+ )
+
+ validate_extensions = repre_config_data["extensions"]
+ if extension not in validate_extensions:
+ raise CreatorError(
+ f"File extension '{extension}' not valid for "
+ f"output '{validate_extensions}'."
+ )
+
+ is_sequence = (extension in IMAGE_EXTENSIONS)
+ # convert ### string in file name to %03d
+ # this is for correct frame range validation
+ # example: file.###.exr -> file.%03d.exr
+ if "#" in basename:
+ padding = len(basename.split("#")) - 1
+ basename = basename.replace("#" * padding, f"%0{padding}d")
+ is_sequence = True
+
+ # make absolute path to file
+ absfilepath = os.path.normpath(os.path.join(staging_dir, filepath))
+ dirname = os.path.dirname(absfilepath)
+
+ # check if dirname exists
+ if not os.path.isdir(dirname):
+ raise CreatorError(
+ f"Directory '{dirname}' does not exist."
+ )
+
+ # collect all data from dirname
+ paths_for_collection = []
+ for file in os.listdir(dirname):
+ filepath = os.path.join(dirname, file)
+ paths_for_collection.append(filepath)
+
+ collections, _ = clique.assemble(paths_for_collection)
+
+ if collections:
+ collections = collections[0]
+ else:
+ if is_sequence:
+ raise CreatorError(
+ f"No collections found in directory '{dirname}'."
+ )
+
+ frame_start = None
+ frame_end = None
+ if is_sequence:
+ files = [os.path.basename(file) for file in collections]
+ frame_start = list(collections.indexes)[0]
+ frame_end = list(collections.indexes)[-1]
+ else:
+ files = basename
+
+ tags = deepcopy(repre_data["tags"])
+        # if slate in repre_data is True, tag the representation so the
+        # extra slate frame can be handled downstream
+ if repre_data["slate"]:
+ tags.append("has_slate")
+
+ # get representation data
+ representation_data = {
+ "name": repre_name,
+ "ext": extension[1:],
+ "files": files,
+ "stagingDir": dirname,
+ "stagingDir_persistent": True,
+ "tags": tags,
+ }
+ if extension in VIDEO_EXTENSIONS:
+ representation_data.update({
+ "fps": repre_data["fps"],
+ "outputName": repre_name,
+ })
+
+ if explicit_output_name:
+ representation_data["outputName"] = explicit_output_name
+
+ if frame_start:
+ representation_data["frameStart"] = frame_start
+ if frame_end:
+ representation_data["frameEnd"] = frame_end
+
+ return representation_data
+
+ def _get_data_from_csv(
+ self, package_dir, filename
+ ):
+ """Generate instances from the csv file"""
+        # get current project name from the create context
+ project_name = self.create_context.get_current_project_name()
+
+ csv_file_path = os.path.join(
+ package_dir, filename
+ )
+
+ # make sure csv file contains columns from following list
+ required_columns = [
+ column["name"] for column in self.columns_config["columns"]
+ if column["required_column"]
+ ]
+
+ # read csv file
+ with open(csv_file_path, "r") as csv_file:
+ csv_content = csv_file.read()
+
+ # read csv file with DictReader
+ csv_reader = csv.DictReader(
+ StringIO(csv_content),
+ delimiter=self.columns_config["csv_delimiter"]
+ )
+
+ # fix fieldnames
+        # someone may leave extra spaces at the start or end of
+        # the column name
+ all_columns = [
+ " ".join(column.rsplit()) for column in csv_reader.fieldnames]
+
+ # return back fixed fieldnames
+ csv_reader.fieldnames = all_columns
+
+ # check if csv file contains all required columns
+ if any(column not in all_columns for column in required_columns):
+ raise CreatorError(
+ f"Missing required columns: {required_columns}"
+ )
+
+ csv_data = {}
+ # get data from csv file
+ for row in csv_reader:
+ # Get required columns first
+ # TODO: will need to be folder path in CSV
+ # TODO: `context_asset_name` is now `folder_path`
+ folder_path = self._get_row_value_with_validation(
+ "Folder Path", row)
+ task_name = self._get_row_value_with_validation(
+ "Task Name", row)
+ version = self._get_row_value_with_validation(
+ "Version", row)
+
+ # Get optional columns
+ variant = self._get_row_value_with_validation(
+ "Variant", row)
+ product_type = self._get_row_value_with_validation(
+ "Product Type", row)
+
+ pre_product_name = (
+ f"{task_name}{variant}{product_type}"
+ f"{version}".replace(" ", "").lower()
+ )
+
+ # get representation data
+ filename, representation_data = \
+ self._get_representation_row_data(row)
+
+ # TODO: batch query of all folder paths and task names
+
+ # get folder entity from folder path
+ folder_entity = get_folder_by_path(
+ project_name, folder_path)
+
+            # make sure the folder exists
+            if not folder_entity:
+                raise CreatorError(
+                    f"Folder '{folder_path}' not found."
+ )
+
+            # find the task entity by name under the folder entity
+ task_entity = get_task_by_name(
+ project_name, folder_entity["id"], task_name)
+
+            # check if task name is a valid task under the folder
+            if not task_entity:
+                raise CreatorError(
+                    f"Task '{task_name}' not found on folder "
+                    f"'{folder_path}'."
+ )
+
+            # Collect all csv data into one dict without duplicates.
+            # Data is validated and sorted under the correct existing
+            # folder, and representations are distributed under products
+            # following their variants.
+ if folder_path not in csv_data:
+ csv_data[folder_path] = {
+ "folder_entity": folder_entity,
+ "products": {
+ pre_product_name: {
+ "task_name": task_name,
+ "task_type": task_entity["taskType"],
+ "variant": variant,
+ "product_type": product_type,
+ "version": version,
+ "representations": {
+ filename: representation_data,
+ },
+ }
+ }
+ }
+ else:
+ csv_products = csv_data[folder_path]["products"]
+ if pre_product_name not in csv_products:
+ csv_products[pre_product_name] = {
+ "task_name": task_name,
+ "task_type": task_entity["taskType"],
+ "variant": variant,
+ "product_type": product_type,
+ "version": version,
+ "representations": {
+ filename: representation_data,
+ },
+ }
+ else:
+ csv_representations = \
+ csv_products[pre_product_name]["representations"]
+ if filename in csv_representations:
+ raise CreatorError(
+ f"Duplicate filename '{filename}' in csv file."
+ )
+ csv_representations[filename] = representation_data
+
+ return csv_data
+
+ def _get_representation_row_data(self, row_data):
+ """Get representation row data"""
+ # Get required columns first
+ file_path = self._get_row_value_with_validation(
+ "File Path", row_data)
+ frame_start = self._get_row_value_with_validation(
+ "Frame Start", row_data)
+ frame_end = self._get_row_value_with_validation(
+ "Frame End", row_data)
+ handle_start = self._get_row_value_with_validation(
+ "Handle Start", row_data)
+ handle_end = self._get_row_value_with_validation(
+ "Handle End", row_data)
+ fps = self._get_row_value_with_validation(
+ "FPS", row_data)
+
+ # Get optional columns
+ thumbnail_path = self._get_row_value_with_validation(
+ "Version Thumbnail", row_data)
+ colorspace = self._get_row_value_with_validation(
+ "Representation Colorspace", row_data)
+ comment = self._get_row_value_with_validation(
+ "Version Comment", row_data)
+ repre = self._get_row_value_with_validation(
+ "Representation", row_data)
+ slate_exists = self._get_row_value_with_validation(
+ "Slate Exists", row_data)
+ repre_tags = self._get_row_value_with_validation(
+ "Representation Tags", row_data)
+
+ # convert tags value to list
+ tags_list = copy(self.representations_config["default_tags"])
+ if repre_tags:
+ tags_list = []
+ tags_delimiter = self.representations_config["tags_delimiter"]
+            # strip spaces and lowercase each tag
+            if tags_delimiter in repre_tags:
+                tags = repre_tags.split(tags_delimiter)
+                for _tag in tags:
+                    tags_list.append(_tag.strip().lower())
+ else:
+ tags_list.append(repre_tags)
+
+ representation_data = {
+ "colorspace": colorspace,
+ "comment": comment,
+ "representationName": repre,
+ "slate": slate_exists,
+ "tags": tags_list,
+ "thumbnailPath": thumbnail_path,
+ "frameStart": int(frame_start),
+ "frameEnd": int(frame_end),
+ "handleStart": int(handle_start),
+ "handleEnd": int(handle_end),
+ "fps": float(fps),
+ }
+ return file_path, representation_data
+
+ def _get_row_value_with_validation(
+ self, column_name, row_data, default_value=None
+ ):
+ """Get row value with validation"""
+
+ # get column data from column config
+ column_data = None
+ for column in self.columns_config["columns"]:
+ if column["name"] == column_name:
+ column_data = column
+ break
+
+ if not column_data:
+ raise CreatorError(
+ f"Column '{column_name}' not found in column config."
+ )
+
+ # get column value from row
+ column_value = row_data.get(column_name)
+ column_required = column_data["required_column"]
+
+ # check if column value is not empty string and column is required
+ if column_value == "" and column_required:
+ raise CreatorError(
+ f"Value in column '{column_name}' is required."
+ )
+
+ # get column type
+ column_type = column_data["type"]
+ # get column validation regex
+ column_validation = column_data["validation_pattern"]
+ # get column default value
+ column_default = default_value or column_data["default"]
+
+ if column_type in ["number", "decimal"] and column_default == 0:
+ column_default = None
+
+ # check if column value is not empty string
+ if column_value == "":
+ # set default value if column value is empty string
+ column_value = column_default
+
+ # set column value to correct type following column type
+ if column_type == "number" and column_value is not None:
+ column_value = int(column_value)
+ elif column_type == "decimal" and column_value is not None:
+ column_value = float(column_value)
+ elif column_type == "bool":
+ column_value = column_value in ["true", "True"]
+
+ # check if column value matches validation regex
+ if (
+ column_value is not None and
+ not re.match(str(column_validation), str(column_value))
+ ):
+ raise CreatorError(
+ f"Column '{column_name}' value '{column_value}' "
+ f"does not match validation regex '{column_validation}' \n"
+ f"Row data: {row_data} \n"
+ f"Column data: {column_data}"
+ )
+
+ return column_value
+
+ def _pass_data_to_csv_instance(
+ self, instance_data, staging_dir, filename
+ ):
+ """Pass CSV representation file to instance data"""
+
+ representation = {
+ "name": "csv",
+ "ext": "csv",
+ "files": filename,
+ "stagingDir": staging_dir,
+ "stagingDir_persistent": True,
+ }
+
+ instance_data.update({
+ "label": f"CSV: {filename}",
+ "representations": [representation],
+ "stagingDir": staging_dir,
+ "stagingDir_persistent": True,
+ })
+
+ def get_instance_attr_defs(self):
+ return [
+ BoolDef(
+ "add_review_family",
+ default=True,
+ label="Review"
+ )
+ ]
+
+ def get_pre_create_attr_defs(self):
+ """Creating pre-create attributes at creator plugin.
+
+ Returns:
+ list: list of attribute object instances
+ """
+        # Pre-create attribute for selecting the CSV file
+ attr_defs = [
+ FileDef(
+ "csv_filepath_data",
+ folders=False,
+ extensions=[".csv"],
+ allow_sequences=False,
+ single_item=True,
+ label="CSV File",
+ ),
+ ]
+ return attr_defs
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py
new file mode 100644
index 0000000000..33536d0854
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py
@@ -0,0 +1,47 @@
+from pprint import pformat
+import pyblish.api
+from ayon_core.pipeline import publish
+
+
+class CollectCSVIngestInstancesData(
+ pyblish.api.InstancePlugin,
+ publish.AYONPyblishPluginMixin,
+ publish.ColormanagedPyblishPluginMixin
+):
+ """Collect CSV Ingest data from instance.
+ """
+
+ label = "Collect CSV Ingest instances data"
+ order = pyblish.api.CollectorOrder + 0.1
+ hosts = ["traypublisher"]
+ families = ["csv_ingest"]
+
+ def process(self, instance):
+
+        # expecting a list of dicts with "type", "colorspace"
+        # and "representation" keys
+ prepared_repres_data_items = instance.data[
+ "prepared_data_for_repres"]
+
+ for prep_repre_data in prepared_repres_data_items:
+            repre_type = prep_repre_data["type"]
+            colorspace = prep_repre_data["colorspace"]
+            repre_data = prep_repre_data["representation"]
+
+            if repre_type == "media" and colorspace is not None:
+                # colorspace name is passed from CSV column
+                self.set_representation_colorspace(
+                    repre_data, instance.context, colorspace
+                )
+            elif repre_type == "media":
+                # TODO: implement colorspace file rules file parsing
+                self.log.warning(
+                    "Colorspace is not defined in csv for following"
+                    f" representation: {pformat(repre_data)}"
+                )
+            elif repre_type == "thumbnail":
+                # thumbnails should be skipped
+                pass
+
+ instance.data["representations"].append(repre_data)
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py
new file mode 100644
index 0000000000..4bdf7c0493
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py
@@ -0,0 +1,31 @@
+import pyblish.api
+
+from ayon_core.pipeline import publish
+
+
+class ExtractCSVFile(publish.Extractor):
+ """
+    Extractor exporting the source CSV file as a representation
+ """
+
+ label = "Extract CSV file"
+ order = pyblish.api.ExtractorOrder - 0.45
+ families = ["csv_ingest_file"]
+ hosts = ["traypublisher"]
+
+ def process(self, instance):
+
+ csv_file_data = instance.data["csvFileData"]
+
+ representation_csv = {
+ 'name': "csv_data",
+ 'ext': "csv",
+ 'files': csv_file_data["filename"],
+ "stagingDir": csv_file_data["staging_dir"],
+ "stagingDir_persistent": True
+ }
+
+ instance.data["representations"].append(representation_csv)
+
+ self.log.info("Added CSV file representation: {}".format(
+ representation_csv))
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py
index 3a62536507..0b4f8e16c1 100644
--- a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py
@@ -16,6 +16,7 @@ class ValidateExistingVersion(
order = ValidateContentsOrder
hosts = ["traypublisher"]
+ targets = ["local"]
actions = [RepairAction]
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py
index 4f11571efe..13f13b05bb 100644
--- a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py
@@ -16,6 +16,8 @@ class ValidateFrameRange(OptionalPyblishPluginMixin,
label = "Validate Frame Range"
hosts = ["traypublisher"]
families = ["render", "plate"]
+ targets = ["local"]
+
order = ValidateContentsOrder
optional = True
diff --git a/client/ayon_core/pipeline/workfile/workfile_template_builder.py b/client/ayon_core/pipeline/workfile/workfile_template_builder.py
index 8082adc65d..3447520b39 100644
--- a/client/ayon_core/pipeline/workfile/workfile_template_builder.py
+++ b/client/ayon_core/pipeline/workfile/workfile_template_builder.py
@@ -498,15 +498,21 @@ class AbstractTemplateBuilder(object):
process if version is created
"""
- template_preset = self.get_template_preset()
-
- if template_path is None:
- template_path = template_preset["path"]
-
- if keep_placeholders is None:
- keep_placeholders = template_preset["keep_placeholder"]
- if create_first_version is None:
- create_first_version = template_preset["create_first_version"]
+ if any(
+ value is None
+ for value in [
+ template_path,
+ keep_placeholders,
+ create_first_version,
+ ]
+ ):
+ template_preset = self.get_template_preset()
+ if template_path is None:
+ template_path = template_preset["path"]
+ if keep_placeholders is None:
+ keep_placeholders = template_preset["keep_placeholder"]
+ if create_first_version is None:
+ create_first_version = template_preset["create_first_version"]
# check if first version is created
created_version_workfile = False
@@ -772,12 +778,14 @@ class AbstractTemplateBuilder(object):
- 'project_settings/{host name}/templated_workfile_build/profiles'
Returns:
- str: Path to a template file with placeholders.
+ dict: Dictionary with `path`, `keep_placeholder` and
+ `create_first_version` settings from the template preset
+                for the current context.
Raises:
TemplateProfileNotFound: When profiles are not filled.
TemplateLoadFailed: Profile was found but path is not set.
- TemplateNotFound: Path was set but file does not exists.
+ TemplateNotFound: Path was set but file does not exist.
"""
host_name = self.host_name
diff --git a/client/ayon_core/plugins/publish/integrate.py b/client/ayon_core/plugins/publish/integrate.py
index ea24112831..1b3cfa6cb8 100644
--- a/client/ayon_core/plugins/publish/integrate.py
+++ b/client/ayon_core/plugins/publish/integrate.py
@@ -168,6 +168,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"blendScene",
"yeticacheUE",
"tycache",
+ "csv_ingest_file",
"render.local.hou"
]
diff --git a/server_addon/create_ayon_addons.py b/server_addon/create_ayon_addons.py
index 79b9aa5450..f0a36d4740 100644
--- a/server_addon/create_ayon_addons.py
+++ b/server_addon/create_ayon_addons.py
@@ -5,7 +5,7 @@ import shutil
import argparse
import zipfile
import types
-import importlib
+import importlib.machinery
import platform
import collections
from pathlib import Path
diff --git a/server_addon/houdini/server/settings/imageio.py b/server_addon/houdini/server/settings/imageio.py
index f4850c5df7..c4f4813d51 100644
--- a/server_addon/houdini/server/settings/imageio.py
+++ b/server_addon/houdini/server/settings/imageio.py
@@ -34,6 +34,34 @@ class ImageIOFileRulesModel(BaseSettingsModel):
return value
+class WorkfileImageIOModel(BaseSettingsModel):
+ """Workfile settings help.
+
+ Empty values will be skipped, allowing any existing env vars to
+ pass through as defined.
+
+ Note: The render space in Houdini is
+ always set to the 'scene_linear' role."""
+
+ enabled: bool = SettingsField(False, title="Enabled")
+ default_display: str = SettingsField(
+ title="Default active displays",
+ description="It behaves like the 'OCIO_ACTIVE_DISPLAYS' env var,"
+ " Colon-separated list of displays, e.g ACES:P3"
+ )
+ default_view: str = SettingsField(
+ title="Default active views",
+ description="It behaves like the 'OCIO_ACTIVE_VIEWS' env var,"
+ " Colon-separated list of views, e.g sRGB:DCDM"
+ )
+ review_color_space: str = SettingsField(
+ title="Review colorspace",
+ description="It exposes OCIO Colorspace parameter in opengl nodes."
+ "if left empty, Ayon will figure out the default "
+ "colorspace using your default display and default view."
+ )
+
+
class HoudiniImageIOModel(BaseSettingsModel):
activate_host_color_management: bool = SettingsField(
True, title="Enable Color Management"
@@ -46,3 +74,26 @@ class HoudiniImageIOModel(BaseSettingsModel):
default_factory=ImageIOFileRulesModel,
title="File Rules"
)
+ workfile: WorkfileImageIOModel = SettingsField(
+ default_factory=WorkfileImageIOModel,
+ title="Workfile"
+ )
+
+
+DEFAULT_IMAGEIO_SETTINGS = {
+ "activate_host_color_management": False,
+ "ocio_config": {
+ "override_global_config": False,
+ "filepath": []
+ },
+ "file_rules": {
+ "activate_host_rules": False,
+ "rules": []
+ },
+ "workfile": {
+ "enabled": False,
+ "default_display": "ACES",
+ "default_view": "sRGB",
+ "review_color_space": ""
+ }
+}
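
When `review_color_space` is left empty, the colorspace has to be derived from the default display and view of the active OCIO config. A minimal sketch of that lookup with the OCIO v2 Python bindings (assuming the `OCIO` env var points at a config; this is not necessarily the exact helper AYON uses):

```python
import PyOpenColorIO as OCIO


def default_view_colorspace():
    """Return the colorspace behind the config's default display/view."""
    config = OCIO.GetCurrentConfig()  # resolved from the OCIO env var
    display = config.getDefaultDisplay()
    view = config.getDefaultView(display)
    return config.getDisplayViewColorSpaceName(display, view)
```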
diff --git a/server_addon/houdini/server/settings/main.py b/server_addon/houdini/server/settings/main.py
index cbb19d15b7..3acab0ce74 100644
--- a/server_addon/houdini/server/settings/main.py
+++ b/server_addon/houdini/server/settings/main.py
@@ -3,7 +3,10 @@ from .general import (
GeneralSettingsModel,
DEFAULT_GENERAL_SETTINGS
)
-from .imageio import HoudiniImageIOModel
+from .imageio import (
+ HoudiniImageIOModel,
+ DEFAULT_IMAGEIO_SETTINGS
+)
from .shelves import ShelvesModel
from .create import (
CreatePluginsModel,
@@ -40,6 +43,7 @@ class HoudiniSettings(BaseSettingsModel):
DEFAULT_VALUES = {
"general": DEFAULT_GENERAL_SETTINGS,
+ "imageio": DEFAULT_IMAGEIO_SETTINGS,
"shelves": [],
"create": DEFAULT_HOUDINI_CREATE_SETTINGS,
"publish": DEFAULT_HOUDINI_PUBLISH_SETTINGS
diff --git a/server_addon/traypublisher/server/settings/creator_plugins.py b/server_addon/traypublisher/server/settings/creator_plugins.py
index bf66d9a088..1ff14002aa 100644
--- a/server_addon/traypublisher/server/settings/creator_plugins.py
+++ b/server_addon/traypublisher/server/settings/creator_plugins.py
@@ -1,4 +1,7 @@
+from pydantic import validator
from ayon_server.settings import BaseSettingsModel, SettingsField
+from ayon_server.settings.validators import ensure_unique_names
+from ayon_server.exceptions import BadRequestException
class BatchMovieCreatorPlugin(BaseSettingsModel):
@@ -22,11 +25,139 @@ class BatchMovieCreatorPlugin(BaseSettingsModel):
)
+class ColumnItemModel(BaseSettingsModel):
+ """Allows to publish multiple video files in one go.
Name of matching
+ asset is parsed from file names ('asset.mov', 'asset_v001.mov',
+ 'my_asset_to_publish.mov')"""
+
+ name: str = SettingsField(
+ title="Name",
+ default=""
+ )
+
+ type: str = SettingsField(
+ title="Type",
+ default=""
+ )
+
+ default: str = SettingsField(
+ title="Default",
+ default=""
+ )
+
+ required_column: bool = SettingsField(
+ title="Required Column",
+ default=False
+ )
+
+ validation_pattern: str = SettingsField(
+ title="Validation Regex Pattern",
+ default="^(.*)$"
+ )
+
+
+class ColumnConfigModel(BaseSettingsModel):
+ """Allows to publish multiple video files in one go.
Name of matching
+ asset is parsed from file names ('asset.mov', 'asset_v001.mov',
+ 'my_asset_to_publish.mov')"""
+
+ csv_delimiter: str = SettingsField(
+ title="CSV delimiter",
+ default=","
+ )
+
+ columns: list[ColumnItemModel] = SettingsField(
+ title="Columns",
+ default_factory=list
+ )
+
+ @validator("columns")
+ def validate_unique_outputs(cls, value):
+ ensure_unique_names(value)
+ return value
+
+
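
`ensure_unique_names` is assumed to reject a settings list in which two entries share the same `name`, which keeps every configured column addressable by name. A standalone sketch of that check (not the actual ayon_server implementation, which raises its own exception type):

```python
def ensure_unique_names_sketch(items):
    """Raise if two items in a settings list share the same name."""
    seen = set()
    for item in items:
        name = getattr(item, "name", None)
        if name in seen:
            raise ValueError(f"Duplicate name in settings list: {name}")
        seen.add(name)
```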
+class RepresentationItemModel(BaseSettingsModel):
+ """Allows to publish multiple video files in one go.
+
+ Name of matching asset is parsed from file names
+ ('asset.mov', 'asset_v001.mov', 'my_asset_to_publish.mov')
+ """
+
+ name: str = SettingsField(
+ title="Name",
+ default=""
+ )
+
+ extensions: list[str] = SettingsField(
+ title="Extensions",
+ default_factory=list
+ )
+
+ @validator("extensions")
+ def validate_extension(cls, value):
+ for ext in value:
+ if not ext.startswith("."):
+ raise BadRequestException(f"Extension must start with '.': {ext}")
+ return value
+
+
+class RepresentationConfigModel(BaseSettingsModel):
+ """Allows to publish multiple video files in one go.
Name of matching
+ asset is parsed from file names ('asset.mov', 'asset_v001.mov',
+ 'my_asset_to_publish.mov')"""
+
+ tags_delimiter: str = SettingsField(
+ title="Tags delimiter",
+ default=";"
+ )
+
+ default_tags: list[str] = SettingsField(
+ title="Default tags",
+ default_factory=list
+ )
+
+ representations: list[RepresentationItemModel] = SettingsField(
+ title="Representations",
+ default_factory=list
+ )
+
+ @validator("representations")
+ def validate_unique_outputs(cls, value):
+ ensure_unique_names(value)
+ return value
+
+
+class IngestCSVPluginModel(BaseSettingsModel):
+ """Allows to publish multiple video files in one go.
Name of matching
+ asset is parsed from file names ('asset.mov', 'asset_v001.mov',
+ 'my_asset_to_publish.mov')"""
+
+ enabled: bool = SettingsField(
+ title="Enabled",
+ default=False
+ )
+
+ columns_config: ColumnConfigModel = SettingsField(
+ title="Columns config",
+ default_factory=ColumnConfigModel
+ )
+
+ representations_config: RepresentationConfigModel = SettingsField(
+ title="Representations config",
+ default_factory=RepresentationConfigModel
+ )
+
+
class TrayPublisherCreatePluginsModel(BaseSettingsModel):
BatchMovieCreator: BatchMovieCreatorPlugin = SettingsField(
title="Batch Movie Creator",
default_factory=BatchMovieCreatorPlugin
)
+ IngestCSV: IngestCSVPluginModel = SettingsField(
+ title="Ingest CSV",
+ default_factory=IngestCSVPluginModel
+ )
DEFAULT_CREATORS = {
@@ -41,4 +172,170 @@ DEFAULT_CREATORS = {
".mov"
]
},
+ "IngestCSV": {
+ "enabled": True,
+ "columns_config": {
+ "csv_delimiter": ",",
+ "columns": [
+ {
+ "name": "File Path",
+ "type": "text",
+ "default": "",
+ "required_column": True,
+ "validation_pattern": "^([a-z0-9#._\\/]*)$"
+ },
+ {
+ "name": "Folder Path",
+ "type": "text",
+ "default": "",
+ "required_column": True,
+ "validation_pattern": "^([a-zA-Z0-9_\\/]*)$"
+ },
+ {
+ "name": "Task Name",
+ "type": "text",
+ "default": "",
+ "required_column": True,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Product Type",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Variant",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Version",
+ "type": "number",
+ "default": 1,
+ "required_column": True,
+ "validation_pattern": "^(\\d{1,3})$"
+ },
+ {
+ "name": "Version Comment",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Version Thumbnail",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^([a-zA-Z0-9#._\\/]*)$"
+ },
+ {
+ "name": "Frame Start",
+ "type": "number",
+ "default": 0,
+ "required_column": True,
+ "validation_pattern": "^(\\d{1,8})$"
+ },
+ {
+ "name": "Frame End",
+ "type": "number",
+ "default": 0,
+ "required_column": True,
+ "validation_pattern": "^(\\d{1,8})$"
+ },
+ {
+ "name": "Handle Start",
+ "type": "number",
+ "default": 0,
+ "required_column": True,
+ "validation_pattern": "^(\\d)$"
+ },
+ {
+ "name": "Handle End",
+ "type": "number",
+ "default": 0,
+ "required_column": True,
+ "validation_pattern": "^(\\d)$"
+ },
+ {
+ "name": "FPS",
+ "type": "decimal",
+ "default": 0.0,
+ "required_column": True,
+ "validation_pattern": "^[0-9]*\\.[0-9]+$|^[0-9]+$"
+ },
+ {
+ "name": "Slate Exists",
+ "type": "bool",
+ "default": True,
+ "required_column": False,
+ "validation_pattern": "(True|False)"
+ },
+ {
+ "name": "Representation",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Representation Colorspace",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ },
+ {
+ "name": "Representation Tags",
+ "type": "text",
+ "default": "",
+ "required_column": False,
+ "validation_pattern": "^(.*)$"
+ }
+ ]
+ },
+ "representations_config": {
+ "tags_delimiter": ";",
+ "default_tags": [
+ "review"
+ ],
+ "representations": [
+ {
+ "name": "preview",
+ "extensions": [
+ ".mp4",
+ ".mov"
+ ]
+ },
+ {
+ "name": "exr",
+ "extensions": [
+ ".exr"
+ ]
+ },
+ {
+ "name": "edit",
+ "extensions": [
+ ".mov"
+ ]
+ },
+ {
+ "name": "review",
+ "extensions": [
+ ".mov"
+ ]
+ },
+ {
+ "name": "nuke",
+ "extensions": [
+ ".nk"
+ ]
+ }
+ ]
+ }
+ }
}
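
Taken together, the defaults above describe a per-column schema for the ingested CSV. A hypothetical helper that checks one file against such a configuration (assuming the CSV header row matches the configured column names and that empty optional cells are skipped):

```python
import csv
import re


def iter_invalid_cells(filepath, columns_config):
    """Yield (row_number, column_name, value) for every cell that fails
    its column's validation pattern."""
    columns = {col["name"]: col for col in columns_config["columns"]}
    with open(filepath, newline="") as stream:
        reader = csv.DictReader(
            stream, delimiter=columns_config["csv_delimiter"])
        for row_number, row in enumerate(reader, start=1):
            for name, column in columns.items():
                value = row.get(name) or ""
                if not value and not column["required_column"]:
                    continue  # optional cells may stay empty
                if not re.match(column["validation_pattern"], str(value)):
                    yield row_number, name, value


# Usage against the defaults above (columns_config taken from the
# "IngestCSV" -> "columns_config" settings dictionary):
# for row, col, value in iter_invalid_cells("ingest.csv", columns_config):
#     print(f"Row {row}: column '{col}' has invalid value '{value}'")
```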