diff --git a/client/ayon_core/hooks/pre_ocio_hook.py b/client/ayon_core/hooks/pre_ocio_hook.py
index 0817afec71..6c30b267bc 100644
--- a/client/ayon_core/hooks/pre_ocio_hook.py
+++ b/client/ayon_core/hooks/pre_ocio_hook.py
@@ -1,7 +1,7 @@
from ayon_applications import PreLaunchHook
-from ayon_core.pipeline.colorspace import get_imageio_config
-from ayon_core.pipeline.template_data import get_template_data_with_names
+from ayon_core.pipeline.colorspace import get_imageio_config_preset
+from ayon_core.pipeline.template_data import get_template_data
class OCIOEnvHook(PreLaunchHook):
@@ -26,32 +26,38 @@ class OCIOEnvHook(PreLaunchHook):
def execute(self):
"""Hook entry method."""
- template_data = get_template_data_with_names(
- project_name=self.data["project_name"],
- folder_path=self.data["folder_path"],
- task_name=self.data["task_name"],
+ folder_entity = self.data["folder_entity"]
+
+ template_data = get_template_data(
+ self.data["project_entity"],
+ folder_entity=folder_entity,
+ task_entity=self.data["task_entity"],
host_name=self.host_name,
- settings=self.data["project_settings"]
+ settings=self.data["project_settings"],
)
- config_data = get_imageio_config(
- project_name=self.data["project_name"],
- host_name=self.host_name,
- project_settings=self.data["project_settings"],
- anatomy_data=template_data,
+ config_data = get_imageio_config_preset(
+ self.data["project_name"],
+ self.data["folder_path"],
+ self.data["task_name"],
+ self.host_name,
anatomy=self.data["anatomy"],
+ project_settings=self.data["project_settings"],
+ template_data=template_data,
env=self.launch_context.env,
+ folder_id=folder_entity["id"],
)
- if config_data:
- ocio_path = config_data["path"]
-
- if self.host_name in ["nuke", "hiero"]:
- ocio_path = ocio_path.replace("\\", "/")
-
- self.log.info(
- f"Setting OCIO environment to config path: {ocio_path}")
-
- self.launch_context.env["OCIO"] = ocio_path
- else:
+ if not config_data:
self.log.debug("OCIO not set or enabled")
+ return
+
+ ocio_path = config_data["path"]
+
+ if self.host_name in ["nuke", "hiero"]:
+ ocio_path = ocio_path.replace("\\", "/")
+
+ self.log.info(
+ f"Setting OCIO environment to config path: {ocio_path}")
+
+ self.launch_context.env["OCIO"] = ocio_path
diff --git a/client/ayon_core/hosts/aftereffects/api/launch_logic.py b/client/ayon_core/hosts/aftereffects/api/launch_logic.py
index 5a23f2cb35..da6887668a 100644
--- a/client/ayon_core/hosts/aftereffects/api/launch_logic.py
+++ b/client/ayon_core/hosts/aftereffects/api/launch_logic.py
@@ -60,7 +60,7 @@ def main(*subprocess_args):
)
)
- elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True):
+ elif os.environ.get("AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH", True):
save = False
if os.getenv("WORKFILES_SAVE_AS"):
save = True
diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py
index c28042b6ae..ebd4b8f944 100644
--- a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py
+++ b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py
@@ -24,7 +24,7 @@ class AERenderInstance(RenderInstance):
class CollectAERender(publish.AbstractCollectRender):
- order = pyblish.api.CollectorOrder + 0.405
+ order = pyblish.api.CollectorOrder + 0.100
label = "Collect After Effects Render Layers"
hosts = ["aftereffects"]
@@ -145,6 +145,7 @@ class CollectAERender(publish.AbstractCollectRender):
if "review" in instance.families:
# to skip ExtractReview locally
instance.families.remove("review")
+ instance.deadline = inst.data.get("deadline")
instances.append(instance)
diff --git a/client/ayon_core/hosts/blender/api/lib.py b/client/ayon_core/hosts/blender/api/lib.py
index 458a275b51..32137f0fcd 100644
--- a/client/ayon_core/hosts/blender/api/lib.py
+++ b/client/ayon_core/hosts/blender/api/lib.py
@@ -33,7 +33,7 @@ def load_scripts(paths):
if register:
try:
register()
- except:
+ except: # noqa E722
traceback.print_exc()
else:
print("\nWarning! '%s' has no register function, "
@@ -45,7 +45,7 @@ def load_scripts(paths):
if unregister:
try:
unregister()
- except:
+ except: # noqa E722
traceback.print_exc()
def test_reload(mod):
@@ -57,7 +57,7 @@ def load_scripts(paths):
try:
return importlib.reload(mod)
- except:
+ except: # noqa E722
traceback.print_exc()
def test_register(mod):
diff --git a/client/ayon_core/hosts/blender/api/plugin.py b/client/ayon_core/hosts/blender/api/plugin.py
index 6c9bfb6569..4a13d16805 100644
--- a/client/ayon_core/hosts/blender/api/plugin.py
+++ b/client/ayon_core/hosts/blender/api/plugin.py
@@ -143,13 +143,19 @@ def deselect_all():
if obj.mode != 'OBJECT':
modes.append((obj, obj.mode))
bpy.context.view_layer.objects.active = obj
- bpy.ops.object.mode_set(mode='OBJECT')
+ context_override = create_blender_context(active=obj)
+ with bpy.context.temp_override(**context_override):
+ bpy.ops.object.mode_set(mode='OBJECT')
- bpy.ops.object.select_all(action='DESELECT')
+ context_override = create_blender_context()
+ with bpy.context.temp_override(**context_override):
+ bpy.ops.object.select_all(action='DESELECT')
for p in modes:
bpy.context.view_layer.objects.active = p[0]
- bpy.ops.object.mode_set(mode=p[1])
+ context_override = create_blender_context(active=p[0])
+ with bpy.context.temp_override(**context_override):
+ bpy.ops.object.mode_set(mode=p[1])
bpy.context.view_layer.objects.active = active
diff --git a/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py b/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py
index 6178578081..a49bb40d9a 100644
--- a/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py
+++ b/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py
@@ -43,7 +43,10 @@ class AbcCameraLoader(plugin.AssetLoader):
def _process(self, libpath, asset_group, group_name):
plugin.deselect_all()
- bpy.ops.wm.alembic_import(filepath=libpath)
+ # Force the creation of the transform cache even if the camera
+ # doesn't have an animation. We use the cache to update the camera.
+ bpy.ops.wm.alembic_import(
+ filepath=libpath, always_add_cache_reader=True)
objects = lib.get_selection()
@@ -178,12 +181,33 @@ class AbcCameraLoader(plugin.AssetLoader):
self.log.info("Library already loaded, not updating...")
return
- mat = asset_group.matrix_basis.copy()
+ for obj in asset_group.children:
+ found = False
+ for constraint in obj.constraints:
+ if constraint.type == "TRANSFORM_CACHE":
+ constraint.cache_file.filepath = libpath.as_posix()
+ found = True
+ break
+ if not found:
+ # This is to keep compatibility with cameras loaded with
+ # the old loader
+ # Create a new constraint for the cache file
+ constraint = obj.constraints.new("TRANSFORM_CACHE")
+ bpy.ops.cachefile.open(filepath=libpath.as_posix())
+ constraint.cache_file = bpy.data.cache_files[-1]
+ constraint.cache_file.scale = 1.0
- self._remove(asset_group)
- self._process(str(libpath), asset_group, object_name)
+ # This is a workaround to set the object path. Blender doesn't
+ # load the list of object paths until the object is evaluated.
+ # This is a hack to force the object to be evaluated.
+ # The modifier doesn't need to be removed because camera
+ # objects don't have modifiers.
+ obj.modifiers.new(
+ name='MeshSequenceCache', type='MESH_SEQUENCE_CACHE')
+ bpy.context.evaluated_depsgraph_get()
- asset_group.matrix_basis = mat
+ constraint.object_path = (
+ constraint.cache_file.object_paths[0].path)
metadata["libpath"] = str(libpath)
metadata["representation"] = repre_entity["id"]
diff --git a/client/ayon_core/hosts/fusion/api/lib.py b/client/ayon_core/hosts/fusion/api/lib.py
index 08722463e1..7f7d20010d 100644
--- a/client/ayon_core/hosts/fusion/api/lib.py
+++ b/client/ayon_core/hosts/fusion/api/lib.py
@@ -169,7 +169,7 @@ def validate_comp_prefs(comp=None, force_repair=False):
def _on_repair():
attributes = dict()
for key, comp_key, _label in validations:
- value = folder_value[key]
+ value = folder_attributes[key]
comp_key_full = "Comp.FrameFormat.{}".format(comp_key)
attributes[comp_key_full] = value
comp.SetPrefs(attributes)
diff --git a/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py b/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py
index ee7b6d728e..9c04e59717 100644
--- a/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py
+++ b/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py
@@ -115,6 +115,7 @@ class CollectFusionRender(
if "review" in instance.families:
# to skip ExtractReview locally
instance.families.remove("review")
+ instance.deadline = inst.data.get("deadline")
instances.append(instance)
diff --git a/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py
index 156e2ac6ba..c63eb114e5 100644
--- a/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py
+++ b/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py
@@ -177,7 +177,10 @@ class CollectFarmRender(publish.AbstractCollectRender):
outputFormat=info[1],
outputStartFrame=info[3],
leadingZeros=info[2],
- ignoreFrameHandleCheck=True
+ ignoreFrameHandleCheck=True,
+                    # TODO: inst is not available, must be determined; fix when
+                    # reworking to Publisher
+ # deadline=inst.data.get("deadline")
)
render_instance.context = context
diff --git a/client/ayon_core/hosts/hiero/api/lib.py b/client/ayon_core/hosts/hiero/api/lib.py
index aaf99546c7..456a68f125 100644
--- a/client/ayon_core/hosts/hiero/api/lib.py
+++ b/client/ayon_core/hosts/hiero/api/lib.py
@@ -1110,10 +1110,7 @@ def apply_colorspace_project():
'''
# backward compatibility layer
# TODO: remove this after some time
- config_data = get_imageio_config(
- project_name=get_current_project_name(),
- host_name="hiero"
- )
+ config_data = get_current_context_imageio_config_preset()
if config_data:
presets.update({
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py
index f65b54a452..1208cfc1ea 100644
--- a/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py
@@ -13,11 +13,17 @@ class CreateArnoldRop(plugin.HoudiniCreator):
# Default extension
ext = "exr"
- # Default to split export and render jobs
- export_job = True
+ # Default render target
+ render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
import hou
+ # Transfer settings from pre create to instance
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+ for key in ["render_target", "review"]:
+ if key in pre_create_data:
+ creator_attributes[key] = pre_create_data[key]
# Remove the active, we are checking the bypass flag of the nodes
instance_data.pop("active", None)
@@ -25,8 +31,6 @@ class CreateArnoldRop(plugin.HoudiniCreator):
# Add chunk size attribute
instance_data["chunkSize"] = 1
- # Submit for job publishing
- instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateArnoldRop, self).create(
product_name,
@@ -51,7 +55,7 @@ class CreateArnoldRop(plugin.HoudiniCreator):
"ar_exr_half_precision": 1 # half precision
}
- if pre_create_data.get("export_job"):
+ if pre_create_data.get("render_target") == "farm_split":
ass_filepath = \
"{export_dir}{product_name}/{product_name}.$F4.ass".format(
export_dir=hou.text.expandString("$HIP/pyblish/ass/"),
@@ -66,23 +70,41 @@ class CreateArnoldRop(plugin.HoudiniCreator):
to_lock = ["productType", "id"]
self.lock_parameters(instance_node, to_lock)
- def get_pre_create_attr_defs(self):
- attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs()
+ def get_instance_attr_defs(self):
+        """Get instance attribute definitions.
+        Attributes defined in this method are exposed in
+        the publish tab in the publisher UI.
+ """
+
+ render_target_items = {
+ "local": "Local machine rendering",
+ "local_no_render": "Use existing frames (local)",
+ "farm": "Farm Rendering",
+ "farm_split": "Farm Rendering - Split export & render jobs",
+ }
+
+ return [
+ BoolDef("review",
+ label="Review",
+ tooltip="Mark as reviewable",
+ default=True),
+ EnumDef("render_target",
+ items=render_target_items,
+ label="Render target",
+ default=self.render_target),
+ ]
+
+ def get_pre_create_attr_defs(self):
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
- return attrs + [
- BoolDef("farm",
- label="Submitting to Farm",
- default=True),
- BoolDef("export_job",
- label="Split export and render jobs",
- default=self.export_job),
+ attrs = [
EnumDef("image_format",
image_format_enum,
default=self.ext,
- label="Image Format Options")
+ label="Image Format Options"),
]
+ return attrs + self.get_instance_attr_defs()
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py
index e91ddbc0ac..48cf5057ab 100644
--- a/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py
@@ -11,15 +11,23 @@ class CreateKarmaROP(plugin.HoudiniCreator):
product_type = "karma_rop"
icon = "magic"
+ # Default render target
+ render_target = "farm"
+
def create(self, product_name, instance_data, pre_create_data):
import hou # noqa
+ # Transfer settings from pre create to instance
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+
+ for key in ["render_target", "review"]:
+ if key in pre_create_data:
+ creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "karma"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
- # Submit for job publishing
- instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateKarmaROP, self).create(
product_name,
@@ -86,18 +94,40 @@ class CreateKarmaROP(plugin.HoudiniCreator):
to_lock = ["productType", "id"]
self.lock_parameters(instance_node, to_lock)
- def get_pre_create_attr_defs(self):
- attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
+ def get_instance_attr_defs(self):
+        """Get instance attribute definitions.
+        Attributes defined in this method are exposed in
+        the publish tab in the publisher UI.
+ """
+
+ render_target_items = {
+ "local": "Local machine rendering",
+ "local_no_render": "Use existing frames (local)",
+ "farm": "Farm Rendering",
+ }
+
+ return [
+ BoolDef("review",
+ label="Review",
+ tooltip="Mark as reviewable",
+ default=True),
+ EnumDef("render_target",
+ items=render_target_items,
+ label="Render target",
+ default=self.render_target)
+ ]
+
+
+ def get_pre_create_attr_defs(self):
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
- return attrs + [
- BoolDef("farm",
- label="Submitting to Farm",
- default=True),
+ attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
+
+ attrs += [
EnumDef("image_format",
image_format_enum,
default="exr",
@@ -112,5 +142,6 @@ class CreateKarmaROP(plugin.HoudiniCreator):
decimals=0),
BoolDef("cam_res",
label="Camera Resolution",
- default=False)
+ default=False),
]
+ return attrs + self.get_instance_attr_defs()
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py
index 64ecf428e9..05b4431aba 100644
--- a/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py
@@ -11,18 +11,22 @@ class CreateMantraROP(plugin.HoudiniCreator):
product_type = "mantra_rop"
icon = "magic"
- # Default to split export and render jobs
- export_job = True
+ # Default render target
+ render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
import hou # noqa
+ # Transfer settings from pre create to instance
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+ for key in ["render_target", "review"]:
+ if key in pre_create_data:
+ creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "ifd"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
- # Submit for job publishing
- instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateMantraROP, self).create(
product_name,
@@ -46,7 +50,7 @@ class CreateMantraROP(plugin.HoudiniCreator):
"vm_picture": filepath,
}
- if pre_create_data.get("export_job"):
+ if pre_create_data.get("render_target") == "farm_split":
ifd_filepath = \
"{export_dir}{product_name}/{product_name}.$F4.ifd".format(
export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),
@@ -77,21 +81,40 @@ class CreateMantraROP(plugin.HoudiniCreator):
to_lock = ["productType", "id"]
self.lock_parameters(instance_node, to_lock)
- def get_pre_create_attr_defs(self):
- attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
+ def get_instance_attr_defs(self):
+        """Get instance attribute definitions.
+        Attributes defined in this method are exposed in
+        the publish tab in the publisher UI.
+ """
+
+ render_target_items = {
+ "local": "Local machine rendering",
+ "local_no_render": "Use existing frames (local)",
+ "farm": "Farm Rendering",
+ "farm_split": "Farm Rendering - Split export & render jobs",
+ }
+
+ return [
+ BoolDef("review",
+ label="Review",
+ tooltip="Mark as reviewable",
+ default=True),
+ EnumDef("render_target",
+ items=render_target_items,
+ label="Render target",
+ default=self.render_target)
+ ]
+
+ def get_pre_create_attr_defs(self):
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
- return attrs + [
- BoolDef("farm",
- label="Submitting to Farm",
- default=True),
- BoolDef("export_job",
- label="Split export and render jobs",
- default=self.export_job),
+ attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
+
+ attrs += [
EnumDef("image_format",
image_format_enum,
default="exr",
@@ -100,5 +123,6 @@ class CreateMantraROP(plugin.HoudiniCreator):
label="Override Camera Resolution",
tooltip="Override the current camera "
"resolution, recommended for IPR.",
- default=False)
+ default=False),
]
+ return attrs + self.get_instance_attr_defs()
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py
index 1cd239e929..3ecb09ee9b 100644
--- a/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py
@@ -17,17 +17,21 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
ext = "exr"
multi_layered_mode = "No Multi-Layered EXR File"
- # Default to split export and render jobs
- split_render = True
+ # Default render target
+ render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
+ # Transfer settings from pre create to instance
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+ for key in ["render_target", "review"]:
+ if key in pre_create_data:
+ creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "Redshift_ROP"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
- # Submit for job publishing
- instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateRedshiftROP, self).create(
product_name,
@@ -99,7 +103,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
rs_filepath = f"{export_dir}{product_name}/{product_name}.$F4.rs"
parms["RS_archive_file"] = rs_filepath
- if pre_create_data.get("split_render", self.split_render):
+ if pre_create_data.get("render_target") == "farm_split":
parms["RS_archive_enable"] = 1
instance_node.setParms(parms)
@@ -118,24 +122,44 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
return super(CreateRedshiftROP, self).remove_instances(instances)
+ def get_instance_attr_defs(self):
+        """Get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        the publish tab in the publisher UI.
+ """
+
+ render_target_items = {
+ "local": "Local machine rendering",
+ "local_no_render": "Use existing frames (local)",
+ "farm": "Farm Rendering",
+ "farm_split": "Farm Rendering - Split export & render jobs",
+ }
+
+ return [
+ BoolDef("review",
+ label="Review",
+ tooltip="Mark as reviewable",
+ default=True),
+ EnumDef("render_target",
+ items=render_target_items,
+ label="Render target",
+ default=self.render_target)
+ ]
+
def get_pre_create_attr_defs(self):
- attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
+
image_format_enum = [
"exr", "tif", "jpg", "png",
]
+
multi_layered_mode = [
"No Multi-Layered EXR File",
"Full Multi-Layered EXR File"
]
-
- return attrs + [
- BoolDef("farm",
- label="Submitting to Farm",
- default=True),
- BoolDef("split_render",
- label="Split export and render jobs",
- default=self.split_render),
+ attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
+ attrs += [
EnumDef("image_format",
image_format_enum,
default=self.ext,
@@ -143,5 +167,6 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
EnumDef("multi_layered_mode",
multi_layered_mode,
default=self.multi_layered_mode,
- label="Multi-Layered EXR")
+ label="Multi-Layered EXR"),
]
+ return attrs + self.get_instance_attr_defs()
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py
index 5ed9e848a7..9e4633e745 100644
--- a/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py
@@ -16,17 +16,21 @@ class CreateVrayROP(plugin.HoudiniCreator):
icon = "magic"
ext = "exr"
- # Default to split export and render jobs
- export_job = True
+ # Default render target
+ render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
+ # Transfer settings from pre create to instance
+ creator_attributes = instance_data.setdefault(
+ "creator_attributes", dict())
+ for key in ["render_target", "review"]:
+ if key in pre_create_data:
+ creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "vray_renderer"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
- # Submit for job publishing
- instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateVrayROP, self).create(
product_name,
@@ -55,7 +59,7 @@ class CreateVrayROP(plugin.HoudiniCreator):
"SettingsEXR_bits_per_channel": "16" # half precision
}
- if pre_create_data.get("export_job"):
+ if pre_create_data.get("render_target") == "farm_split":
scene_filepath = \
"{export_dir}{product_name}/{product_name}.$F4.vrscene".format(
export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
@@ -143,20 +147,41 @@ class CreateVrayROP(plugin.HoudiniCreator):
return super(CreateVrayROP, self).remove_instances(instances)
+ def get_instance_attr_defs(self):
+        """Get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        the publish tab in the publisher UI.
+ """
+
+
+ render_target_items = {
+ "local": "Local machine rendering",
+ "local_no_render": "Use existing frames (local)",
+ "farm": "Farm Rendering",
+ "farm_split": "Farm Rendering - Split export & render jobs",
+ }
+
+ return [
+ BoolDef("review",
+ label="Review",
+ tooltip="Mark as reviewable",
+ default=True),
+ EnumDef("render_target",
+ items=render_target_items,
+ label="Render target",
+ default=self.render_target)
+ ]
+
def get_pre_create_attr_defs(self):
- attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
- return attrs + [
- BoolDef("farm",
- label="Submitting to Farm",
- default=True),
- BoolDef("export_job",
- label="Split export and render jobs",
- default=self.export_job),
+ attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
+
+ attrs += [
EnumDef("image_format",
image_format_enum,
default=self.ext,
@@ -172,3 +197,4 @@ class CreateVrayROP(plugin.HoudiniCreator):
"if enabled",
default=False)
]
+ return attrs + self.get_instance_attr_defs()
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py b/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py
index a958509e25..40a607e81a 100644
--- a/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py
@@ -95,7 +95,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
# write workfile information to context container.
op_ctx = hou.node(CONTEXT_CONTAINER)
if not op_ctx:
- op_ctx = self.create_context_node()
+ op_ctx = self.host.create_context_node()
workfile_data = {"workfile": current_instance.data_to_store()}
imprint(op_ctx, workfile_data)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py
index 7fe38555a3..53a3e52717 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py
@@ -40,12 +40,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "ar_picture")
render_products = []
- # Store whether we are splitting the render job (export + render)
- split_render = bool(rop.parm("ar_ass_export_enable").eval())
- instance.data["splitRender"] = split_render
export_prefix = None
export_products = []
- if split_render:
+ if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "ar_ass_file", pad_character="0"
)
@@ -68,7 +65,12 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
"": self.generate_expected_files(instance, beauty_product)
}
+ # Assume it's a multipartExr Render.
+ multipartExr = True
+
num_aovs = rop.evalParm("ar_aovs")
+ # TODO: Check the following logic.
+ # as it always assumes that all AOV are not merged.
for index in range(1, num_aovs + 1):
# Skip disabled AOVs
if not rop.evalParm("ar_enable_aov{}".format(index)):
@@ -85,6 +87,14 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
files_by_aov[label] = self.generate_expected_files(instance,
aov_product)
+ # Set to False as soon as we have a separated aov.
+ multipartExr = False
+
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # It stays True only while no separate (non-merged) AOV is found.
+ instance.data["multipartExr"] = multipartExr
+
for product in render_products:
self.log.debug("Found render product: {}".format(product))
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py
index 040ad68a1a..e931c7bf1b 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py
@@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api import lib
class CollectDataforCache(pyblish.api.InstancePlugin):
"""Collect data for caching to Deadline."""
- order = pyblish.api.CollectorOrder + 0.04
+ # Run after Collect Frames
+ order = pyblish.api.CollectorOrder + 0.11
families = ["ass", "pointcache",
"mantraifd", "redshiftproxy",
"vdbcache"]
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_farm_instances.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_farm_instances.py
new file mode 100644
index 0000000000..586aa2da57
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_farm_instances.py
@@ -0,0 +1,35 @@
+import pyblish.api
+
+
+class CollectFarmInstances(pyblish.api.InstancePlugin):
+ """Collect instances for farm render."""
+
+ order = pyblish.api.CollectorOrder
+ families = ["mantra_rop",
+ "karma_rop",
+ "redshift_rop",
+ "arnold_rop",
+ "vray_rop"]
+
+ hosts = ["houdini"]
+ targets = ["local", "remote"]
+ label = "Collect farm instances"
+
+ def process(self, instance):
+
+ creator_attribute = instance.data["creator_attributes"]
+
+ # Collect Render Target
+ if creator_attribute.get("render_target") not in {
+ "farm_split", "farm"
+ }:
+ instance.data["farm"] = False
+ instance.data["splitRender"] = False
+ self.log.debug("Render on farm is disabled. "
+ "Skipping farm collecting.")
+ return
+
+ instance.data["farm"] = True
+ instance.data["splitRender"] = (
+ creator_attribute.get("render_target") == "farm_split"
+ )
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py
index a643ab0d38..b38ebc6e2f 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py
@@ -17,7 +17,7 @@ class CollectFrames(pyblish.api.InstancePlugin):
label = "Collect Frames"
families = ["vdbcache", "imagesequence", "ass",
"mantraifd", "redshiftproxy", "review",
- "bgeo"]
+ "pointcache"]
def process(self, instance):
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py
index 78651b0c69..662ed7ae30 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py
@@ -55,6 +55,12 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
beauty_product)
}
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # Karma writes all AOVs into a single output file,
+        # so its renders are always treated as multipart Exr.
+        instance.data["multipartExr"] = True
+
filenames = list(render_products)
instance.data["files"] = filenames
instance.data["renderProducts"] = colorspace.ARenderProduct()
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_local_render_instances.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_local_render_instances.py
new file mode 100644
index 0000000000..5a446fa0d3
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_local_render_instances.py
@@ -0,0 +1,137 @@
+import os
+import pyblish.api
+from ayon_core.pipeline.create import get_product_name
+from ayon_core.pipeline.farm.patterning import match_aov_pattern
+from ayon_core.pipeline.publish import (
+ get_plugin_settings,
+ apply_plugin_settings_automatically
+)
+
+
+class CollectLocalRenderInstances(pyblish.api.InstancePlugin):
+ """Collect instances for local render.
+
+ Agnostic Local Render Collector.
+ """
+
+ # this plugin runs after Collect Render Products
+ order = pyblish.api.CollectorOrder + 0.12
+ families = ["mantra_rop",
+ "karma_rop",
+ "redshift_rop",
+ "arnold_rop",
+ "vray_rop"]
+
+ hosts = ["houdini"]
+ label = "Collect local render instances"
+
+ use_deadline_aov_filter = False
+ aov_filter = {"host_name": "houdini",
+ "value": [".*([Bb]eauty).*"]}
+
+ @classmethod
+ def apply_settings(cls, project_settings):
+ # Preserve automatic settings applying logic
+ settings = get_plugin_settings(plugin=cls,
+ project_settings=project_settings,
+ log=cls.log,
+ category="houdini")
+ apply_plugin_settings_automatically(cls, settings, logger=cls.log)
+
+ if not cls.use_deadline_aov_filter:
+ # get aov_filter from collector settings
+ # and restructure it as match_aov_pattern requires.
+ cls.aov_filter = {
+ cls.aov_filter["host_name"]: cls.aov_filter["value"]
+ }
+ else:
+ # get aov_filter from deadline settings
+ cls.aov_filter = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"]["aov_filter"]
+ cls.aov_filter = {
+ item["name"]: item["value"]
+ for item in cls.aov_filter
+ }
+
+ def process(self, instance):
+
+ if instance.data["farm"]:
+ self.log.debug("Render on farm is enabled. "
+ "Skipping local render collecting.")
+ return
+
+ # Create Instance for each AOV.
+ context = instance.context
+ expectedFiles = next(iter(instance.data["expectedFiles"]), {})
+
+ product_type = "render" # is always render
+ product_group = get_product_name(
+ context.data["projectName"],
+ context.data["taskEntity"]["name"],
+ context.data["taskEntity"]["taskType"],
+ context.data["hostName"],
+ product_type,
+ instance.data["productName"]
+ )
+
+ for aov_name, aov_filepaths in expectedFiles.items():
+ product_name = product_group
+
+ if aov_name:
+ product_name = "{}_{}".format(product_name, aov_name)
+
+ # Create instance for each AOV
+ aov_instance = context.create_instance(product_name)
+
+ # Prepare Representation for each AOV
+ aov_filenames = [os.path.basename(path) for path in aov_filepaths]
+ staging_dir = os.path.dirname(aov_filepaths[0])
+ ext = aov_filepaths[0].split(".")[-1]
+
+ # Decide if instance is reviewable
+ preview = False
+ if instance.data.get("multipartExr", False):
+ # Add preview tag because it's a multipartExr.
+ preview = True
+ else:
+ # Add Preview tag if the AOV matches the filter.
+ preview = match_aov_pattern(
+ "houdini", self.aov_filter, aov_filenames[0]
+ )
+
+ preview = preview and instance.data.get("review", False)
+
+ # Support Single frame.
+ # The integrator wants single files to be a single
+ # filename instead of a list.
+ # More info: https://github.com/ynput/ayon-core/issues/238
+ if len(aov_filenames) == 1:
+ aov_filenames = aov_filenames[0]
+
+ aov_instance.data.update({
+ # 'label': label,
+ "task": instance.data["task"],
+ "folderPath": instance.data["folderPath"],
+ "frameStart": instance.data["frameStartHandle"],
+ "frameEnd": instance.data["frameEndHandle"],
+ "productType": product_type,
+ "family": product_type,
+ "productName": product_name,
+ "productGroup": product_group,
+ "families": ["render.local.hou", "review"],
+ "instance_node": instance.data["instance_node"],
+ "representations": [
+ {
+ "stagingDir": staging_dir,
+ "ext": ext,
+ "name": ext,
+ "tags": ["review"] if preview else [],
+ "files": aov_filenames,
+ "frameStart": instance.data["frameStartHandle"],
+ "frameEnd": instance.data["frameEndHandle"]
+ }
+ ]
+ })
+
+ # Remove original render instance
+ # I can't remove it here as I still need it to trigger the render.
+ # context.remove(instance)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py
index df9acc4b61..7b247768fc 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py
@@ -44,12 +44,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "vm_picture")
render_products = []
- # Store whether we are splitting the render job (export + render)
- split_render = bool(rop.parm("soho_outputmode").eval())
- instance.data["splitRender"] = split_render
export_prefix = None
export_products = []
- if split_render:
+ if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "soho_diskfile", pad_character="0"
)
@@ -74,6 +71,11 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
beauty_product)
}
+ # Assume it's a multipartExr Render.
+ multipartExr = True
+
+ # TODO: This logic doesn't take into considerations
+ # cryptomatte defined in 'Images > Cryptomatte'
aov_numbers = rop.evalParm("vm_numaux")
if aov_numbers > 0:
# get the filenames of the AOVs
@@ -93,6 +95,14 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa
+ # Set to False as soon as we have a separated aov.
+ multipartExr = False
+
+ # Review Logic expects this key to exist and be True
+ # if render is a multipart Exr.
+ # As long as we have one AOV then multipartExr should be True.
+ instance.data["multipartExr"] = multipartExr
+
for product in render_products:
self.log.debug("Found render product: %s" % product)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py
index 55a55bb12a..ce90ae2413 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py
@@ -42,11 +42,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix")
beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix")
- # Store whether we are splitting the render job (export + render)
- split_render = bool(rop.parm("RS_archive_enable").eval())
- instance.data["splitRender"] = split_render
+
export_products = []
- if split_render:
+ if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "RS_archive_file", pad_character="0"
)
@@ -63,9 +61,12 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
full_exr_mode = (rop.evalParm("RS_outputMultilayerMode") == "2")
if full_exr_mode:
# Ignore beauty suffix if full mode is enabled
- # As this is what the rop does.
+ # As this is what the rop does.
beauty_suffix = ""
+ # Assume it's a multipartExr Render.
+ multipartExr = True
+
# Default beauty/main layer AOV
beauty_product = self.get_render_product_name(
prefix=default_prefix, suffix=beauty_suffix
@@ -75,7 +76,7 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
beauty_suffix: self.generate_expected_files(instance,
beauty_product)
}
-
+
aovs_rop = rop.parm("RS_aovGetFromNode").evalAsNode()
if aovs_rop:
rop = aovs_rop
@@ -98,13 +99,21 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
if rop.parm(f"RS_aovID_{i}").evalAsString() == "CRYPTOMATTE" or \
not full_exr_mode:
-
+
aov_product = self.get_render_product_name(aov_prefix, aov_suffix)
render_products.append(aov_product)
files_by_aov[aov_suffix] = self.generate_expected_files(instance,
aov_product) # noqa
+ # Set to False as soon as we have a separated aov.
+ multipartExr = False
+
+ # Review Logic expects this key to exist and be True
+ # if render is a multipart Exr.
+ # As long as we have one AOV then multipartExr should be True.
+ instance.data["multipartExr"] = multipartExr
+
for product in render_products:
self.log.debug("Found render product: %s" % product)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py
index 9671945b9a..ed2de785a2 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py
@@ -8,7 +8,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
label = "Collect Review Data"
# This specific order value is used so that
# this plugin runs after CollectRopFrameRange
- order = pyblish.api.CollectorOrder + 0.1
+ # Also after CollectLocalRenderInstances
+ order = pyblish.api.CollectorOrder + 0.13
hosts = ["houdini"]
families = ["review"]
@@ -28,7 +29,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
ropnode_path = instance.data["instance_node"]
ropnode = hou.node(ropnode_path)
- camera_path = ropnode.parm("camera").eval()
+ # Get camera based on the instance_node type.
+ camera_path = self._get_camera_path(ropnode)
camera_node = hou.node(camera_path)
if not camera_node:
self.log.warning("No valid camera node found on review node: "
@@ -55,3 +57,29 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
# Store focal length in `burninDataMembers`
burnin_members = instance.data.setdefault("burninDataMembers", {})
burnin_members["focalLength"] = focal_length
+
+ def _get_camera_path(self, ropnode):
+ """Get the camera path associated with the given rop node.
+
+ This function evaluates the camera parameter according to the
+ type of the given rop node.
+
+ Returns:
+ Union[str, None]: Camera path or None.
+
+ This function can return an empty string if the camera
+ parameter is empty, i.e. there is no camera path.
+ """
+
+ if ropnode.type().name() in {
+ "opengl", "karma", "ifd", "arnold"
+ }:
+ return ropnode.parm("camera").eval()
+
+ elif ropnode.type().name() == "Redshift_ROP":
+ return ropnode.parm("RS_renderCamera").eval()
+
+ elif ropnode.type().name() == "vray_renderer":
+ return ropnode.parm("render_camera").eval()
+
+ return None
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_reviewable_instances.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_reviewable_instances.py
new file mode 100644
index 0000000000..78dc5fe11a
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_reviewable_instances.py
@@ -0,0 +1,22 @@
+import pyblish.api
+
+
+class CollectReviewableInstances(pyblish.api.InstancePlugin):
+ """Collect Reviewable Instances.
+
+ Basically, all instances of the specified families
+ with creator_attribute["review"]
+ """
+
+ order = pyblish.api.CollectorOrder
+ label = "Collect Reviewable Instances"
+ families = ["mantra_rop",
+ "karma_rop",
+ "redshift_rop",
+ "arnold_rop",
+ "vray_rop"]
+
+ def process(self, instance):
+ creator_attribute = instance.data["creator_attributes"]
+
+ instance.data["review"] = creator_attribute.get("review", False)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py
index 62b7dcdd5d..c39b1db103 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py
@@ -45,12 +45,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
render_products = []
# TODO: add render elements if render element
- # Store whether we are splitting the render job in an export + render
- split_render = rop.parm("render_export_mode").eval() == "2"
- instance.data["splitRender"] = split_render
export_prefix = None
export_products = []
- if split_render:
+ if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "render_export_filepath", pad_character="0"
)
@@ -70,6 +67,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
"": self.generate_expected_files(instance,
beauty_product)}
+ # Assume it's a multipartExr Render.
+ multipartExr = True
+
if instance.data.get("RenderElement", True):
render_element = self.get_render_element_name(rop, default_prefix)
if render_element:
@@ -77,7 +77,13 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
render_products.append(renderpass)
files_by_aov[aov] = self.generate_expected_files(
instance, renderpass)
+ # Set to False as soon as we have a separated aov.
+ multipartExr = False
+ # Review Logic expects this key to exist and be True
+ # if render is a multipart Exr.
+ # As long as we have one AOV then multipartExr should be True.
+ instance.data["multipartExr"] = multipartExr
for product in render_products:
self.log.debug("Found render product: %s" % product)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py
index daf30b26ed..7ae476d2b4 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py
@@ -28,10 +28,15 @@ class ExtractAlembic(publish.Extractor):
staging_dir = os.path.dirname(output)
instance.data["stagingDir"] = staging_dir
- file_name = os.path.basename(output)
+ if instance.data.get("frames"):
+ # list of files
+ files = instance.data["frames"]
+ else:
+ # single file
+ files = os.path.basename(output)
# We run the render
- self.log.info("Writing alembic '%s' to '%s'" % (file_name,
+ self.log.info("Writing alembic '%s' to '%s'" % (files,
staging_dir))
render_rop(ropnode)
@@ -42,7 +47,7 @@ class ExtractAlembic(publish.Extractor):
representation = {
'name': 'abc',
'ext': 'abc',
- 'files': file_name,
+ 'files': files,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
index 57bb8b881a..26a216e335 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
@@ -19,6 +19,16 @@ class ExtractOpenGL(publish.Extractor,
def process(self, instance):
ropnode = hou.node(instance.data.get("instance_node"))
+ # This plugin is triggered when marking render as reviewable.
+ # Therefore, this plugin may run on the wrong instances.
+ # TODO: Don't run this plugin on wrong instances.
+ # This plugin should run only on review product type
+ # with instance node of opengl type.
+ if ropnode.type().name() != "opengl":
+ self.log.debug("Skipping OpenGl extraction. Rop node {} "
+ "is not an OpenGl node.".format(ropnode.path()))
+ return
+
output = ropnode.evalParm("picture")
staging_dir = os.path.normpath(os.path.dirname(output))
instance.data["stagingDir"] = staging_dir
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_render.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_render.py
new file mode 100644
index 0000000000..7b4762a25f
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_render.py
@@ -0,0 +1,74 @@
+import pyblish.api
+
+from ayon_core.pipeline import publish
+from ayon_core.hosts.houdini.api.lib import render_rop
+import hou
+import os
+
+
+class ExtractRender(publish.Extractor):
+
+ order = pyblish.api.ExtractorOrder
+ label = "Extract Render"
+ hosts = ["houdini"]
+ families = ["mantra_rop",
+ "karma_rop",
+ "redshift_rop",
+ "arnold_rop",
+ "vray_rop"]
+
+ def process(self, instance):
+ creator_attribute = instance.data["creator_attributes"]
+ product_type = instance.data["productType"]
+ rop_node = hou.node(instance.data.get("instance_node"))
+
+ # Align split parameter value on rop node to the render target.
+ if instance.data["splitRender"]:
+ if product_type == "arnold_rop":
+ rop_node.setParms({"ar_ass_export_enable": 1})
+ elif product_type == "mantra_rop":
+ rop_node.setParms({"soho_outputmode": 1})
+ elif product_type == "redshift_rop":
+ rop_node.setParms({"RS_archive_enable": 1})
+ elif product_type == "vray_rop":
+ rop_node.setParms({"render_export_mode": "2"})
+ else:
+ if product_type == "arnold_rop":
+ rop_node.setParms({"ar_ass_export_enable": 0})
+ elif product_type == "mantra_rop":
+ rop_node.setParms({"soho_outputmode": 0})
+ elif product_type == "redshift_rop":
+ rop_node.setParms({"RS_archive_enable": 0})
+ elif product_type == "vray_rop":
+ rop_node.setParms({"render_export_mode": "1"})
+
+ if instance.data.get("farm"):
+ self.log.debug("Render should be processed on farm, skipping local render.")
+ return
+
+ if creator_attribute.get("render_target") == "local":
+ ropnode = hou.node(instance.data.get("instance_node"))
+ render_rop(ropnode)
+
+ # `ExpectedFiles` is a list that includes one dict.
+ expected_files = instance.data["expectedFiles"][0]
+ # Each key in that dict is a list of files.
+ # Combine lists of files into one big list.
+ all_frames = []
+ for value in expected_files.values():
+ if isinstance(value, str):
+ all_frames.append(value)
+ elif isinstance(value, list):
+ all_frames.extend(value)
+ # Check missing frames.
+ # Frames won't exist if user cancels the render.
+ missing_frames = [
+ frame
+ for frame in all_frames
+ if not os.path.exists(frame)
+ ]
+ if missing_frames:
+ # TODO: Use user-friendly error reporting.
+ raise RuntimeError("Failed to complete render extraction. "
+ "Missing output files: {}".format(
+ missing_frames))
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py b/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py
index fe8fa25f10..3e9291d5c2 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py
@@ -17,11 +17,13 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["houdini"]
families = ["workfile",
- "redshift_rop",
- "arnold_rop",
+ "usdrender",
"mantra_rop",
"karma_rop",
- "usdrender",
+ "redshift_rop",
+ "arnold_rop",
+ "vray_rop",
+ "render.local.hou",
"publish.hou"]
optional = True
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
index e7f528ba57..fa532c5437 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
@@ -56,6 +56,18 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
def process(self, instance):
+ rop_node = hou.node(instance.data["instance_node"])
+
+ # This plugin is triggered when marking render as reviewable.
+ # Therefore, this plugin may run on the wrong instances.
+ # TODO: Don't run this plugin on wrong instances.
+ # This plugin should run only on review product type
+ # with instance node of opengl type.
+ if rop_node.type().name() != "opengl":
+ self.log.debug("Skipping Validation. Rop node {} "
+ "is not an OpenGl node.".format(rop_node.path()))
+ return
+
if not self.is_active(instance.data):
return
@@ -66,7 +78,6 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
)
return
- rop_node = hou.node(instance.data["instance_node"])
if rop_node.evalParm("colorcorrect") != 2:
# any colorspace settings other than default requires
# 'Color Correct' parm to be set to 'OpenColorIO'
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py
index b6007d3f0f..0b09306b0d 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py
@@ -20,6 +20,16 @@ class ValidateSceneReview(pyblish.api.InstancePlugin):
report = []
instance_node = hou.node(instance.data.get("instance_node"))
+ # This plugin is triggered when marking render as reviewable.
+ # Therefore, this plugin may run on the wrong instances.
+ # TODO: Don't run this plugin on wrong instances.
+ # This plugin should run only on review product type
+ # with instance node of opengl type.
+ if instance_node.type().name() != "opengl":
+ self.log.debug("Skipping Validation. Rop node {} "
+ "is not an OpenGl node.".format(instance_node.path()))
+ return
+
invalid = self.get_invalid_scene_path(instance_node)
if invalid:
report.append(invalid)
diff --git a/client/ayon_core/hosts/houdini/startup/OPmenu.xml b/client/ayon_core/hosts/houdini/startup/OPmenu.xml
new file mode 100644
index 0000000000..0a7b265fa1
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/startup/OPmenu.xml
@@ -0,0 +1,29 @@
+
+
+
+
+
+
diff --git a/client/ayon_core/hosts/max/api/lib.py b/client/ayon_core/hosts/max/api/lib.py
index d9a3af3336..f20f754248 100644
--- a/client/ayon_core/hosts/max/api/lib.py
+++ b/client/ayon_core/hosts/max/api/lib.py
@@ -6,12 +6,9 @@ import json
from typing import Any, Dict, Union
import six
-import ayon_api
from ayon_core.pipeline import (
get_current_project_name,
- get_current_folder_path,
- get_current_task_name,
colorspace
)
from ayon_core.settings import get_project_settings
@@ -372,12 +369,8 @@ def reset_colorspace():
"""
if int(get_max_version()) < 2024:
return
- project_name = get_current_project_name()
- colorspace_mgr = rt.ColorPipelineMgr
- project_settings = get_project_settings(project_name)
- max_config_data = colorspace.get_imageio_config(
- project_name, "max", project_settings)
+ max_config_data = colorspace.get_current_context_imageio_config_preset()
if max_config_data:
ocio_config_path = max_config_data["path"]
colorspace_mgr = rt.ColorPipelineMgr
@@ -392,10 +385,7 @@ def check_colorspace():
"because Max main window can't be found.")
if int(get_max_version()) >= 2024:
color_mgr = rt.ColorPipelineMgr
- project_name = get_current_project_name()
- project_settings = get_project_settings(project_name)
- max_config_data = colorspace.get_imageio_config(
- project_name, "max", project_settings)
+ max_config_data = colorspace.get_current_context_imageio_config_preset()
if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"):
if not is_headless():
from ayon_core.tools.utils import SimplePopup
diff --git a/client/ayon_core/hosts/max/api/pipeline.py b/client/ayon_core/hosts/max/api/pipeline.py
index dc13f47795..d9cfc3407f 100644
--- a/client/ayon_core/hosts/max/api/pipeline.py
+++ b/client/ayon_core/hosts/max/api/pipeline.py
@@ -52,11 +52,7 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
self._has_been_setup = True
- def context_setting():
- return lib.set_context_setting()
-
- rt.callbacks.addScript(rt.Name('systemPostNew'),
- context_setting)
+ rt.callbacks.addScript(rt.Name('systemPostNew'), on_new)
rt.callbacks.addScript(rt.Name('filePostOpen'),
lib.check_colorspace)
@@ -163,6 +159,14 @@ def ls() -> list:
yield lib.read(container)
+def on_new():
+ lib.set_context_setting()
+ if rt.checkForSave():
+ rt.resetMaxFile(rt.Name("noPrompt"))
+ rt.clearUndoBuffer()
+ rt.redrawViews()
+
+
def containerise(name: str, nodes: list, context,
namespace=None, loader=None, suffix="_CON"):
data = {
diff --git a/client/ayon_core/hosts/maya/api/alembic.py b/client/ayon_core/hosts/maya/api/alembic.py
index bf887df4c7..6bd00e1cb1 100644
--- a/client/ayon_core/hosts/maya/api/alembic.py
+++ b/client/ayon_core/hosts/maya/api/alembic.py
@@ -22,7 +22,6 @@ ALEMBIC_ARGS = {
"melPostJobCallback": str,
"noNormals": bool,
"preRoll": bool,
- "preRollStartFrame": int,
"pythonPerFrameCallback": str,
"pythonPostJobCallback": str,
"renderableOnly": bool,
@@ -54,15 +53,22 @@ def extract_alembic(
endFrame=None,
eulerFilter=True,
frameRange="",
+ melPerFrameCallback=None,
+ melPostJobCallback=None,
noNormals=False,
preRoll=False,
preRollStartFrame=0,
+ pythonPerFrameCallback=None,
+ pythonPostJobCallback=None,
renderableOnly=False,
root=None,
selection=True,
startFrame=None,
step=1.0,
stripNamespaces=True,
+ userAttr=None,
+ userAttrPrefix=None,
+ uvsOnly=False,
uvWrite=True,
verbose=False,
wholeFrameGeo=False,
@@ -102,6 +108,11 @@ def extract_alembic(
string formatted as: "startFrame endFrame". This argument
overrides `startFrame` and `endFrame` arguments.
+ melPerFrameCallback (Optional[str]): MEL callback run per frame.
+
+ melPostJobCallback (Optional[str]): MEL callback after last frame is
+ written.
+
noNormals (bool): When on, normal data from the original polygon
objects is not included in the exported Alembic cache file.
@@ -113,6 +124,11 @@ def extract_alembic(
dependent translations and can be used to evaluate run-up that
isn't actually translated. Defaults to 0.
+ pythonPerFrameCallback (Optional[str]): Python callback run per frame.
+
+ pythonPostJobCallback (Optional[str]): Python callback after last frame
+ is written.
+
renderableOnly (bool): When on, any non-renderable nodes or hierarchy,
such as hidden objects, are not included in the Alembic file.
Defaults to False.
@@ -137,6 +153,15 @@ def extract_alembic(
object with the namespace taco:foo:bar appears as bar in the
Alembic file.
+ userAttr (list of str, optional): A specific user defined attribute to
+ write out. Defaults to [].
+
+ userAttrPrefix (list of str, optional): Prefix filter for determining
+ which user defined attributes to write out. Defaults to [].
+
+ uvsOnly (bool): When on, only uv data for PolyMesh and SubD shapes
+ will be written to the Alembic file.
+
uvWrite (bool): When on, UV data from polygon meshes and subdivision
objects are written to the Alembic file. Only the current UV map is
included.
@@ -183,6 +208,8 @@ def extract_alembic(
# Ensure list arguments are valid.
attr = attr or []
attrPrefix = attrPrefix or []
+ userAttr = userAttr or []
+ userAttrPrefix = userAttrPrefix or []
root = root or []
# Pass the start and end frame on as `frameRange` so that it
@@ -213,8 +240,10 @@ def extract_alembic(
"eulerFilter": eulerFilter,
"noNormals": noNormals,
"preRoll": preRoll,
+ "root": root,
"renderableOnly": renderableOnly,
"uvWrite": uvWrite,
+ "uvsOnly": uvsOnly,
"writeColorSets": writeColorSets,
"writeFaceSets": writeFaceSets,
"wholeFrameGeo": wholeFrameGeo,
@@ -226,9 +255,10 @@ def extract_alembic(
"step": step,
"attr": attr,
"attrPrefix": attrPrefix,
+ "userAttr": userAttr,
+ "userAttrPrefix": userAttrPrefix,
"stripNamespaces": stripNamespaces,
- "verbose": verbose,
- "preRollStartFrame": preRollStartFrame
+ "verbose": verbose
}
# Validate options
@@ -264,6 +294,17 @@ def extract_alembic(
if maya_version >= 2018:
options['autoSubd'] = options.pop('writeCreases', False)
+ # Only add callbacks if they are set so that we're not passing `None`
+ callbacks = {
+ "melPerFrameCallback": melPerFrameCallback,
+ "melPostJobCallback": melPostJobCallback,
+ "pythonPerFrameCallback": pythonPerFrameCallback,
+ "pythonPostJobCallback": pythonPostJobCallback,
+ }
+ for key, callback in callbacks.items():
+ if callback:
+ options[key] = str(callback)
+
# Format the job string from options
job_args = list()
for key, value in options.items():
@@ -297,7 +338,11 @@ def extract_alembic(
# exports are made. (PLN-31)
# TODO: Make sure this actually fixes the issues
with evaluation("off"):
- cmds.AbcExport(j=job_str, verbose=verbose)
+ cmds.AbcExport(
+ j=job_str,
+ verbose=verbose,
+ preRollStartFrame=preRollStartFrame
+ )
if verbose:
log.debug("Extracted Alembic to: %s", file)
diff --git a/client/ayon_core/hosts/maya/api/fbx.py b/client/ayon_core/hosts/maya/api/fbx.py
index 939da4011b..fd1bf2c901 100644
--- a/client/ayon_core/hosts/maya/api/fbx.py
+++ b/client/ayon_core/hosts/maya/api/fbx.py
@@ -47,7 +47,7 @@ class FBXExtractor:
"smoothMesh": bool,
"instances": bool,
# "referencedContainersContent": bool, # deprecated in Maya 2016+
- "bakeComplexAnimation": int,
+ "bakeComplexAnimation": bool,
"bakeComplexStart": int,
"bakeComplexEnd": int,
"bakeComplexStep": int,
@@ -59,6 +59,7 @@ class FBXExtractor:
"constraints": bool,
"lights": bool,
"embeddedTextures": bool,
+ "includeChildren": bool,
"inputConnections": bool,
"upAxis": str, # x, y or z,
"triangulate": bool,
@@ -102,6 +103,7 @@ class FBXExtractor:
"constraints": False,
"lights": True,
"embeddedTextures": False,
+ "includeChildren": True,
"inputConnections": True,
"upAxis": "y",
"triangulate": False,
diff --git a/client/ayon_core/hosts/maya/api/lib.py b/client/ayon_core/hosts/maya/api/lib.py
index 89efc69ba8..2b41ffc06c 100644
--- a/client/ayon_core/hosts/maya/api/lib.py
+++ b/client/ayon_core/hosts/maya/api/lib.py
@@ -1299,7 +1299,7 @@ def is_visible(node,
override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node))
override_visibility = cmds.getAttr('{}.overrideVisibility'.format(
node))
- if override_enabled and override_visibility:
+ if override_enabled and not override_visibility:
return False
if parentHidden:
diff --git a/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py b/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py
index 08d50a1ab8..069762e4ae 100644
--- a/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py
+++ b/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py
@@ -6,7 +6,6 @@ from ayon_core.lib import (
BoolDef,
NumberDef,
)
-from ayon_core.pipeline import CreatedInstance
def _get_animation_attr_defs(cls):
diff --git a/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py
index dc0ffb02c1..e321c13ca0 100644
--- a/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py
+++ b/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py
@@ -1,3 +1,5 @@
+from maya import cmds
+
from ayon_core.hosts.maya.api import (
lib,
plugin
@@ -87,16 +89,24 @@ class CreateArnoldSceneSource(plugin.MayaCreator):
return defs
+
+class CreateArnoldSceneSourceProxy(CreateArnoldSceneSource):
+ """Arnold Scene Source Proxy
+
+ This product type facilitates working with proxy geometry in the viewport.
+ """
+
+ identifier = "io.openpype.creators.maya.assproxy"
+ label = "Arnold Scene Source Proxy"
+ product_type = "assProxy"
+ icon = "cube"
+
def create(self, product_name, instance_data, pre_create_data):
-
- from maya import cmds
-
instance = super(CreateArnoldSceneSource, self).create(
product_name, instance_data, pre_create_data
)
instance_node = instance.get("instance_node")
- content = cmds.sets(name=instance_node + "_content_SET", empty=True)
proxy = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
- cmds.sets([content, proxy], forceElement=instance_node)
+ cmds.sets([proxy], forceElement=instance_node)
diff --git a/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py b/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py
index 4b7d2f42ab..ae3b68965a 100644
--- a/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py
+++ b/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py
@@ -12,6 +12,7 @@ from ayon_core.hosts.maya.api.lib import (
unique_namespace,
get_attribute_input,
maintained_selection,
+ get_fps_for_current_context
)
from ayon_core.hosts.maya.api.pipeline import containerise
from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type
@@ -29,7 +30,13 @@ class ArnoldStandinLoader(load.LoaderPlugin):
"""Load as Arnold standin"""
product_types = {
- "ass", "animation", "model", "proxyAbc", "pointcache", "usd"
+ "ass",
+ "assProxy",
+ "animation",
+ "model",
+ "proxyAbc",
+ "pointcache",
+ "usd"
}
representations = {"ass", "abc", "usda", "usdc", "usd"}
@@ -95,8 +102,10 @@ class ArnoldStandinLoader(load.LoaderPlugin):
sequence = is_sequence(os.listdir(os.path.dirname(repre_path)))
cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
- fps = float(version_attributes.get("fps")) or 25
- cmds.setAttr(standin_shape + ".abcFPS", fps)
+ fps = (
+ version_attributes.get("fps") or get_fps_for_current_context()
+ )
+ cmds.setAttr(standin_shape + ".abcFPS", float(fps))
nodes = [root, standin, standin_shape]
if operator is not None:
@@ -128,6 +137,18 @@ class ArnoldStandinLoader(load.LoaderPlugin):
proxy_path = "/".join([os.path.dirname(path), proxy_basename])
return proxy_basename, proxy_path
+ def _update_operators(self, string_replace_operator, proxy_basename, path):
+ cmds.setAttr(
+ string_replace_operator + ".match",
+ proxy_basename.split(".")[0],
+ type="string"
+ )
+ cmds.setAttr(
+ string_replace_operator + ".replace",
+ os.path.basename(path).split(".")[0],
+ type="string"
+ )
+
def _setup_proxy(self, shape, path, namespace):
proxy_basename, proxy_path = self._get_proxy_path(path)
@@ -150,16 +171,7 @@ class ArnoldStandinLoader(load.LoaderPlugin):
"*.(@node=='{}')".format(node_type),
type="string"
)
- cmds.setAttr(
- string_replace_operator + ".match",
- proxy_basename,
- type="string"
- )
- cmds.setAttr(
- string_replace_operator + ".replace",
- os.path.basename(path),
- type="string"
- )
+ self._update_operators(string_replace_operator, proxy_basename, path)
cmds.connectAttr(
string_replace_operator + ".out",
@@ -194,18 +206,9 @@ class ArnoldStandinLoader(load.LoaderPlugin):
path = get_representation_path(repre_entity)
proxy_basename, proxy_path = self._get_proxy_path(path)
- # Whether there is proxy or so, we still update the string operator.
+ # Whether there is proxy or not, we still update the string operator.
# If no proxy exists, the string operator won't replace anything.
- cmds.setAttr(
- string_replace_operator + ".match",
- proxy_basename,
- type="string"
- )
- cmds.setAttr(
- string_replace_operator + ".replace",
- os.path.basename(path),
- type="string"
- )
+ self._update_operators(string_replace_operator, proxy_basename, path)
dso_path = path
if os.path.exists(proxy_path):
diff --git a/client/ayon_core/hosts/maya/plugins/load/load_image.py b/client/ayon_core/hosts/maya/plugins/load/load_image.py
index 5b0858ce70..171920f747 100644
--- a/client/ayon_core/hosts/maya/plugins/load/load_image.py
+++ b/client/ayon_core/hosts/maya/plugins/load/load_image.py
@@ -8,7 +8,7 @@ from ayon_core.pipeline import (
from ayon_core.pipeline.load.utils import get_representation_path_from_context
from ayon_core.pipeline.colorspace import (
get_imageio_file_rules_colorspace_from_filepath,
- get_imageio_config,
+ get_current_context_imageio_config_preset,
get_imageio_file_rules
)
from ayon_core.settings import get_project_settings
@@ -270,8 +270,7 @@ class FileNodeLoader(load.LoaderPlugin):
host_name = get_current_host_name()
project_settings = get_project_settings(project_name)
- config_data = get_imageio_config(
- project_name, host_name,
+ config_data = get_current_context_imageio_config_preset(
project_settings=project_settings
)
diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py
index 0db89bee31..fb71e128eb 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py
@@ -10,21 +10,23 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
# Offset to be after renderable camera collection.
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Arnold Scene Source"
- families = ["ass"]
+ families = ["ass", "assProxy"]
def process(self, instance):
- objsets = instance.data["setMembers"]
+ instance.data["members"] = []
+ for set_member in instance.data["setMembers"]:
+ if cmds.nodeType(set_member) != "objectSet":
+ instance.data["members"].extend(self.get_hierarchy(set_member))
+ continue
- for objset in objsets:
- objset = str(objset)
- members = cmds.sets(objset, query=True)
+ members = cmds.sets(set_member, query=True)
members = cmds.ls(members, long=True)
if members is None:
- self.log.warning("Skipped empty instance: \"%s\" " % objset)
+ self.log.warning(
+ "Skipped empty instance: \"%s\" " % set_member
+ )
continue
- if objset.endswith("content_SET"):
- instance.data["contentMembers"] = self.get_hierarchy(members)
- if objset.endswith("proxy_SET"):
+ if set_member.endswith("proxy_SET"):
instance.data["proxy"] = self.get_hierarchy(members)
# Use camera in object set if present else default to render globals
@@ -33,7 +35,7 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)]
if renderable:
camera = renderable[0]
- for node in instance.data["contentMembers"]:
+ for node in instance.data["members"]:
camera_shapes = cmds.listRelatives(
node, shapes=True, type="camera"
)
@@ -46,18 +48,11 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
self.log.debug("data: {}".format(instance.data))
def get_hierarchy(self, nodes):
- """Return nodes with all their children.
-
- Arguments:
- nodes (List[str]): List of nodes to collect children hierarchy for
-
- Returns:
- list: Input nodes with their children hierarchy
-
- """
+ """Return nodes with all their children"""
nodes = cmds.ls(nodes, long=True)
if not nodes:
return []
-
- children = get_all_children(nodes, ignore_intermediate_objects=True)
- return list(children.union(nodes))
+ children = get_all_children(nodes)
+ # Make sure nodes merged with children only
+ # contains unique entries
+ return list(set(nodes + list(children)))
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py
index ed8f2ad40c..fb4c41f1de 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py
@@ -17,8 +17,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
families = ["ass"]
asciiAss = False
- def process(self, instance):
- staging_dir = self.staging_dir(instance)
+ def _pre_process(self, instance, staging_dir):
file_path = os.path.join(staging_dir, "{}.ass".format(instance.name))
# Mask
@@ -70,24 +69,38 @@ class ExtractArnoldSceneSource(publish.Extractor):
"mask": mask
}
- filenames, nodes_by_id = self._extract(
- instance.data["contentMembers"], attribute_data, kwargs
- )
-
if "representations" not in instance.data:
instance.data["representations"] = []
+ return attribute_data, kwargs
+
+ def process(self, instance):
+ staging_dir = self.staging_dir(instance)
+ attribute_data, kwargs = self._pre_process(instance, staging_dir)
+
+ filenames = self._extract(
+ instance.data["members"], attribute_data, kwargs
+ )
+
+ self._post_process(
+ instance, filenames, staging_dir, kwargs["startFrame"]
+ )
+
+ def _post_process(self, instance, filenames, staging_dir, frame_start):
+ nodes_by_id = self._nodes_by_id(instance[:])
representation = {
"name": "ass",
"ext": "ass",
"files": filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
- "frameStart": kwargs["startFrame"]
+ "frameStart": frame_start
}
instance.data["representations"].append(representation)
- json_path = os.path.join(staging_dir, "{}.json".format(instance.name))
+ json_path = os.path.join(
+ staging_dir, "{}.json".format(instance.name)
+ )
with open(json_path, "w") as f:
json.dump(nodes_by_id, f)
@@ -104,13 +117,68 @@ class ExtractArnoldSceneSource(publish.Extractor):
"Extracted instance {} to: {}".format(instance.name, staging_dir)
)
- # Extract proxy.
- if not instance.data.get("proxy", []):
- return
+ def _nodes_by_id(self, nodes):
+ nodes_by_id = defaultdict(list)
- kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
+ for node in nodes:
+ id = lib.get_id(node)
- filenames, _ = self._extract(
+ if id is None:
+ continue
+
+ # Converting Maya hierarchy separator "|" to Arnold separator "/".
+ nodes_by_id[id].append(node.replace("|", "/"))
+
+ return nodes_by_id
+
+ def _extract(self, nodes, attribute_data, kwargs):
+ filenames = []
+ with lib.attribute_values(attribute_data):
+ with lib.maintained_selection():
+ self.log.debug(
+ "Writing: {}".format(nodes)
+ )
+ cmds.select(nodes, noExpand=True)
+
+ self.log.debug(
+ "Extracting ass sequence with: {}".format(kwargs)
+ )
+
+ exported_files = cmds.arnoldExportAss(**kwargs)
+
+ for file in exported_files:
+ filenames.append(os.path.split(file)[1])
+
+ self.log.debug("Exported: {}".format(filenames))
+
+ return filenames
+
+
+class ExtractArnoldSceneSourceProxy(ExtractArnoldSceneSource):
+ """Extract the content of the instance to an Arnold Scene Source file."""
+
+ label = "Extract Arnold Scene Source Proxy"
+ hosts = ["maya"]
+ families = ["assProxy"]
+ asciiAss = True
+
+ def process(self, instance):
+ staging_dir = self.staging_dir(instance)
+ attribute_data, kwargs = self._pre_process(instance, staging_dir)
+
+ filenames, _ = self._duplicate_extract(
+ instance.data["members"], attribute_data, kwargs
+ )
+
+ self._post_process(
+ instance, filenames, staging_dir, kwargs["startFrame"]
+ )
+
+ kwargs["filename"] = os.path.join(
+ staging_dir, "{}_proxy.ass".format(instance.name)
+ )
+
+ filenames, _ = self._duplicate_extract(
instance.data["proxy"], attribute_data, kwargs
)
@@ -125,12 +193,11 @@ class ExtractArnoldSceneSource(publish.Extractor):
instance.data["representations"].append(representation)
- def _extract(self, nodes, attribute_data, kwargs):
+ def _duplicate_extract(self, nodes, attribute_data, kwargs):
self.log.debug(
"Writing {} with:\n{}".format(kwargs["filename"], kwargs)
)
filenames = []
- nodes_by_id = defaultdict(list)
# Duplicating nodes so they are direct children of the world. This
# makes the hierarchy of any exported ass file the same.
with lib.delete_after() as delete_bin:
@@ -147,7 +214,9 @@ class ExtractArnoldSceneSource(publish.Extractor):
if not shapes:
continue
- duplicate_transform = cmds.duplicate(node)[0]
+ basename = cmds.duplicate(node)[0]
+ parents = cmds.ls(node, long=True)[0].split("|")[:-1]
+ duplicate_transform = "|".join(parents + [basename])
if cmds.listRelatives(duplicate_transform, parent=True):
duplicate_transform = cmds.parent(
@@ -172,28 +241,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
duplicate_nodes.extend(shapes)
delete_bin.append(duplicate_transform)
- # Copy cbId to mtoa_constant.
- for node in duplicate_nodes:
- # Converting Maya hierarchy separator "|" to Arnold
- # separator "/".
- nodes_by_id[lib.get_id(node)].append(node.replace("|", "/"))
-
- with lib.attribute_values(attribute_data):
- with lib.maintained_selection():
- self.log.debug(
- "Writing: {}".format(duplicate_nodes)
- )
- cmds.select(duplicate_nodes, noExpand=True)
-
- self.log.debug(
- "Extracting ass sequence with: {}".format(kwargs)
- )
-
- exported_files = cmds.arnoldExportAss(**kwargs)
-
- for file in exported_files:
- filenames.append(os.path.split(file)[1])
-
- self.log.debug("Exported: {}".format(filenames))
+ nodes_by_id = self._nodes_by_id(duplicate_nodes)
+ filenames = self._extract(duplicate_nodes, attribute_data, kwargs)
return filenames, nodes_by_id
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py b/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py
index ee66ed2fb7..77b5b79b5f 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py
@@ -35,7 +35,8 @@ class ExtractFBXAnimation(publish.Extractor):
fbx_exporter = fbx.FBXExtractor(log=self.log)
out_members = instance.data.get("animated_skeleton", [])
# Export
- instance.data["constraints"] = True
+ # TODO: need to set up the options for users to set up
+ # the flags they intended to export
instance.data["skeletonDefinitions"] = True
instance.data["referencedAssetsContent"] = True
fbx_exporter.set_options_from_instance(instance)
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py
index d7f9594374..cc930e49cc 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py
@@ -6,6 +6,7 @@ from maya import cmds
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
+ get_all_children,
suspended_refresh,
maintained_selection,
iter_visible_nodes_in_range
@@ -40,7 +41,6 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
# From settings
attr = []
attrPrefix = []
- autoSubd = False
bake_attributes = []
bake_attribute_prefixes = []
dataFormat = "ogawa"
@@ -63,6 +63,7 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
wholeFrameGeo = False
worldSpace = True
writeColorSets = False
+ writeCreases = False
writeFaceSets = False
writeNormals = True
writeUVSets = False
@@ -173,15 +174,9 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
"writeVisibility": attribute_values.get(
"writeVisibility", self.writeVisibility
),
- "autoSubd": attribute_values.get(
- "autoSubd", self.autoSubd
- ),
"uvsOnly": attribute_values.get(
"uvsOnly", self.uvsOnly
),
- "writeNormals": attribute_values.get(
- "writeNormals", self.writeNormals
- ),
"melPerFrameCallback": attribute_values.get(
"melPerFrameCallback", self.melPerFrameCallback
),
@@ -193,7 +188,12 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
),
"pythonPostJobCallback": attribute_values.get(
"pythonPostJobCallback", self.pythonPostJobCallback
- )
+ ),
+ # Note that this converts `writeNormals` to `noNormals` for the
+ # `AbcExport` equivalent in `extract_alembic`
+ "noNormals": not attribute_values.get(
+ "writeNormals", self.writeNormals
+ ),
}
if instance.data.get("visibleOnly", False):
@@ -249,7 +249,6 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
with maintained_selection():
cmds.select(instance.data["proxy"])
extract_alembic(**kwargs)
-
representation = {
"name": "proxy",
"ext": "abc",
@@ -268,20 +267,6 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
return []
override_defs = OrderedDict({
- "autoSubd": BoolDef(
- "autoSubd",
- label="Auto Subd",
- default=cls.autoSubd,
- tooltip=(
- "If this flag is present and the mesh has crease edges, "
- "crease vertices or holes, the mesh (OPolyMesh) would now "
- "be written out as an OSubD and crease info will be stored"
- " in the Alembic file. Otherwise, creases info won't be "
- "preserved in Alembic file unless a custom Boolean "
- "attribute SubDivisionMesh has been added to mesh node and"
- " its value is true."
- )
- ),
"eulerFilter": BoolDef(
"eulerFilter",
label="Euler Filter",
@@ -354,6 +339,13 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
default=cls.writeColorSets,
tooltip="Write vertex colors with the geometry."
),
+ "writeCreases": BoolDef(
+ "writeCreases",
+ label="Write Creases",
+ default=cls.writeCreases,
+ tooltip="Write the geometry's edge and vertex crease "
+ "information."
+ ),
"writeFaceSets": BoolDef(
"writeFaceSets",
label="Write Face Sets",
@@ -527,9 +519,7 @@ class ExtractAnimation(ExtractAlembic):
roots = cmds.sets(out_set, query=True) or []
# Include all descendants
- nodes = roots
- nodes += cmds.listRelatives(
- roots, allDescendents=True, fullPath=True
- ) or []
+ nodes = roots.copy()
+ nodes.extend(get_all_children(roots, ignore_intermediate_objects=True))
return nodes, roots
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py b/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py
index 5197100406..11f4c313fa 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py
@@ -1,3 +1,4 @@
+import inspect
import pyblish.api
from ayon_core.pipeline import OptionalPyblishPluginMixin
@@ -29,29 +30,28 @@ class ValidateAlembicDefaultsPointcache(
@classmethod
def _get_publish_attributes(cls, instance):
- attributes = instance.data["publish_attributes"][
- cls.plugin_name(
- instance.data["publish_attributes"]
- )
- ]
-
- return attributes
+ return instance.data["publish_attributes"][cls.plugin_name]
def process(self, instance):
if not self.is_active(instance.data):
return
settings = self._get_settings(instance.context)
-
attributes = self._get_publish_attributes(instance)
- msg = (
- "Alembic Extract setting \"{}\" is not the default value:"
- "\nCurrent: {}"
- "\nDefault Value: {}\n"
- )
- errors = []
+ invalid = {}
for key, value in attributes.items():
+ if key not in settings:
+ # This may occur if attributes have changed over time and an
+ # existing instance has older legacy attributes that do not
+ # match the current settings definition.
+ self.log.warning(
+ "Publish attribute %s not found in Alembic Export "
+ "default settings. Ignoring validation for attribute.",
+ key
+ )
+ continue
+
default_value = settings[key]
# Lists are best to compared sorted since we cant rely on the order
@@ -61,10 +61,35 @@ class ValidateAlembicDefaultsPointcache(
default_value = sorted(default_value)
if value != default_value:
- errors.append(msg.format(key, value, default_value))
+ invalid[key] = value, default_value
- if errors:
- raise PublishValidationError("\n".join(errors))
+ if invalid:
+ non_defaults = "\n".join(
+ f"- {key}: {value} \t(default: {default_value})"
+ for key, (value, default_value) in invalid.items()
+ )
+
+ raise PublishValidationError(
+ "Alembic extract options differ from default values:\n"
+ f"{non_defaults}",
+ description=self.get_description()
+ )
+
+ @staticmethod
+ def get_description():
+ return inspect.cleandoc(
+ """### Alembic Extract settings differ from defaults
+
+ The alembic export options differ from the project default values.
+
+ If this is intentional you can disable this validation by
+ disabling **Validate Alembic Options Default**.
+
+ If not you may use the "Repair" action to revert all the options to
+ their default values.
+
+ """
+ )
@classmethod
def repair(cls, instance):
@@ -75,13 +100,20 @@ class ValidateAlembicDefaultsPointcache(
)
# Set the settings values on the create context then save to workfile.
- publish_attributes = instance.data["publish_attributes"]
- plugin_name = cls.plugin_name(publish_attributes)
- attributes = cls._get_publish_attributes(instance)
settings = cls._get_settings(instance.context)
- create_publish_attributes = create_instance.data["publish_attributes"]
+ attributes = cls._get_publish_attributes(create_instance)
for key in attributes:
- create_publish_attributes[plugin_name][key] = settings[key]
+ if key not in settings:
+ # This may occur if attributes have changed over time and an
+ # existing instance has older legacy attributes that do not
+ # match the current settings definition.
+ cls.log.warning(
+ "Publish attribute %s not found in Alembic Export "
+ "default settings. Ignoring repair for attribute.",
+ key
+ )
+ continue
+ attributes[key] = settings[key]
create_context.save_changes()
@@ -93,6 +125,6 @@ class ValidateAlembicDefaultsAnimation(
The defaults are defined in the project settings.
"""
- label = "Validate Alembic Options Defaults"
+ label = "Validate Alembic Options Defaults"
families = ["animation"]
plugin_name = "ExtractAnimation"
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py
index 92b4922492..8574b3ecc8 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py
@@ -1,30 +1,56 @@
+from maya import cmds
+
import pyblish.api
+
from ayon_core.pipeline.publish import (
ValidateContentsOrder, PublishValidationError
)
+from ayon_core.hosts.maya.api.lib import is_visible
class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
"""Validate Arnold Scene Source.
- We require at least 1 root node/parent for the meshes. This is to ensure we
- can duplicate the nodes and preserve the names.
+ Ensure no nodes are hidden.
+ """
- If using proxies we need the nodes to share the same names and not be
+ order = ValidateContentsOrder
+ hosts = ["maya"]
+ families = ["ass", "assProxy"]
+ label = "Validate Arnold Scene Source"
+
+ def process(self, instance):
+ # Validate against having nodes hidden, which will result in the
+ # extraction to ignore the node.
+ nodes = instance.data["members"] + instance.data.get("proxy", [])
+ nodes = [x for x in nodes if cmds.objectType(x, isAType='dagNode')]
+ hidden_nodes = [
+ x for x in nodes if not is_visible(x, intermediateObject=False)
+ ]
+ if hidden_nodes:
+ raise PublishValidationError(
+ "Found hidden nodes:\n\n{}\n\nPlease unhide for"
+ " publishing.".format("\n".join(hidden_nodes))
+ )
+
+
+class ValidateArnoldSceneSourceProxy(pyblish.api.InstancePlugin):
+ """Validate Arnold Scene Source Proxy.
+
+ When using proxies we need the nodes to share the same names and not be
parent to the world. This ends up needing at least two groups with content
nodes and proxy nodes in another.
"""
order = ValidateContentsOrder
hosts = ["maya"]
- families = ["ass"]
- label = "Validate Arnold Scene Source"
+ families = ["assProxy"]
+ label = "Validate Arnold Scene Source Proxy"
def _get_nodes_by_name(self, nodes):
ungrouped_nodes = []
nodes_by_name = {}
parents = []
- same_named_nodes = {}
for node in nodes:
node_split = node.split("|")
if len(node_split) == 2:
@@ -35,33 +61,16 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
parents.append(parent)
node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
-
- # Check for same same nodes, which can happen in different
- # hierarchies.
- if node_name in nodes_by_name:
- try:
- same_named_nodes[node_name].append(node)
- except KeyError:
- same_named_nodes[node_name] = [
- nodes_by_name[node_name], node
- ]
-
nodes_by_name[node_name] = node
- if same_named_nodes:
- message = "Found nodes with the same name:"
- for name, nodes in same_named_nodes.items():
- message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes))
-
- raise PublishValidationError(message)
-
return ungrouped_nodes, nodes_by_name, parents
def process(self, instance):
+ # Validate against nodes directly parented to world.
ungrouped_nodes = []
nodes, content_nodes_by_name, content_parents = (
- self._get_nodes_by_name(instance.data["contentMembers"])
+ self._get_nodes_by_name(instance.data["members"])
)
ungrouped_nodes.extend(nodes)
@@ -70,24 +79,21 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
)
ungrouped_nodes.extend(nodes)
- # Validate against nodes directly parented to world.
if ungrouped_nodes:
raise PublishValidationError(
"Found nodes parented to the world: {}\n"
"All nodes need to be grouped.".format(ungrouped_nodes)
)
- # Proxy validation.
- if not instance.data.get("proxy", []):
- return
-
# Validate for content and proxy nodes amount being the same.
- if len(instance.data["contentMembers"]) != len(instance.data["proxy"]):
+ if len(instance.data["members"]) != len(instance.data["proxy"]):
raise PublishValidationError(
"Amount of content nodes ({}) and proxy nodes ({}) needs to "
- "be the same.".format(
- len(instance.data["contentMembers"]),
- len(instance.data["proxy"])
+ "be the same.\nContent nodes: {}\nProxy nodes:{}".format(
+ len(instance.data["members"]),
+ len(instance.data["proxy"]),
+ instance.data["members"],
+ instance.data["proxy"]
)
)
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
index a9d896952d..e5dbe178fc 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
@@ -17,7 +17,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
order = ValidateContentsOrder
hosts = ["maya"]
- families = ["ass"]
+ families = ["assProxy"]
label = "Validate Arnold Scene Source CBID"
actions = [RepairAction]
optional = False
@@ -40,15 +40,11 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
@classmethod
def get_invalid_couples(cls, instance):
- content_nodes_by_name = cls._get_nodes_by_name(
- instance.data["contentMembers"]
- )
- proxy_nodes_by_name = cls._get_nodes_by_name(
- instance.data.get("proxy", [])
- )
+ nodes_by_name = cls._get_nodes_by_name(instance.data["members"])
+ proxy_nodes_by_name = cls._get_nodes_by_name(instance.data["proxy"])
invalid_couples = []
- for content_name, content_node in content_nodes_by_name.items():
+ for content_name, content_node in nodes_by_name.items():
proxy_node = proxy_nodes_by_name.get(content_name, None)
if not proxy_node:
@@ -70,7 +66,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
if not self.is_active(instance.data):
return
# Proxy validation.
- if not instance.data.get("proxy", []):
+ if not instance.data["proxy"]:
return
# Validate for proxy nodes sharing the same cbId as content nodes.
diff --git a/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py b/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py
new file mode 100644
index 0000000000..62e10ba023
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py
@@ -0,0 +1,201 @@
+from maya import cmds
+
+from ayon_core.hosts.maya.api.workfile_template_builder import (
+ MayaPlaceholderPlugin
+)
+from ayon_core.lib import NumberDef, TextDef, EnumDef
+from ayon_core.lib.events import weakref_partial
+
+
+EXAMPLE_SCRIPT = """
+# Access maya commands
+from maya import cmds
+
+# Access the placeholder node
+placeholder_node = placeholder.scene_identifier
+
+# Access the event callback
+if event is None:
+ print(f"Populating {placeholder}")
+else:
+ if event.topic == "template.depth_processed":
+ print(f"Processed depth: {event.get('depth')}")
+ elif event.topic == "template.finished":
+ print("Build finished.")
+""".strip()
+
+
+class MayaPlaceholderScriptPlugin(MayaPlaceholderPlugin):
+ """Execute a script at the given `order` during workfile build.
+
+ This is a very low-level placeholder to run Python scripts at a given
+ point in time during the workfile template build.
+
+ It can create either a locator or an objectSet as placeholder node.
+ It defaults to an objectSet, since allowing to run on e.g. other
+ placeholder node members can be useful, e.g. using:
+
+ >>> members = cmds.sets(placeholder.scene_identifier, query=True)
+
+ """
+
+ identifier = "maya.runscript"
+ label = "Run Python Script"
+
+ use_selection_as_parent = False
+
+ def get_placeholder_options(self, options=None):
+ options = options or {}
+ return [
+ NumberDef(
+ "order",
+ label="Order",
+ default=options.get("order") or 0,
+ decimals=0,
+ minimum=0,
+ maximum=999,
+ tooltip=(
+ "Order"
+ "\nOrder defines asset loading priority (0 to 999)"
+ "\nPriority rule is : \"lowest is first to load\"."
+ )
+ ),
+ TextDef(
+ "prepare_script",
+ label="Run at\nprepare",
+ tooltip="Run before populate at prepare order",
+ multiline=True,
+ default=options.get("prepare_script", "")
+ ),
+ TextDef(
+ "populate_script",
+ label="Run at\npopulate",
+                tooltip="Run script at populate node order\n"
+                        "This is the default behavior",
+ multiline=True,
+ default=options.get("populate_script", EXAMPLE_SCRIPT)
+ ),
+ TextDef(
+ "depth_processed_script",
+ label="Run after\ndepth\niteration",
+ tooltip="Run script after every build depth iteration",
+ multiline=True,
+ default=options.get("depth_processed_script", "")
+ ),
+ TextDef(
+ "finished_script",
+ label="Run after\nbuild",
+ tooltip=(
+                    "Run script at build finished.\n"
+ "Note: this even runs if other placeholders had "
+ "errors during the build"
+ ),
+ multiline=True,
+ default=options.get("finished_script", "")
+ ),
+ EnumDef(
+ "create_nodetype",
+ label="Nodetype",
+ items={
+ "spaceLocator": "Locator",
+ "objectSet": "ObjectSet"
+ },
+ tooltip=(
+                    "The placeholder's node type to be created.\n"
+ "Note this only works on create, not on update"
+ ),
+ default=options.get("create_nodetype", "objectSet")
+ ),
+ ]
+
+ def create_placeholder(self, placeholder_data):
+ nodetype = placeholder_data.get("create_nodetype", "objectSet")
+
+ if nodetype == "spaceLocator":
+ super(MayaPlaceholderScriptPlugin, self).create_placeholder(
+ placeholder_data
+ )
+ elif nodetype == "objectSet":
+ placeholder_data["plugin_identifier"] = self.identifier
+
+ # Create maya objectSet on selection
+ selection = cmds.ls(selection=True, long=True)
+ name = self._create_placeholder_name(placeholder_data)
+ node = cmds.sets(selection, name=name)
+
+ self.imprint(node, placeholder_data)
+
+ def prepare_placeholders(self, placeholders):
+ super(MayaPlaceholderScriptPlugin, self).prepare_placeholders(
+ placeholders
+ )
+ for placeholder in placeholders:
+ prepare_script = placeholder.data.get("prepare_script")
+ if not prepare_script:
+ continue
+
+ self.run_script(placeholder, prepare_script)
+
+ def populate_placeholder(self, placeholder):
+
+ populate_script = placeholder.data.get("populate_script")
+ depth_script = placeholder.data.get("depth_processed_script")
+ finished_script = placeholder.data.get("finished_script")
+
+ # Run now
+ if populate_script:
+ self.run_script(placeholder, populate_script)
+
+ if not any([depth_script, finished_script]):
+ # No callback scripts to run
+ if not placeholder.data.get("keep_placeholder", True):
+ self.delete_placeholder(placeholder)
+ return
+
+ # Run at each depth processed
+ if depth_script:
+ callback = weakref_partial(
+ self.run_script, placeholder, depth_script)
+ self.builder.add_on_depth_processed_callback(
+ callback, order=placeholder.order)
+
+ # Run at build finish
+ if finished_script:
+ callback = weakref_partial(
+ self.run_script, placeholder, finished_script)
+ self.builder.add_on_finished_callback(
+ callback, order=placeholder.order)
+
+ # If placeholder should be deleted, delete it after finish so
+ # the scripts have access to it up to the last run
+ if not placeholder.data.get("keep_placeholder", True):
+ delete_callback = weakref_partial(
+ self.delete_placeholder, placeholder)
+ self.builder.add_on_finished_callback(
+ delete_callback, order=placeholder.order + 1)
+
+ def run_script(self, placeholder, script, event=None):
+ """Run script
+
+        Even though `placeholder` is an unused argument, exposing it as
+        an input argument makes it available through
+ globals()/locals() in the `exec` call, giving the script access
+ to the placeholder.
+
+ For example:
+ >>> node = placeholder.scene_identifier
+
+ In the case the script is running at a callback level (not during
+ populate) then it has access to the `event` as well, otherwise the
+ value is None if it runs during `populate_placeholder` directly.
+
+ For example adding this as the callback script:
+ >>> if event is not None:
+        >>>     if event.topic == "template.depth_processed":
+        >>>         print(f"Processed depth: {event.get('depth')}")
+        >>>     elif event.topic == "template.finished":
+ >>> print("Build finished.")
+
+ """
+ self.log.debug(f"Running script at event: {event}")
+ exec(script, locals())
diff --git a/client/ayon_core/hosts/nuke/api/lib.py b/client/ayon_core/hosts/nuke/api/lib.py
index e3505a16f2..0a4755c166 100644
--- a/client/ayon_core/hosts/nuke/api/lib.py
+++ b/client/ayon_core/hosts/nuke/api/lib.py
@@ -43,7 +43,9 @@ from ayon_core.pipeline import (
from ayon_core.pipeline.context_tools import (
get_current_context_custom_workfile_template
)
-from ayon_core.pipeline.colorspace import get_imageio_config
+from ayon_core.pipeline.colorspace import (
+ get_current_context_imageio_config_preset
+)
from ayon_core.pipeline.workfile import BuildWorkfile
from . import gizmo_menu
from .constants import ASSIST
@@ -1552,10 +1554,7 @@ class WorkfileSettings(object):
imageio_host (dict): host colorspace configurations
'''
- config_data = get_imageio_config(
- project_name=get_current_project_name(),
- host_name="nuke"
- )
+ config_data = get_current_context_imageio_config_preset()
workfile_settings = imageio_host["workfile"]
color_management = workfile_settings["color_management"]
diff --git a/client/ayon_core/hosts/nuke/api/plugin.py b/client/ayon_core/hosts/nuke/api/plugin.py
index fb56dec833..ec13104d4d 100644
--- a/client/ayon_core/hosts/nuke/api/plugin.py
+++ b/client/ayon_core/hosts/nuke/api/plugin.py
@@ -778,6 +778,7 @@ class ExporterReviewMov(ExporterReview):
# deal with now lut defined in viewer lut
self.viewer_lut_raw = klass.viewer_lut_raw
self.write_colorspace = instance.data["colorspace"]
+ self.color_channels = instance.data["color_channels"]
self.name = name or "baked"
self.ext = ext or "mov"
@@ -834,7 +835,7 @@ class ExporterReviewMov(ExporterReview):
self.log.info("Nodes exported...")
return path
- def generate_mov(self, farm=False, **kwargs):
+ def generate_mov(self, farm=False, delete=True, **kwargs):
# colorspace data
colorspace = None
# get colorspace settings
@@ -947,6 +948,8 @@ class ExporterReviewMov(ExporterReview):
self.log.debug("Path: {}".format(self.path))
write_node["file"].setValue(str(self.path))
write_node["file_type"].setValue(str(self.ext))
+ write_node["channels"].setValue(str(self.color_channels))
+
# Knobs `meta_codec` and `mov64_codec` are not available on centos.
# TODO shouldn't this come from settings on outputs?
try:
@@ -987,8 +990,13 @@ class ExporterReviewMov(ExporterReview):
self.render(write_node.name())
# ---------- generate representation data
+ tags = ["review", "need_thumbnail"]
+
+ if delete:
+ tags.append("delete")
+
self.get_representation_data(
- tags=["review", "need_thumbnail", "delete"] + add_tags,
+ tags=tags + add_tags,
custom_tags=add_custom_tags,
range=True,
colorspace=colorspace
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py b/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py
index 7d823919dc..50af8a4eb9 100644
--- a/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py
@@ -62,7 +62,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
}
# add attributes from the version to imprint to metadata knob
- for k in ["source", "author", "fps"]:
+ for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# getting file path
@@ -206,7 +206,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
"colorspaceInput": colorspace,
}
- for k in ["source", "author", "fps"]:
+ for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# adding nodes to node graph
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py b/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py
index 14c54c3adc..3c7d4f3bb2 100644
--- a/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py
@@ -48,7 +48,7 @@ class AlembicCameraLoader(load.LoaderPlugin):
"frameEnd": last,
"version": version_entity["version"],
}
- for k in ["source", "author", "fps"]:
+ for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# getting file path
@@ -123,7 +123,7 @@ class AlembicCameraLoader(load.LoaderPlugin):
}
# add attributes from the version to imprint to metadata knob
- for k in ["source", "author", "fps"]:
+ for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# getting file path
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_clip.py b/client/ayon_core/hosts/nuke/plugins/load/load_clip.py
index df8f2ab018..7fa90da86f 100644
--- a/client/ayon_core/hosts/nuke/plugins/load/load_clip.py
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_clip.py
@@ -9,7 +9,8 @@ from ayon_core.pipeline import (
get_representation_path,
)
from ayon_core.pipeline.colorspace import (
- get_imageio_file_rules_colorspace_from_filepath
+ get_imageio_file_rules_colorspace_from_filepath,
+ get_current_context_imageio_config_preset,
)
from ayon_core.hosts.nuke.api.lib import (
get_imageio_input_colorspace,
@@ -197,7 +198,6 @@ class LoadClip(plugin.NukeLoader):
"frameStart",
"frameEnd",
"source",
- "author",
"fps",
"handleStart",
"handleEnd",
@@ -347,8 +347,7 @@ class LoadClip(plugin.NukeLoader):
"source": version_attributes.get("source"),
"handleStart": str(self.handle_start),
"handleEnd": str(self.handle_end),
- "fps": str(version_attributes.get("fps")),
- "author": version_attributes.get("author")
+ "fps": str(version_attributes.get("fps"))
}
last_version_entity = ayon_api.get_last_version_by_product_id(
@@ -547,9 +546,10 @@ class LoadClip(plugin.NukeLoader):
f"Colorspace from representation colorspaceData: {colorspace}"
)
+ config_data = get_current_context_imageio_config_preset()
# check if any filerules are not applicable
new_parsed_colorspace = get_imageio_file_rules_colorspace_from_filepath( # noqa
- filepath, "nuke", project_name
+ filepath, "nuke", project_name, config_data=config_data
)
self.log.debug(f"Colorspace new filerules: {new_parsed_colorspace}")
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_effects.py b/client/ayon_core/hosts/nuke/plugins/load/load_effects.py
index a87c81295a..be7420fcf0 100644
--- a/client/ayon_core/hosts/nuke/plugins/load/load_effects.py
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_effects.py
@@ -69,7 +69,6 @@ class LoadEffects(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
- "author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@@ -189,7 +188,6 @@ class LoadEffects(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
- "author",
"fps",
]:
data_imprint[k] = version_attributes[k]
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py b/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py
index 8fa1347598..9bb430b37b 100644
--- a/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py
@@ -69,7 +69,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
- "author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@@ -192,7 +191,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
- "author",
"fps"
]:
data_imprint[k] = version_attributes[k]
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py
index 95f85bacfc..57d00795ae 100644
--- a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py
@@ -71,7 +71,6 @@ class LoadGizmo(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
- "author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@@ -139,7 +138,6 @@ class LoadGizmo(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
- "author",
"fps"
]:
data_imprint[k] = version_attributes[k]
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py
index 3112e27811..ed2b1ec458 100644
--- a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py
@@ -73,7 +73,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
- "author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@@ -145,7 +144,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
- "author",
"fps"
]:
data_imprint[k] = version_attributes[k]
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_image.py b/client/ayon_core/hosts/nuke/plugins/load/load_image.py
index d825b621fc..b5fccd8a0d 100644
--- a/client/ayon_core/hosts/nuke/plugins/load/load_image.py
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_image.py
@@ -133,7 +133,7 @@ class LoadImage(load.LoaderPlugin):
"version": version_entity["version"],
"colorspace": colorspace,
}
- for k in ["source", "author", "fps"]:
+ for k in ["source", "fps"]:
data_imprint[k] = version_attributes.get(k, str(None))
r["tile_color"].setValue(int("0x4ecd25ff", 16))
@@ -207,7 +207,6 @@ class LoadImage(load.LoaderPlugin):
"colorspace": version_attributes.get("colorSpace"),
"source": version_attributes.get("source"),
"fps": str(version_attributes.get("fps")),
- "author": version_attributes.get("author")
}
# change color of node
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_model.py b/client/ayon_core/hosts/nuke/plugins/load/load_model.py
index 0326e0a4fc..40862cd1e0 100644
--- a/client/ayon_core/hosts/nuke/plugins/load/load_model.py
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_model.py
@@ -47,7 +47,7 @@ class AlembicModelLoader(load.LoaderPlugin):
"version": version_entity["version"]
}
# add attributes from the version to imprint to metadata knob
- for k in ["source", "author", "fps"]:
+ for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# getting file path
@@ -130,7 +130,7 @@ class AlembicModelLoader(load.LoaderPlugin):
}
# add additional metadata from the version to imprint to Avalon knob
- for k in ["source", "author", "fps"]:
+ for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# getting file path
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py b/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py
index 3e554f9d3b..d6699be164 100644
--- a/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py
@@ -55,7 +55,6 @@ class LinkAsGroup(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
- "author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@@ -131,7 +130,6 @@ class LinkAsGroup(load.LoaderPlugin):
"colorspace": version_attributes.get("colorSpace"),
"source": version_attributes.get("source"),
"fps": version_attributes.get("fps"),
- "author": version_attributes.get("author")
}
# Update the imprinted representation
diff --git a/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py
index 745351dc49..27525bcad1 100644
--- a/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py
+++ b/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py
@@ -153,6 +153,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
# Determine defined file type
ext = write_node["file_type"].value()
+ # determine defined channel type
+ color_channels = write_node["channels"].value()
+
# get frame range data
handle_start = instance.context.data["handleStart"]
handle_end = instance.context.data["handleEnd"]
@@ -172,7 +175,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
"path": write_file_path,
"outputDir": output_dir,
"ext": ext,
- "colorspace": colorspace
+ "colorspace": colorspace,
+ "color_channels": color_channels
})
if product_type == "render":
diff --git a/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py
index 8d7a3ec311..82c7b6e4c5 100644
--- a/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py
+++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py
@@ -136,11 +136,16 @@ class ExtractReviewIntermediates(publish.Extractor):
self, instance, o_name, o_data["extension"],
multiple_presets)
+ o_data["add_custom_tags"].append("intermediate")
+ delete = not o_data.get("publish", False)
+
if instance.data.get("farm"):
if "review" in instance.data["families"]:
instance.data["families"].remove("review")
- data = exporter.generate_mov(farm=True, **o_data)
+ data = exporter.generate_mov(
+ farm=True, delete=delete, **o_data
+ )
self.log.debug(
"_ data: {}".format(data))
@@ -154,7 +159,7 @@ class ExtractReviewIntermediates(publish.Extractor):
"bakeWriteNodeName": data.get("bakeWriteNodeName")
})
else:
- data = exporter.generate_mov(**o_data)
+ data = exporter.generate_mov(delete=delete, **o_data)
# add representation generated by exporter
generated_repres.extend(data["representations"])
diff --git a/client/ayon_core/hosts/photoshop/plugins/create/create_image.py b/client/ayon_core/hosts/photoshop/plugins/create/create_image.py
index 26f2469844..a44c3490c6 100644
--- a/client/ayon_core/hosts/photoshop/plugins/create/create_image.py
+++ b/client/ayon_core/hosts/photoshop/plugins/create/create_image.py
@@ -35,8 +35,12 @@ class ImageCreator(Creator):
create_empty_group = False
stub = api.stub() # only after PS is up
- top_level_selected_items = stub.get_selected_layers()
if pre_create_data.get("use_selection"):
+ try:
+ top_level_selected_items = stub.get_selected_layers()
+ except ValueError:
+ raise CreatorError("Cannot group locked Background layer!")
+
only_single_item_selected = len(top_level_selected_items) == 1
if (
only_single_item_selected or
@@ -50,11 +54,12 @@ class ImageCreator(Creator):
group = stub.group_selected_layers(product_name_from_ui)
groups_to_create.append(group)
else:
- stub.select_layers(stub.get_layers())
try:
+ stub.select_layers(stub.get_layers())
group = stub.group_selected_layers(product_name_from_ui)
- except:
+ except ValueError:
raise CreatorError("Cannot group locked Background layer!")
+
groups_to_create.append(group)
# create empty group if nothing selected
diff --git a/client/ayon_core/hosts/traypublisher/csv_publish.py b/client/ayon_core/hosts/traypublisher/csv_publish.py
index b43792a357..2762172936 100644
--- a/client/ayon_core/hosts/traypublisher/csv_publish.py
+++ b/client/ayon_core/hosts/traypublisher/csv_publish.py
@@ -1,5 +1,3 @@
-import os
-
import pyblish.api
import pyblish.util
diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py
index 4d865c1c5c..da05afe86b 100644
--- a/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py
@@ -156,14 +156,9 @@ This creator publishes color space look file (LUT).
]
def apply_settings(self, project_settings):
- host = self.create_context.host
- host_name = host.name
- project_name = host.get_current_project_name()
- config_data = colorspace.get_imageio_config(
- project_name, host_name,
+ config_data = colorspace.get_current_context_imageio_config_preset(
project_settings=project_settings
)
-
if not config_data:
self.enabled = False
return
diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial_package.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial_package.py
new file mode 100644
index 0000000000..82b109be28
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial_package.py
@@ -0,0 +1,96 @@
+from pathlib import Path
+
+from ayon_core.pipeline import (
+ CreatedInstance,
+)
+
+from ayon_core.lib.attribute_definitions import (
+ FileDef,
+ BoolDef,
+ TextDef,
+)
+from ayon_core.hosts.traypublisher.api.plugin import TrayPublishCreator
+
+
+class EditorialPackageCreator(TrayPublishCreator):
+ """Creates instance for OTIO file from published folder.
+
+ Folder contains an OTIO file and exported .mov files. The process publishes
+ the whole folder as a single `editorial_pckg` product type and (possibly)
+ converts .mov files into a different format and copies them into the
+ `publish` `resources` subfolder.
+ """
+ identifier = "editorial_pckg"
+ label = "Editorial package"
+ product_type = "editorial_pckg"
+ description = "Publish folder with OTIO file and resources"
+
+ # Position batch creator after simple creators
+ order = 120
+
+ conversion_enabled = False
+
+ def apply_settings(self, project_settings):
+ self.conversion_enabled = (
+ project_settings["traypublisher"]
+ ["publish"]
+ ["ExtractEditorialPckgConversion"]
+ ["conversion_enabled"]
+ )
+
+ def get_icon(self):
+ return "fa.folder"
+
+ def create(self, product_name, instance_data, pre_create_data):
+ folder_path = pre_create_data.get("folder_path")
+ if not folder_path:
+ return
+
+ instance_data["creator_attributes"] = {
+ "folder_path": (Path(folder_path["directory"]) /
+ Path(folder_path["filenames"][0])).as_posix(),
+ "conversion_enabled": pre_create_data["conversion_enabled"]
+ }
+
+ # Create new instance
+ new_instance = CreatedInstance(self.product_type, product_name,
+ instance_data, self)
+ self._store_new_instance(new_instance)
+
+ def get_pre_create_attr_defs(self):
+ # Use same attributes as for instance attributes
+ return [
+ FileDef(
+ "folder_path",
+ folders=True,
+ single_item=True,
+ extensions=[],
+ allow_sequences=False,
+ label="Folder path"
+ ),
+ BoolDef("conversion_enabled",
+ tooltip="Convert to output defined in Settings.",
+ default=self.conversion_enabled,
+ label="Convert resources"),
+ ]
+
+ def get_instance_attr_defs(self):
+ return [
+ TextDef(
+ "folder_path",
+ label="Folder path",
+ disabled=True
+ ),
+ BoolDef("conversion_enabled",
+ tooltip="Convert to output defined in Settings.",
+ label="Convert resources"),
+ ]
+
+ def get_detail_description(self):
+ return """# Publish folder with OTIO file and video clips
+
+ Folder contains OTIO file and exported .mov files. Process should
+ publish whole folder as single `editorial_pckg` product type and
+ (possibly) convert .mov files into different format and copy them into
+ `publish` `resources` subfolder.
+ """
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_package.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_package.py
new file mode 100644
index 0000000000..cb1277546c
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_package.py
@@ -0,0 +1,58 @@
+"""Produces instance.data["editorial_pckg"] data used during integration.
+
+Requires:
+ instance.data["creator_attributes"]["path"] - from creator
+
+Provides:
+ instance -> editorial_pckg (dict):
+ folder_path (str)
+ otio_path (str) - from dragged folder
+ resource_paths (list)
+
+"""
+import os
+
+import pyblish.api
+
+from ayon_core.lib.transcoding import VIDEO_EXTENSIONS
+
+
+class CollectEditorialPackage(pyblish.api.InstancePlugin):
+ """Collects path to OTIO file and resources"""
+
+ label = "Collect Editorial Package"
+ order = pyblish.api.CollectorOrder - 0.1
+
+ hosts = ["traypublisher"]
+ families = ["editorial_pckg"]
+
+ def process(self, instance):
+ folder_path = instance.data["creator_attributes"]["folder_path"]
+ if not folder_path or not os.path.exists(folder_path):
+ self.log.info((
+ "Instance doesn't contain collected existing folder path."
+ ))
+ return
+
+ instance.data["editorial_pckg"] = {}
+ instance.data["editorial_pckg"]["folder_path"] = folder_path
+
+ otio_path, resource_paths = (
+ self._get_otio_and_resource_paths(folder_path))
+
+ instance.data["editorial_pckg"]["otio_path"] = otio_path
+ instance.data["editorial_pckg"]["resource_paths"] = resource_paths
+
+ def _get_otio_and_resource_paths(self, folder_path):
+ otio_path = None
+ resource_paths = []
+
+ file_names = os.listdir(folder_path)
+ for filename in file_names:
+ _, ext = os.path.splitext(filename)
+ file_path = os.path.join(folder_path, filename)
+ if ext == ".otio":
+ otio_path = file_path
+ elif ext in VIDEO_EXTENSIONS:
+ resource_paths.append(file_path)
+ return otio_path, resource_paths
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py
index 8e29a0048d..5fbb9a6f4c 100644
--- a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py
@@ -1,10 +1,7 @@
import pyblish.api
-from ayon_core.pipeline import (
- publish,
- registered_host
-)
from ayon_core.lib import EnumDef
from ayon_core.pipeline import colorspace
+from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import KnownPublishError
@@ -19,9 +16,10 @@ class CollectColorspace(pyblish.api.InstancePlugin,
families = ["render", "plate", "reference", "image", "online"]
enabled = False
- colorspace_items = [
+ default_colorspace_items = [
(None, "Don't override")
]
+ colorspace_items = list(default_colorspace_items)
colorspace_attr_show = False
config_items = None
@@ -69,14 +67,13 @@ class CollectColorspace(pyblish.api.InstancePlugin,
@classmethod
def apply_settings(cls, project_settings):
- host = registered_host()
- host_name = host.name
- project_name = host.get_current_project_name()
- config_data = colorspace.get_imageio_config(
- project_name, host_name,
+ config_data = colorspace.get_current_context_imageio_config_preset(
project_settings=project_settings
)
+ enabled = False
+ colorspace_items = list(cls.default_colorspace_items)
+ config_items = None
if config_data:
filepath = config_data["path"]
config_items = colorspace.get_ocio_config_colorspaces(filepath)
@@ -85,9 +82,11 @@ class CollectColorspace(pyblish.api.InstancePlugin,
include_aliases=True,
include_roles=True
)
- cls.config_items = config_items
- cls.colorspace_items.extend(labeled_colorspaces)
- cls.enabled = True
+ colorspace_items.extend(labeled_colorspaces)
+
+ cls.config_items = config_items
+ cls.colorspace_items = colorspace_items
+ cls.enabled = enabled
@classmethod
def get_attribute_defs(cls):
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_folder_entity.py
similarity index 64%
rename from client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py
rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_folder_entity.py
index 4d203649c7..2e564a2e4e 100644
--- a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_folder_entity.py
@@ -10,9 +10,13 @@ class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.491
label = "Collect Missing Frame Data From Folder"
- families = ["plate", "pointcache",
- "vdbcache", "online",
- "render"]
+ families = [
+ "plate",
+ "pointcache",
+ "vdbcache",
+ "online",
+ "render",
+ ]
hosts = ["traypublisher"]
def process(self, instance):
@@ -22,16 +26,26 @@ class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin):
"frameStart",
"frameEnd",
"handleStart",
- "handleEnd"
+ "handleEnd",
):
if key not in instance.data:
missing_keys.append(key)
+
+ # Skip the logic if all keys are already collected.
+ # NOTE: In editorial is not 'folderEntity' filled, so it would crash
+ # even if we don't need it.
+ if not missing_keys:
+ return
+
keys_set = []
folder_attributes = instance.data["folderEntity"]["attrib"]
for key in missing_keys:
if key in folder_attributes:
instance.data[key] = folder_attributes[key]
keys_set.append(key)
+
if keys_set:
- self.log.debug(f"Frame range data {keys_set} "
- "has been collected from folder entity.")
+ self.log.debug(
+ f"Frame range data {keys_set} "
+ "has been collected from folder entity."
+ )
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/extract_editorial_pckg.py b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_editorial_pckg.py
new file mode 100644
index 0000000000..6dd4e84704
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_editorial_pckg.py
@@ -0,0 +1,232 @@
+import copy
+import os.path
+import subprocess
+
+import opentimelineio
+
+import pyblish.api
+
+from ayon_core.lib import get_ffmpeg_tool_args, run_subprocess
+from ayon_core.pipeline import publish
+
+
+class ExtractEditorialPckgConversion(publish.Extractor):
+ """Replaces movie paths in otio file with publish rootless
+
+ Prepares movie resources for integration (adds them to `transfers`).
+ Converts .mov files according to output definition.
+ """
+
+ label = "Extract Editorial Package"
+ order = pyblish.api.ExtractorOrder - 0.45
+ hosts = ["traypublisher"]
+ families = ["editorial_pckg"]
+
+ def process(self, instance):
+ editorial_pckg_data = instance.data.get("editorial_pckg")
+
+ otio_path = editorial_pckg_data["otio_path"]
+ otio_basename = os.path.basename(otio_path)
+ staging_dir = self.staging_dir(instance)
+
+ editorial_pckg_repre = {
+ 'name': "editorial_pckg",
+ 'ext': "otio",
+ 'files': otio_basename,
+ "stagingDir": staging_dir,
+ }
+ otio_staging_path = os.path.join(staging_dir, otio_basename)
+
+ instance.data["representations"].append(editorial_pckg_repre)
+
+ publish_resource_folder = self._get_publish_resource_folder(instance)
+ resource_paths = editorial_pckg_data["resource_paths"]
+ transfers = self._get_transfers(resource_paths,
+ publish_resource_folder)
+
+ project_settings = instance.context.data["project_settings"]
+ output_def = (project_settings["traypublisher"]
+ ["publish"]
+ ["ExtractEditorialPckgConversion"]
+ ["output"])
+
+ conversion_enabled = (instance.data["creator_attributes"]
+ ["conversion_enabled"])
+
+ if conversion_enabled and output_def["ext"]:
+ transfers = self._convert_resources(output_def, transfers)
+
+ instance.data["transfers"] = transfers
+
+ source_to_rootless = self._get_resource_path_mapping(instance,
+ transfers)
+
+ otio_data = editorial_pckg_data["otio_data"]
+ otio_data = self._replace_target_urls(otio_data, source_to_rootless)
+
+ opentimelineio.adapters.write_to_file(otio_data, otio_staging_path)
+
+ self.log.info("Added Editorial Package representation: {}".format(
+ editorial_pckg_repre))
+
+ def _get_publish_resource_folder(self, instance):
+ """Calculate the publish folder and create it if missing."""
+ publish_path = self._get_published_path(instance)
+ publish_folder = os.path.dirname(publish_path)
+ publish_resource_folder = os.path.join(publish_folder, "resources")
+
+ if not os.path.exists(publish_resource_folder):
+ os.makedirs(publish_resource_folder, exist_ok=True)
+ return publish_resource_folder
+
+ def _get_resource_path_mapping(self, instance, transfers):
+ """Returns dict of {source_mov_path: rootless_published_path}."""
+ replace_paths = {}
+ anatomy = instance.context.data["anatomy"]
+ for source, destination in transfers:
+ rootless_path = self._get_rootless(anatomy, destination)
+ source_file_name = os.path.basename(source)
+ replace_paths[source_file_name] = rootless_path
+ return replace_paths
+
+ def _get_transfers(self, resource_paths, publish_resource_folder):
+ """Returns list of tuples (source, destination) with movie paths."""
+ transfers = []
+ for res_path in resource_paths:
+ res_basename = os.path.basename(res_path)
+ pub_res_path = os.path.join(publish_resource_folder, res_basename)
+ transfers.append((res_path, pub_res_path))
+ return transfers
+
+ def _replace_target_urls(self, otio_data, replace_paths):
+ """Replace original movie paths with published rootless ones."""
+ for track in otio_data.tracks:
+ for clip in track:
+ # Check if the clip has a media reference
+ if clip.media_reference is not None:
+ # Access the target_url from the media reference
+ target_url = clip.media_reference.target_url
+ if not target_url:
+ continue
+ file_name = os.path.basename(target_url)
+ replace_path = replace_paths.get(file_name)
+ if replace_path:
+ clip.media_reference.target_url = replace_path
+ if clip.name == file_name:
+ clip.name = os.path.basename(replace_path)
+
+ return otio_data
+
+ def _get_rootless(self, anatomy, path):
+ """Try to find rootless {root[work]} path from `path`"""
+ success, rootless_path = anatomy.find_root_template_from_path(
+ path)
+ if not success:
+ # `rootless_path` is not set to `output_dir` if none of roots match
+ self.log.warning(
+ f"Could not find root path for remapping '{path}'."
+ )
+ rootless_path = path
+
+ return rootless_path
+
+ def _get_published_path(self, instance):
+ """Calculates expected `publish` folder"""
+ # determine published path from Anatomy.
+ template_data = instance.data.get("anatomyData")
+ rep = instance.data["representations"][0]
+ template_data["representation"] = rep.get("name")
+ template_data["ext"] = rep.get("ext")
+ template_data["comment"] = None
+
+ anatomy = instance.context.data["anatomy"]
+ template_data["root"] = anatomy.roots
+ template = anatomy.get_template_item("publish", "default", "path")
+ template_filled = template.format_strict(template_data)
+ return os.path.normpath(template_filled)
+
+ def _convert_resources(self, output_def, transfers):
+ """Converts all resource files to configured format."""
+ out_extension = output_def["ext"]
+ if not out_extension:
+ self.log.warning("No output extension configured in "
+ "ayon+settings://traypublisher/publish/ExtractEditorialPckgConversion") # noqa
+ return transfers
+
+ final_transfers = []
+ out_def_ffmpeg_args = output_def["ffmpeg_args"]
+ ffmpeg_input_args = [
+ value.strip()
+ for value in out_def_ffmpeg_args["input"]
+ if value.strip()
+ ]
+ ffmpeg_video_filters = [
+ value.strip()
+ for value in out_def_ffmpeg_args["video_filters"]
+ if value.strip()
+ ]
+ ffmpeg_audio_filters = [
+ value.strip()
+ for value in out_def_ffmpeg_args["audio_filters"]
+ if value.strip()
+ ]
+ ffmpeg_output_args = [
+ value.strip()
+ for value in out_def_ffmpeg_args["output"]
+ if value.strip()
+ ]
+ ffmpeg_input_args = self._split_ffmpeg_args(ffmpeg_input_args)
+
+ generic_args = [
+ subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg"))
+ ]
+ generic_args.extend(ffmpeg_input_args)
+ if ffmpeg_video_filters:
+ generic_args.append("-filter:v")
+ generic_args.append(
+ "\"{}\"".format(",".join(ffmpeg_video_filters)))
+
+ if ffmpeg_audio_filters:
+ generic_args.append("-filter:a")
+ generic_args.append(
+ "\"{}\"".format(",".join(ffmpeg_audio_filters)))
+
+ for source, destination in transfers:
+ base_name = os.path.basename(destination)
+ file_name, ext = os.path.splitext(base_name)
+ dest_path = os.path.join(os.path.dirname(destination),
+ f"{file_name}.{out_extension}")
+ final_transfers.append((source, dest_path))
+
+ all_args = copy.deepcopy(generic_args)
+ all_args.append(f"-i \"{source}\"")
+ all_args.extend(ffmpeg_output_args) # order matters
+ all_args.append(f"\"{dest_path}\"")
+ subprcs_cmd = " ".join(all_args)
+
+ # run subprocess
+ self.log.debug("Executing: {}".format(subprcs_cmd))
+ run_subprocess(subprcs_cmd, shell=True, logger=self.log)
+ return final_transfers
+
+ def _split_ffmpeg_args(self, in_args):
+ """Make sure all entered arguments are separated into individual items.
+
+ Split each argument string on " -" to identify whether the string
+ contains one or more arguments.
+ """
+ splitted_args = []
+ for arg in in_args:
+ sub_args = arg.split(" -")
+ if len(sub_args) == 1:
+ if arg and arg not in splitted_args:
+ splitted_args.append(arg)
+ continue
+
+ for idx, arg in enumerate(sub_args):
+ if idx != 0:
+ arg = "-" + arg
+
+ if arg and arg not in splitted_args:
+ splitted_args.append(arg)
+ return splitted_args
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_editorial_package.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_editorial_package.py
new file mode 100644
index 0000000000..c63c4a6a73
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_editorial_package.py
@@ -0,0 +1,68 @@
+import os
+import opentimelineio
+
+import pyblish.api
+from ayon_core.pipeline import PublishValidationError
+
+
+class ValidateEditorialPackage(pyblish.api.InstancePlugin):
+ """Checks that published folder contains all resources from otio
+
+ Currently checks only by file names and expects a flat structure.
+ It ignores paths to resources in the otio file, as the folder might be
+ dragged in and published from a different location than it was created in.
+ """
+
+ label = "Validate Editorial Package"
+ order = pyblish.api.ValidatorOrder - 0.49
+
+ hosts = ["traypublisher"]
+ families = ["editorial_pckg"]
+
+ def process(self, instance):
+ editorial_pckg_data = instance.data.get("editorial_pckg")
+ if not editorial_pckg_data:
+ raise PublishValidationError("Editorial package not collected")
+
+ folder_path = editorial_pckg_data["folder_path"]
+
+ otio_path = editorial_pckg_data["otio_path"]
+ if not otio_path:
+ raise PublishValidationError(
+ f"Folder {folder_path} missing otio file")
+
+ resource_paths = editorial_pckg_data["resource_paths"]
+
+ resource_file_names = {os.path.basename(path)
+ for path in resource_paths}
+
+ otio_data = opentimelineio.adapters.read_from_file(otio_path)
+
+ target_urls = self._get_all_target_urls(otio_data)
+ missing_files = set()
+ for target_url in target_urls:
+ target_basename = os.path.basename(target_url)
+ if target_basename not in resource_file_names:
+ missing_files.add(target_basename)
+
+ if missing_files:
+ raise PublishValidationError(
+ f"Otio file contains missing files `{missing_files}`.\n\n"
+ f"Please add them to `{folder_path}` and republish.")
+
+ instance.data["editorial_pckg"]["otio_data"] = otio_data
+
+ def _get_all_target_urls(self, otio_data):
+ target_urls = []
+
+ # Iterate through tracks, clips, or other elements
+ for track in otio_data.tracks:
+ for clip in track:
+ # Check if the clip has a media reference
+ if clip.media_reference is not None:
+ # Access the target_url from the media reference
+ target_url = clip.media_reference.target_url
+ if target_url:
+ target_urls.append(target_url)
+
+ return target_urls
diff --git a/client/ayon_core/lib/__init__.py b/client/ayon_core/lib/__init__.py
index e436396c6c..e25d3479ee 100644
--- a/client/ayon_core/lib/__init__.py
+++ b/client/ayon_core/lib/__init__.py
@@ -139,6 +139,7 @@ from .path_tools import (
)
from .ayon_info import (
+ is_in_ayon_launcher_process,
is_running_from_build,
is_using_ayon_console,
is_staging_enabled,
@@ -248,6 +249,7 @@ __all__ = [
"Logger",
+ "is_in_ayon_launcher_process",
"is_running_from_build",
"is_using_ayon_console",
"is_staging_enabled",
diff --git a/client/ayon_core/lib/ayon_info.py b/client/ayon_core/lib/ayon_info.py
index fc09a7c90c..c4333fab95 100644
--- a/client/ayon_core/lib/ayon_info.py
+++ b/client/ayon_core/lib/ayon_info.py
@@ -1,4 +1,5 @@
import os
+import sys
import json
import datetime
import platform
@@ -25,6 +26,18 @@ def get_ayon_launcher_version():
return content["__version__"]
+def is_in_ayon_launcher_process():
+ """Determine if current process is running from AYON launcher.
+
+ Returns:
+ bool: True if running from AYON launcher.
+
+ """
+ ayon_executable_path = os.path.normpath(os.environ["AYON_EXECUTABLE"])
+ executable_path = os.path.normpath(sys.executable)
+ return ayon_executable_path == executable_path
+
+
def is_running_from_build():
"""Determine if current process is running from build or code.
diff --git a/client/ayon_core/modules/deadline/__init__.py b/client/ayon_core/modules/deadline/__init__.py
index 5631e501d8..683d8dbe4a 100644
--- a/client/ayon_core/modules/deadline/__init__.py
+++ b/client/ayon_core/modules/deadline/__init__.py
@@ -1,6 +1,8 @@
from .deadline_module import DeadlineModule
+from .version import __version__
__all__ = (
"DeadlineModule",
+ "__version__"
)
diff --git a/client/ayon_core/modules/deadline/abstract_submit_deadline.py b/client/ayon_core/modules/deadline/abstract_submit_deadline.py
index 2e0518ae20..00e51100bc 100644
--- a/client/ayon_core/modules/deadline/abstract_submit_deadline.py
+++ b/client/ayon_core/modules/deadline/abstract_submit_deadline.py
@@ -49,6 +49,10 @@ def requests_post(*args, **kwargs):
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL",
True) else True # noqa
+
+ auth = kwargs.get("auth")
+ if auth:
+ kwargs["auth"] = tuple(auth) # explicit cast to tuple
# add 10sec timeout before bailing out
kwargs['timeout'] = 10
return requests.post(*args, **kwargs)
@@ -70,6 +74,9 @@ def requests_get(*args, **kwargs):
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL",
True) else True # noqa
+ auth = kwargs.get("auth")
+ if auth:
+ kwargs["auth"] = tuple(auth)
# add 10sec timeout before bailing out
kwargs['timeout'] = 10
return requests.get(*args, **kwargs)
@@ -434,9 +441,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
"""Plugin entry point."""
self._instance = instance
context = instance.context
- self._deadline_url = context.data.get("defaultDeadline")
- self._deadline_url = instance.data.get(
- "deadlineUrl", self._deadline_url)
+ self._deadline_url = instance.data["deadline"]["url"]
assert self._deadline_url, "Requires Deadline Webservice URL"
@@ -460,7 +465,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
self.plugin_info = self.get_plugin_info()
self.aux_files = self.get_aux_files()
- job_id = self.process_submission()
+ auth = instance.data["deadline"]["auth"]
+ job_id = self.process_submission(auth)
self.log.info("Submitted job to Deadline: {}.".format(job_id))
# TODO: Find a way that's more generic and not render type specific
@@ -473,10 +479,10 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
job_info=render_job_info,
plugin_info=render_plugin_info
)
- render_job_id = self.submit(payload)
+ render_job_id = self.submit(payload, auth)
self.log.info("Render job id: %s", render_job_id)
- def process_submission(self):
+ def process_submission(self, auth=None):
"""Process data for submission.
This takes Deadline JobInfo, PluginInfo, AuxFile, creates payload
@@ -487,7 +493,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
"""
payload = self.assemble_payload()
- return self.submit(payload)
+ return self.submit(payload, auth)
@abstractmethod
def get_job_info(self):
@@ -577,7 +583,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
"AuxFiles": aux_files or self.aux_files
}
- def submit(self, payload):
+ def submit(self, payload, auth):
"""Submit payload to Deadline API end-point.
This takes payload in the form of JSON file and POST it to
@@ -585,6 +591,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
Args:
payload (dict): dict to become json in deadline submission.
+ auth (tuple): (username, password)
Returns:
str: resulting Deadline job id.
@@ -594,7 +601,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
"""
url = "{}/api/jobs".format(self._deadline_url)
- response = requests_post(url, json=payload)
+ response = requests_post(url, json=payload,
+ auth=auth)
if not response.ok:
self.log.error("Submission failed!")
self.log.error(response.status_code)
diff --git a/client/ayon_core/modules/deadline/deadline_module.py b/client/ayon_core/modules/deadline/deadline_module.py
index c0ba83477e..b1089bbfe2 100644
--- a/client/ayon_core/modules/deadline/deadline_module.py
+++ b/client/ayon_core/modules/deadline/deadline_module.py
@@ -19,23 +19,23 @@ class DeadlineModule(AYONAddon, IPluginPaths):
def initialize(self, studio_settings):
# This module is always enabled
- deadline_urls = {}
+ deadline_servers_info = {}
enabled = self.name in studio_settings
if enabled:
deadline_settings = studio_settings[self.name]
- deadline_urls = {
- url_item["name"]: url_item["value"]
+ deadline_servers_info = {
+ url_item["name"]: url_item
for url_item in deadline_settings["deadline_urls"]
}
- if enabled and not deadline_urls:
+ if enabled and not deadline_servers_info:
enabled = False
self.log.warning((
"Deadline Webservice URLs are not specified. Disabling addon."
))
self.enabled = enabled
- self.deadline_urls = deadline_urls
+ self.deadline_servers_info = deadline_servers_info
def get_plugin_paths(self):
"""Deadline plugin paths."""
@@ -45,13 +45,15 @@ class DeadlineModule(AYONAddon, IPluginPaths):
}
@staticmethod
- def get_deadline_pools(webservice, log=None):
+ def get_deadline_pools(webservice, auth=None, log=None):
"""Get pools from Deadline.
Args:
webservice (str): Server url.
- log (Logger)
+ auth (Optional[Tuple[str, str]]): Tuple containing username,
+ password
+ log (Optional[Logger]): Logger to log errors to, if provided.
Returns:
- list: Pools.
+ List[str]: Pools.
Throws:
RuntimeError: If deadline webservice is unreachable.
@@ -63,7 +65,10 @@ class DeadlineModule(AYONAddon, IPluginPaths):
argument = "{}/api/pools?NamesOnly=true".format(webservice)
try:
- response = requests_get(argument)
+ kwargs = {}
+ if auth:
+ kwargs["auth"] = auth
+ response = requests_get(argument, **kwargs)
except requests.exceptions.ConnectionError as exc:
msg = 'Cannot connect to DL web service {}'.format(webservice)
log.error(msg)
diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
index ea4b7a213e..22022831a0 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
@@ -13,17 +13,45 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
"""Collect Deadline Webservice URL from instance."""
# Run before collect_render.
- order = pyblish.api.CollectorOrder + 0.005
+ order = pyblish.api.CollectorOrder + 0.225
label = "Deadline Webservice from the Instance"
- families = ["rendering", "renderlayer"]
- hosts = ["maya"]
+ targets = ["local"]
+ families = ["render",
+ "rendering",
+ "render.farm",
+ "renderFarm",
+ "renderlayer",
+ "maxrender",
+ "usdrender",
+ "redshift_rop",
+ "arnold_rop",
+ "mantra_rop",
+ "karma_rop",
+ "vray_rop",
+ "publish.hou",
+ "image"] # for Fusion
def process(self, instance):
- instance.data["deadlineUrl"] = self._collect_deadline_url(instance)
- instance.data["deadlineUrl"] = \
- instance.data["deadlineUrl"].strip().rstrip("/")
+ if not instance.data.get("farm"):
+ self.log.debug("Should not be processed on farm, skipping.")
+ return
+
+ if not instance.data.get("deadline"):
+ instance.data["deadline"] = {}
+
+ # todo: separate logic should be removed, all hosts should have same
+ host_name = instance.context.data["hostName"]
+ if host_name == "maya":
+ deadline_url = self._collect_deadline_url(instance)
+ else:
+ deadline_url = (instance.data.get("deadlineUrl") or # backwards
+ instance.data.get("deadline", {}).get("url"))
+ if deadline_url:
+ instance.data["deadline"]["url"] = deadline_url.strip().rstrip("/")
+ else:
+ instance.data["deadline"]["url"] = instance.context.data["deadline"]["defaultUrl"] # noqa
self.log.debug(
- "Using {} for submission.".format(instance.data["deadlineUrl"]))
+ "Using {} for submission".format(instance.data["deadline"]["url"]))
def _collect_deadline_url(self, render_instance):
# type: (pyblish.api.Instance) -> str
@@ -49,13 +77,13 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
["project_settings"]
["deadline"]
)
-
- default_server = render_instance.context.data["defaultDeadline"]
+ default_server_url = (render_instance.context.data["deadline"]
+ ["defaultUrl"])
# QUESTION How and where is this is set? Should be removed?
instance_server = render_instance.data.get("deadlineServers")
if not instance_server:
self.log.debug("Using default server.")
- return default_server
+ return default_server_url
# Get instance server as sting.
if isinstance(instance_server, int):
@@ -66,7 +94,7 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
default_servers = {
url_item["name"]: url_item["value"]
- for url_item in deadline_settings["deadline_urls"]
+ for url_item in deadline_settings["deadline_urls"]
}
project_servers = (
render_instance.context.data
diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py b/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
index b7ca227b01..9238e0ed95 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
@@ -18,10 +18,9 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
"""
# Run before collect_deadline_server_instance.
- order = pyblish.api.CollectorOrder + 0.0025
+ order = pyblish.api.CollectorOrder + 0.200
label = "Default Deadline Webservice"
-
- pass_mongo_url = False
+ targets = ["local"]
def process(self, context):
try:
@@ -33,15 +32,17 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
deadline_settings = context.data["project_settings"]["deadline"]
deadline_server_name = deadline_settings["deadline_server"]
- deadline_webservice = None
+ dl_server_info = None
if deadline_server_name:
- deadline_webservice = deadline_module.deadline_urls.get(
+ dl_server_info = deadline_module.deadline_servers_info.get(
deadline_server_name)
- default_deadline_webservice = deadline_module.deadline_urls["default"]
- deadline_webservice = (
- deadline_webservice
- or default_deadline_webservice
- )
+ if dl_server_info:
+ deadline_url = dl_server_info["value"]
+ else:
+ default_dl_server_info = deadline_module.deadline_servers_info["default"]
+ deadline_url = default_dl_server_info["value"]
- context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/") # noqa
+ context.data["deadline"] = {}
+ context.data["deadline"]["defaultUrl"] = (
+ deadline_url.strip().rstrip("/"))
diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py b/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py
new file mode 100644
index 0000000000..5d03523c89
--- /dev/null
+++ b/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+"""Collect user credentials
+
+Requires:
+ context -> project_settings
+ instance.data["deadline"]["url"]
+
+Provides:
+ instance.data["deadline"] -> require_authentication (bool)
+ instance.data["deadline"] -> auth (tuple (str, str)) -
+ (username, password) or None
+"""
+import pyblish.api
+
+from ayon_api import get_server_api_connection
+from ayon_core.modules.deadline.deadline_module import DeadlineModule
+from ayon_core.modules.deadline import __version__
+
+
+class CollectDeadlineUserCredentials(pyblish.api.InstancePlugin):
+ """Collects user name and password for artist if DL requires authentication
+ """
+ order = pyblish.api.CollectorOrder + 0.250
+ label = "Collect Deadline User Credentials"
+
+ targets = ["local"]
+ hosts = ["aftereffects",
+ "blender",
+ "fusion",
+ "harmony",
+ "nuke",
+ "maya",
+ "max",
+ "houdini"]
+
+ families = ["render",
+ "rendering",
+ "render.farm",
+ "renderFarm",
+ "renderlayer",
+ "maxrender",
+ "usdrender",
+ "redshift_rop",
+ "arnold_rop",
+ "mantra_rop",
+ "karma_rop",
+ "vray_rop",
+ "publish.hou"]
+
+ def process(self, instance):
+ if not instance.data.get("farm"):
+ self.log.debug("Should not be processed on farm, skipping.")
+ return
+
+ collected_deadline_url = instance.data["deadline"]["url"]
+ if not collected_deadline_url:
+ raise ValueError("Instance doesn't have '[deadline][url]'.")
+ context_data = instance.context.data
+ deadline_settings = context_data["project_settings"]["deadline"]
+
+ deadline_server_name = None
+ # deadline url might be set directly from instance, need to find
+ # metadata for it
+ for deadline_info in deadline_settings["deadline_urls"]:
+ dl_settings_url = deadline_info["value"].strip().rstrip("/")
+ if dl_settings_url == collected_deadline_url:
+ deadline_server_name = deadline_info["name"]
+ break
+
+ if not deadline_server_name:
+ raise ValueError(f"Collected {collected_deadline_url} doesn't "
+ "match any site configured in Studio Settings")
+
+ instance.data["deadline"]["require_authentication"] = (
+ deadline_info["require_authentication"]
+ )
+ instance.data["deadline"]["auth"] = None
+
+ if not deadline_info["require_authentication"]:
+ return
+ # TODO import 'get_addon_site_settings' when available
+ # in public 'ayon_api'
+ local_settings = get_server_api_connection().get_addon_site_settings(
+ DeadlineModule.name, __version__)
+ local_settings = local_settings["local_settings"]
+ for server_info in local_settings:
+ if deadline_server_name == server_info["server_name"]:
+ instance.data["deadline"]["auth"] = (server_info["username"],
+ server_info["password"])
diff --git a/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml b/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml
new file mode 100644
index 0000000000..eec05df08a
--- /dev/null
+++ b/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml
@@ -0,0 +1,17 @@
+
+
+
+ Deadline Authentication
+
+## Deadline authentication is required
+
+This project has set in Settings that Deadline requires authentication.
+
+### How to repair?
+
+Please go to AYON Server > Site Settings and provide your Deadline username and password.
+In some cases the password may be empty if Deadline is configured to allow that. Ask your administrator.
+
+
+
+
\ No newline at end of file
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py
index ab342c1a9d..f5805beb5c 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py
@@ -174,7 +174,8 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
instance.data["toBeRenderedOn"] = "deadline"
payload = self.assemble_payload()
- return self.submit(payload)
+ return self.submit(payload,
+ auth=instance.data["deadline"]["auth"])
def from_published_scene(self):
"""
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py
index 1fae23c9b2..2220442dac 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py
@@ -2,9 +2,10 @@ import os
import re
import json
import getpass
-import requests
import pyblish.api
+from openpype_modules.deadline.abstract_submit_deadline import requests_post
+
class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
"""Submit CelAction2D scene to Deadline
@@ -30,11 +31,7 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
context = instance.context
- # get default deadline webservice url from deadline module
- deadline_url = instance.context.data["defaultDeadline"]
- # if custom one is set in instance, use that
- if instance.data.get("deadlineUrl"):
- deadline_url = instance.data.get("deadlineUrl")
+ deadline_url = instance.data["deadline"]["url"]
assert deadline_url, "Requires Deadline Webservice URL"
self.deadline_url = "{}/api/jobs".format(deadline_url)
@@ -197,7 +194,8 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
self.log.debug("__ expectedFiles: `{}`".format(
instance.data["expectedFiles"]))
- response = requests.post(self.deadline_url, json=payload)
+ response = requests_post(self.deadline_url, json=payload,
+ auth=instance.data["deadline"]["auth"])
if not response.ok:
self.log.error(
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py
index e3a4cd8030..e9b93a47cd 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py
@@ -2,17 +2,13 @@ import os
import json
import getpass
-import requests
-
import pyblish.api
+from openpype_modules.deadline.abstract_submit_deadline import requests_post
from ayon_core.pipeline.publish import (
AYONPyblishPluginMixin
)
-from ayon_core.lib import (
- BoolDef,
- NumberDef,
-)
+from ayon_core.lib import NumberDef
class FusionSubmitDeadline(
@@ -64,11 +60,6 @@ class FusionSubmitDeadline(
decimals=0,
minimum=1,
maximum=10
- ),
- BoolDef(
- "suspend_publish",
- default=False,
- label="Suspend publish"
)
]
@@ -80,10 +71,6 @@ class FusionSubmitDeadline(
attribute_values = self.get_attr_values_from_data(
instance.data)
- # add suspend_publish attributeValue to instance data
- instance.data["suspend_publish"] = attribute_values[
- "suspend_publish"]
-
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
@@ -94,11 +81,7 @@ class FusionSubmitDeadline(
from ayon_core.hosts.fusion.api.lib import get_frame_path
- # get default deadline webservice url from deadline module
- deadline_url = instance.context.data["defaultDeadline"]
- # if custom one is set in instance, use that
- if instance.data.get("deadlineUrl"):
- deadline_url = instance.data.get("deadlineUrl")
+ deadline_url = instance.data["deadline"]["url"]
assert deadline_url, "Requires Deadline Webservice URL"
# Collect all saver instances in context that are to be rendered
@@ -258,7 +241,8 @@ class FusionSubmitDeadline(
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(deadline_url)
- response = requests.post(url, json=payload)
+ auth = instance.data["deadline"]["auth"]
+ response = requests_post(url, json=payload, auth=auth)
if not response.ok:
raise Exception(response.text)
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
index 6952604293..590abc3f12 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
@@ -10,7 +10,6 @@ from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from ayon_core.lib import (
is_in_tests,
- BoolDef,
TextDef,
NumberDef
)
@@ -86,15 +85,10 @@ class HoudiniSubmitDeadline(
priority = 50
chunk_size = 1
group = ""
-
+
@classmethod
def get_attribute_defs(cls):
return [
- BoolDef(
- "suspend_publish",
- default=False,
- label="Suspend publish"
- ),
NumberDef(
"priority",
label="Priority",
@@ -194,7 +188,7 @@ class HoudiniSubmitDeadline(
job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")
-
+
if split_render_job and is_export_job:
job_info.Priority = attribute_values.get(
"export_priority", self.export_priority
@@ -315,6 +309,11 @@ class HoudiniSubmitDeadline(
return attr.asdict(plugin_info)
def process(self, instance):
+ if not instance.data["farm"]:
+ self.log.debug("Render on farm is disabled. "
+ "Skipping deadline submission.")
+ return
+
super(HoudiniSubmitDeadline, self).process(instance)
# TODO: Avoid the need for this logic here, needed for submit publish
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py
index cba05f6948..e9f6c382c5 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py
@@ -187,11 +187,13 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
payload_data, project_settings)
job_infos, plugin_infos = payload
for job_info, plugin_info in zip(job_infos, plugin_infos):
- self.submit(self.assemble_payload(job_info, plugin_info))
+ self.submit(self.assemble_payload(job_info, plugin_info),
+ instance.data["deadline"]["auth"])
else:
payload = self._use_published_name(payload_data, project_settings)
job_info, plugin_info = payload
- self.submit(self.assemble_payload(job_info, plugin_info))
+ self.submit(self.assemble_payload(job_info, plugin_info),
+ instance.data["deadline"]["auth"])
def _use_published_name(self, data, project_settings):
# Not all hosts can import these modules.
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py
index 0300b12104..250dc8b7ea 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py
@@ -292,7 +292,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
return plugin_payload
- def process_submission(self):
+ def process_submission(self, auth=None):
from maya import cmds
instance = self._instance
@@ -332,7 +332,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
if "vrayscene" in instance.data["families"]:
self.log.debug("Submitting V-Ray scene render..")
vray_export_payload = self._get_vray_export_payload(payload_data)
- export_job = self.submit(vray_export_payload)
+ export_job = self.submit(vray_export_payload,
+ instance.data["deadline"]["auth"])
payload = self._get_vray_render_payload(payload_data)
@@ -351,7 +352,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
else:
# Submit main render job
job_info, plugin_info = payload
- self.submit(self.assemble_payload(job_info, plugin_info))
+ self.submit(self.assemble_payload(job_info, plugin_info),
+ instance.data["deadline"]["auth"])
def _tile_render(self, payload):
"""Submit as tile render per frame with dependent assembly jobs."""
@@ -451,7 +453,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
# Submit frame tile jobs
frame_tile_job_id = {}
for frame, tile_job_payload in frame_payloads.items():
- job_id = self.submit(tile_job_payload)
+ job_id = self.submit(tile_job_payload,
+ instance.data["deadline"]["auth"])
frame_tile_job_id[frame] = job_id
# Define assembly payloads
@@ -559,7 +562,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
"submitting assembly job {} of {}".format(i + 1,
num_assemblies)
)
- assembly_job_id = self.submit(payload)
+ assembly_job_id = self.submit(payload,
+ instance.data["deadline"]["auth"])
assembly_job_ids.append(assembly_job_id)
instance.data["assemblySubmissionJobs"] = assembly_job_ids
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py
index d70cb75bf3..ef744ae1e1 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py
@@ -4,9 +4,9 @@ import json
import getpass
from datetime import datetime
-import requests
import pyblish.api
+from openpype_modules.deadline.abstract_submit_deadline import requests_post
from ayon_core.pipeline.publish import (
AYONPyblishPluginMixin
)
@@ -76,11 +76,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
default=cls.use_gpu,
label="Use GPU"
),
- BoolDef(
- "suspend_publish",
- default=False,
- label="Suspend publish"
- ),
BoolDef(
"workfile_dependency",
default=cls.workfile_dependency,
@@ -100,20 +95,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
instance.data["attributeValues"] = self.get_attr_values_from_data(
instance.data)
- # add suspend_publish attributeValue to instance data
- instance.data["suspend_publish"] = instance.data["attributeValues"][
- "suspend_publish"]
-
families = instance.data["families"]
node = instance.data["transientData"]["node"]
context = instance.context
- # get default deadline webservice url from deadline module
- deadline_url = instance.context.data["defaultDeadline"]
- # if custom one is set in instance, use that
- if instance.data.get("deadlineUrl"):
- deadline_url = instance.data.get("deadlineUrl")
+ deadline_url = instance.data["deadline"]["url"]
assert deadline_url, "Requires Deadline Webservice URL"
self.deadline_url = "{}/api/jobs".format(deadline_url)
@@ -436,7 +423,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
self.log.debug("__ expectedFiles: `{}`".format(
instance.data["expectedFiles"]))
- response = requests.post(self.deadline_url, json=payload, timeout=10)
+ auth = instance.data["deadline"]["auth"]
+ response = requests_post(self.deadline_url, json=payload, timeout=10,
+ auth=auth)
if not response.ok:
raise Exception(response.text)
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py
index 4e4657d886..ce15eda9a0 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py
@@ -5,10 +5,10 @@ import json
import re
from copy import deepcopy
-import requests
import ayon_api
import pyblish.api
+from openpype_modules.deadline.abstract_submit_deadline import requests_post
from ayon_core.pipeline import publish
from ayon_core.lib import EnumDef, is_in_tests
from ayon_core.pipeline.version_start import get_versioning_start
@@ -147,9 +147,6 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
instance_settings = self.get_attr_values_from_data(instance.data)
initial_status = instance_settings.get("publishJobState", "Active")
- # TODO: Remove this backwards compatibility of `suspend_publish`
- if instance.data.get("suspend_publish"):
- initial_status = "Suspended"
args = [
"--headless",
@@ -212,7 +209,9 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
self.log.debug("Submitting Deadline publish job ...")
url = "{}/api/jobs".format(self.deadline_url)
- response = requests.post(url, json=payload, timeout=10)
+ auth = instance.data["deadline"]["auth"]
+ response = requests_post(url, json=payload, timeout=10,
+ auth=auth)
if not response.ok:
raise Exception(response.text)
@@ -344,11 +343,7 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
deadline_publish_job_id = None
if submission_type == "deadline":
- # get default deadline webservice url from deadline module
- self.deadline_url = instance.context.data["defaultDeadline"]
- # if custom one is set in instance, use that
- if instance.data.get("deadlineUrl"):
- self.deadline_url = instance.data.get("deadlineUrl")
+ self.deadline_url = instance.data["deadline"]["url"]
assert self.deadline_url, "Requires Deadline Webservice URL"
deadline_publish_job_id = \
@@ -356,7 +351,9 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
# Inject deadline url to instances.
for inst in instances:
- inst["deadlineUrl"] = self.deadline_url
+ if "deadline" not in inst:
+ inst["deadline"] = {}
+ inst["deadline"] = instance.data["deadline"]
# publish job file
publish_job = {
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py
index 8def9cc63c..0f505dce78 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py
@@ -5,11 +5,11 @@ import json
import re
from copy import deepcopy
-import requests
import clique
import ayon_api
import pyblish.api
+from openpype_modules.deadline.abstract_submit_deadline import requests_post
from ayon_core.pipeline import publish
from ayon_core.lib import EnumDef, is_in_tests
from ayon_core.pipeline.version_start import get_versioning_start
@@ -88,9 +88,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
hosts = ["fusion", "max", "maya", "nuke", "houdini",
"celaction", "aftereffects", "harmony", "blender"]
- families = ["render.farm", "render.frames_farm",
- "prerender.farm", "prerender.frames_farm",
- "renderlayer", "imagesequence",
+ families = ["render", "render.farm", "render.frames_farm",
+ "prerender", "prerender.farm", "prerender.frames_farm",
+ "renderlayer", "imagesequence", "image",
"vrayscene", "maxrender",
"arnold_rop", "mantra_rop",
"karma_rop", "vray_rop",
@@ -224,9 +224,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
instance_settings = self.get_attr_values_from_data(instance.data)
initial_status = instance_settings.get("publishJobState", "Active")
- # TODO: Remove this backwards compatibility of `suspend_publish`
- if instance.data.get("suspend_publish"):
- initial_status = "Suspended"
args = [
"--headless",
@@ -306,7 +303,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
self.log.debug("Submitting Deadline publish job ...")
url = "{}/api/jobs".format(self.deadline_url)
- response = requests.post(url, json=payload, timeout=10)
+ auth = instance.data["deadline"]["auth"]
+ response = requests_post(url, json=payload, timeout=10,
+ auth=auth)
if not response.ok:
raise Exception(response.text)
@@ -314,7 +313,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
return deadline_publish_job_id
-
def process(self, instance):
# type: (pyblish.api.Instance) -> None
"""Process plugin.
@@ -461,18 +459,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
}
# get default deadline webservice url from deadline module
- self.deadline_url = instance.context.data["defaultDeadline"]
- # if custom one is set in instance, use that
- if instance.data.get("deadlineUrl"):
- self.deadline_url = instance.data.get("deadlineUrl")
+ self.deadline_url = instance.data["deadline"]["url"]
assert self.deadline_url, "Requires Deadline Webservice URL"
deadline_publish_job_id = \
self._submit_deadline_post_job(instance, render_job, instances)
- # Inject deadline url to instances.
+ # Inject deadline url to instances to query DL for job id for overrides
for inst in instances:
- inst["deadlineUrl"] = self.deadline_url
+ inst["deadline"] = instance.data["deadline"]
# publish job file
publish_job = {
diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py
index a7b300beff..8fffd47786 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py
@@ -1,5 +1,7 @@
import pyblish.api
+from ayon_core.pipeline import PublishXmlValidationError
+
from openpype_modules.deadline.abstract_submit_deadline import requests_get
@@ -8,27 +10,42 @@ class ValidateDeadlineConnection(pyblish.api.InstancePlugin):
label = "Validate Deadline Web Service"
order = pyblish.api.ValidatorOrder
- hosts = ["maya", "nuke"]
- families = ["renderlayer", "render"]
+ hosts = ["maya", "nuke", "aftereffects", "harmony", "fusion"]
+ families = ["renderlayer", "render", "render.farm"]
# cache
responses = {}
def process(self, instance):
- # get default deadline webservice url from deadline module
- deadline_url = instance.context.data["defaultDeadline"]
- # if custom one is set in instance, use that
- if instance.data.get("deadlineUrl"):
- deadline_url = instance.data.get("deadlineUrl")
- self.log.debug(
- "We have deadline URL on instance {}".format(deadline_url)
- )
+ if not instance.data.get("farm"):
+ self.log.debug("Should not be processed on farm, skipping.")
+ return
+
+ deadline_url = instance.data["deadline"]["url"]
assert deadline_url, "Requires Deadline Webservice URL"
+ kwargs = {}
+ if instance.data["deadline"]["require_authentication"]:
+ auth = instance.data["deadline"]["auth"]
+ kwargs["auth"] = auth
+
+ if not auth[0]:
+ raise PublishXmlValidationError(
+ self,
+ "Deadline requires authentication. "
+ "At least username is required to be set in "
+ "Site Settings.")
+
if deadline_url not in self.responses:
- self.responses[deadline_url] = requests_get(deadline_url)
+ self.responses[deadline_url] = requests_get(deadline_url, **kwargs)
response = self.responses[deadline_url]
+ if response.status_code == 401:
+ raise PublishXmlValidationError(
+ self,
+ "Deadline requires authentication. "
+ "Provided credentials are not working. "
+ "Please change them in Site Settings")
assert response.ok, "Response must be ok"
assert response.text.startswith("Deadline Web Service "), (
"Web service did not respond with 'Deadline Web Service'"
diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py
index 2feb044cf1..2fb511bf51 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py
@@ -37,8 +37,9 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
self.log.debug("Skipping local instance.")
return
- deadline_url = self.get_deadline_url(instance)
- pools = self.get_pools(deadline_url)
+ deadline_url = instance.data["deadline"]["url"]
+ pools = self.get_pools(deadline_url,
+ instance.data["deadline"].get("auth"))
invalid_pools = {}
primary_pool = instance.data.get("primaryPool")
@@ -61,22 +62,18 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
formatting_data={"pools_str": ", ".join(pools)}
)
- def get_deadline_url(self, instance):
- # get default deadline webservice url from deadline module
- deadline_url = instance.context.data["defaultDeadline"]
- if instance.data.get("deadlineUrl"):
- # if custom one is set in instance, use that
- deadline_url = instance.data.get("deadlineUrl")
- return deadline_url
-
- def get_pools(self, deadline_url):
+ def get_pools(self, deadline_url, auth):
if deadline_url not in self.pools_per_url:
self.log.debug(
"Querying available pools for Deadline url: {}".format(
deadline_url)
)
pools = DeadlineModule.get_deadline_pools(deadline_url,
+ auth=auth,
log=self.log)
+ # some DL return "none" as a pool name
+ if "none" not in pools:
+ pools.append("none")
self.log.info("Available pools: {}".format(pools))
self.pools_per_url[deadline_url] = pools
diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
index 6263526d5c..83e867408c 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
@@ -199,16 +199,16 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
(dict): Job info from Deadline
"""
- # get default deadline webservice url from deadline module
- deadline_url = instance.context.data["defaultDeadline"]
- # if custom one is set in instance, use that
- if instance.data.get("deadlineUrl"):
- deadline_url = instance.data.get("deadlineUrl")
+ deadline_url = instance.data["deadline"]["url"]
assert deadline_url, "Requires Deadline Webservice URL"
url = "{}/api/jobs?JobID={}".format(deadline_url, job_id)
try:
- response = requests_get(url)
+ kwargs = {}
+ auth = instance.data["deadline"]["auth"]
+ if auth:
+ kwargs["auth"] = auth
+ response = requests_get(url, **kwargs)
except requests.exceptions.ConnectionError:
self.log.error("Deadline is not accessible at "
"{}".format(deadline_url))
diff --git a/client/ayon_core/modules/deadline/version.py b/client/ayon_core/modules/deadline/version.py
new file mode 100644
index 0000000000..569b1212f7
--- /dev/null
+++ b/client/ayon_core/modules/deadline/version.py
@@ -0,0 +1 @@
+__version__ = "0.1.10"
diff --git a/client/ayon_core/modules/royalrender/api.py b/client/ayon_core/modules/royalrender/api.py
index a69f88c43c..ef715811c5 100644
--- a/client/ayon_core/modules/royalrender/api.py
+++ b/client/ayon_core/modules/royalrender/api.py
@@ -7,7 +7,7 @@ from ayon_core.lib import Logger, run_subprocess, AYONSettingsRegistry
from ayon_core.lib.vendor_bin_utils import find_tool_in_custom_paths
from .rr_job import SubmitFile
-from .rr_job import RRjob, SubmitterParameter # noqa F401
+from .rr_job import RRJob, SubmitterParameter # noqa F401
class Api:
diff --git a/client/ayon_core/pipeline/colorspace.py b/client/ayon_core/pipeline/colorspace.py
index efa3bbf968..239c187959 100644
--- a/client/ayon_core/pipeline/colorspace.py
+++ b/client/ayon_core/pipeline/colorspace.py
@@ -8,16 +8,20 @@ import tempfile
import warnings
from copy import deepcopy
+import ayon_api
+
from ayon_core import AYON_CORE_ROOT
from ayon_core.settings import get_project_settings
from ayon_core.lib import (
+ filter_profiles,
StringTemplate,
run_ayon_launcher_process,
- Logger
+ Logger,
)
-from ayon_core.pipeline import Anatomy
from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
-
+from ayon_core.pipeline import Anatomy
+from ayon_core.pipeline.template_data import get_template_data
+from ayon_core.pipeline.load import get_representation_path_with_anatomy
log = Logger.get_logger(__name__)
@@ -32,10 +36,6 @@ class CachedData:
}
-class DeprecatedWarning(DeprecationWarning):
- pass
-
-
def deprecated(new_destination):
"""Mark functions as deprecated.
@@ -60,13 +60,13 @@ def deprecated(new_destination):
@functools.wraps(decorated_func)
def wrapper(*args, **kwargs):
- warnings.simplefilter("always", DeprecatedWarning)
+ warnings.simplefilter("always", DeprecationWarning)
warnings.warn(
(
"Call to deprecated function '{}'"
"\nFunction was moved or removed.{}"
).format(decorated_func.__name__, warning_message),
- category=DeprecatedWarning,
+ category=DeprecationWarning,
stacklevel=4
)
return decorated_func(*args, **kwargs)
@@ -81,28 +81,54 @@ def deprecated(new_destination):
def _make_temp_json_file():
"""Wrapping function for json temp file
"""
+ temporary_json_file = None
try:
# Store dumped json to temporary file
- temporary_json_file = tempfile.NamedTemporaryFile(
+ with tempfile.NamedTemporaryFile(
mode="w", suffix=".json", delete=False
- )
- temporary_json_file.close()
- temporary_json_filepath = temporary_json_file.name.replace(
- "\\", "/"
- )
+ ) as tmpfile:
+ temporary_json_filepath = tmpfile.name.replace("\\", "/")
yield temporary_json_filepath
- except IOError as _error:
+ except IOError as exc:
raise IOError(
- "Unable to create temp json file: {}".format(
- _error
- )
+ "Unable to create temp json file: {}".format(exc)
)
finally:
# Remove the temporary json
- os.remove(temporary_json_filepath)
+ if temporary_json_file is not None:
+ os.remove(temporary_json_filepath)
+
+
+def has_compatible_ocio_package():
+ """Current process has available compatible 'PyOpenColorIO'.
+
+ Returns:
+ bool: True if compatible package is available.
+
+ """
+ if CachedData.has_compatible_ocio_package is not None:
+ return CachedData.has_compatible_ocio_package
+
+ is_compatible = False
+ try:
+ import PyOpenColorIO
+
+ # Check if PyOpenColorIO is compatible
+ # - version 2.0.0 or higher is required
+ # NOTE version 1 does not have '__version__' attribute
+ if hasattr(PyOpenColorIO, "__version__"):
+ version_parts = PyOpenColorIO.__version__.split(".")
+ major = int(version_parts[0])
+ is_compatible = (major, ) >= (2, )
+ except ImportError:
+ pass
+
+ CachedData.has_compatible_ocio_package = is_compatible
+ # compatible
+ return CachedData.has_compatible_ocio_package
def get_ocio_config_script_path():
@@ -110,53 +136,58 @@ def get_ocio_config_script_path():
Returns:
str: path string
+
"""
- return os.path.normpath(
- os.path.join(
- AYON_CORE_ROOT,
- "scripts",
- "ocio_wrapper.py"
- )
+ return os.path.join(
+ os.path.normpath(AYON_CORE_ROOT),
+ "scripts",
+ "ocio_wrapper.py"
)
def get_colorspace_name_from_filepath(
- filepath, host_name, project_name,
- config_data=None, file_rules=None,
+ filepath,
+ host_name,
+ project_name,
+ config_data,
+ file_rules=None,
project_settings=None,
validate=True
):
"""Get colorspace name from filepath
Args:
- filepath (str): path string, file rule pattern is tested on it
- host_name (str): host name
- project_name (str): project name
- config_data (Optional[dict]): config path and template in dict.
- Defaults to None.
- file_rules (Optional[dict]): file rule data from settings.
- Defaults to None.
- project_settings (Optional[dict]): project settings. Defaults to None.
+ filepath (str): Path string, file rule pattern is tested on it.
+ host_name (str): Host name.
+ project_name (str): Project name.
+ config_data (dict): Config path and template in dict.
+ file_rules (Optional[dict]): File rule data from settings.
+ project_settings (Optional[dict]): Project settings.
validate (Optional[bool]): should resulting colorspace be validated
- with config file? Defaults to True.
+ with config file? Defaults to True.
Returns:
- str: name of colorspace
- """
- project_settings, config_data, file_rules = _get_context_settings(
- host_name, project_name,
- config_data=config_data, file_rules=file_rules,
- project_settings=project_settings
- )
+ Union[str, None]: name of colorspace
+ """
if not config_data:
# in case global or host color management is not enabled
return None
+ if file_rules is None:
+ if project_settings is None:
+ project_settings = get_project_settings(project_name)
+ file_rules = get_imageio_file_rules(
+ project_name, host_name, project_settings
+ )
+
# use ImageIO file rules
colorspace_name = get_imageio_file_rules_colorspace_from_filepath(
- filepath, host_name, project_name,
- config_data=config_data, file_rules=file_rules,
+ filepath,
+ host_name,
+ project_name,
+ config_data=config_data,
+ file_rules=file_rules,
project_settings=project_settings
)
@@ -182,47 +213,18 @@ def get_colorspace_name_from_filepath(
# validate matching colorspace with config
if validate:
validate_imageio_colorspace_in_config(
- config_data["path"], colorspace_name)
+ config_data["path"], colorspace_name
+ )
return colorspace_name
-# TODO: remove this in future - backward compatibility
-@deprecated("get_imageio_file_rules_colorspace_from_filepath")
-def get_imageio_colorspace_from_filepath(*args, **kwargs):
- return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs)
-
-# TODO: remove this in future - backward compatibility
-@deprecated("get_imageio_file_rules_colorspace_from_filepath")
-def get_colorspace_from_filepath(*args, **kwargs):
- return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs)
-
-
-def _get_context_settings(
- host_name, project_name,
- config_data=None, file_rules=None,
- project_settings=None
-):
- project_settings = project_settings or get_project_settings(
- project_name
- )
-
- config_data = config_data or get_imageio_config(
- project_name, host_name, project_settings)
-
- # in case host color management is not enabled
- if not config_data:
- return (None, None, None)
-
- file_rules = file_rules or get_imageio_file_rules(
- project_name, host_name, project_settings)
-
- return project_settings, config_data, file_rules
-
-
def get_imageio_file_rules_colorspace_from_filepath(
- filepath, host_name, project_name,
- config_data=None, file_rules=None,
+ filepath,
+ host_name,
+ project_name,
+ config_data,
+ file_rules=None,
project_settings=None
):
"""Get colorspace name from filepath
@@ -230,28 +232,28 @@ def get_imageio_file_rules_colorspace_from_filepath(
ImageIO Settings file rules are tested for matching rule.
Args:
- filepath (str): path string, file rule pattern is tested on it
- host_name (str): host name
- project_name (str): project name
- config_data (Optional[dict]): config path and template in dict.
- Defaults to None.
- file_rules (Optional[dict]): file rule data from settings.
- Defaults to None.
- project_settings (Optional[dict]): project settings. Defaults to None.
+ filepath (str): Path string, file rule pattern is tested on it.
+ host_name (str): Host name.
+ project_name (str): Project name.
+ config_data (dict): Config path and template in dict.
+ file_rules (Optional[dict]): File rule data from settings.
+ project_settings (Optional[dict]): Project settings.
Returns:
- str: name of colorspace
- """
- project_settings, config_data, file_rules = _get_context_settings(
- host_name, project_name,
- config_data=config_data, file_rules=file_rules,
- project_settings=project_settings
- )
+ Union[str, None]: Name of colorspace.
+ """
if not config_data:
# in case global or host color management is not enabled
return None
+ if file_rules is None:
+ if project_settings is None:
+ project_settings = get_project_settings(project_name)
+ file_rules = get_imageio_file_rules(
+ project_name, host_name, project_settings
+ )
+
# match file rule from path
colorspace_name = None
for file_rule in file_rules:
@@ -282,26 +284,48 @@ def get_config_file_rules_colorspace_from_filepath(config_path, filepath):
Returns:
Union[str, None]: matching colorspace name
+
"""
- if not compatibility_check():
- # python environment is not compatible with PyOpenColorIO
- # needs to be run in subprocess
+ if has_compatible_ocio_package():
+ result_data = _get_config_file_rules_colorspace_from_filepath(
+ config_path, filepath
+ )
+ else:
result_data = _get_wrapped_with_subprocess(
- "colorspace", "get_config_file_rules_colorspace_from_filepath",
+ "get_config_file_rules_colorspace_from_filepath",
config_path=config_path,
filepath=filepath
)
- if result_data:
- return result_data[0]
-
- # TODO: refactor this so it is not imported but part of this file
- from ayon_core.scripts.ocio_wrapper import _get_config_file_rules_colorspace_from_filepath # noqa: E501
-
- result_data = _get_config_file_rules_colorspace_from_filepath(
- config_path, filepath)
if result_data:
return result_data[0]
+ return None
+
+
+def get_config_version_data(config_path):
+ """Return major and minor version info.
+
+ Args:
+ config_path (str): path string leading to config.ocio
+
+ Raises:
+ IOError: Input config does not exist.
+
+ Returns:
+ dict: minor and major keys with values
+
+ """
+ if config_path not in CachedData.config_version_data:
+ if has_compatible_ocio_package():
+ version_data = _get_config_version_data(config_path)
+ else:
+ version_data = _get_wrapped_with_subprocess(
+ "get_config_version_data",
+ config_path=config_path
+ )
+ CachedData.config_version_data[config_path] = version_data
+
+ return deepcopy(CachedData.config_version_data[config_path])
def parse_colorspace_from_filepath(
@@ -344,10 +368,10 @@ def parse_colorspace_from_filepath(
pattern = "|".join(
# Allow to match spaces also as underscores because the
# integrator replaces spaces with underscores in filenames
- re.escape(colorspace) for colorspace in
+ re.escape(colorspace)
# Sort by longest first so the regex matches longer matches
# over smaller matches, e.g. matching 'Output - sRGB' over 'sRGB'
- sorted(colorspaces, key=len, reverse=True)
+ for colorspace in sorted(colorspaces, key=len, reverse=True)
)
return re.compile(pattern)
@@ -395,6 +419,7 @@ def validate_imageio_colorspace_in_config(config_path, colorspace_name):
Returns:
bool: True if exists
+
"""
colorspaces = get_ocio_config_colorspaces(config_path)["colorspaces"]
if colorspace_name not in colorspaces:
@@ -405,28 +430,10 @@ def validate_imageio_colorspace_in_config(config_path, colorspace_name):
return True
-# TODO: remove this in future - backward compatibility
-@deprecated("_get_wrapped_with_subprocess")
-def get_data_subprocess(config_path, data_type):
- """[Deprecated] Get data via subprocess
-
- Wrapper for Python 2 hosts.
+def _get_wrapped_with_subprocess(command, **kwargs):
+ """Get data via subprocess.
Args:
- config_path (str): path leading to config.ocio file
- """
- return _get_wrapped_with_subprocess(
- "config", data_type, in_path=config_path,
- )
-
-
-def _get_wrapped_with_subprocess(command_group, command, **kwargs):
- """Get data via subprocess
-
- Wrapper for Python 2 hosts.
-
- Args:
- command_group (str): command group name
command (str): command name
**kwargs: command arguments
@@ -436,14 +443,15 @@ def _get_wrapped_with_subprocess(command_group, command, **kwargs):
with _make_temp_json_file() as tmp_json_path:
# Prepare subprocess arguments
args = [
- "run", get_ocio_config_script_path(),
- command_group, command
+ "run",
+ get_ocio_config_script_path(),
+ command
]
- for key_, value_ in kwargs.items():
- args.extend(("--{}".format(key_), value_))
+ for key, value in kwargs.items():
+ args.extend(("--{}".format(key), value))
- args.append("--out_path")
+ args.append("--output_path")
args.append(tmp_json_path)
log.info("Executing: {}".format(" ".join(args)))
@@ -451,55 +459,23 @@ def _get_wrapped_with_subprocess(command_group, command, **kwargs):
run_ayon_launcher_process(*args, logger=log)
# return all colorspaces
- with open(tmp_json_path, "r") as f_:
- return json.load(f_)
+ with open(tmp_json_path, "r") as stream:
+ return json.load(stream)
-# TODO: this should be part of ocio_wrapper.py
-def compatibility_check():
- """Making sure PyOpenColorIO is importable"""
- if CachedData.has_compatible_ocio_package is not None:
- return CachedData.has_compatible_ocio_package
-
- try:
- import PyOpenColorIO # noqa: F401
- CachedData.has_compatible_ocio_package = True
- except ImportError:
- CachedData.has_compatible_ocio_package = False
-
- # compatible
- return CachedData.has_compatible_ocio_package
-
-
-# TODO: this should be part of ocio_wrapper.py
def compatibility_check_config_version(config_path, major=1, minor=None):
"""Making sure PyOpenColorIO config version is compatible"""
- if not CachedData.config_version_data.get(config_path):
- if compatibility_check():
- # TODO: refactor this so it is not imported but part of this file
- from ayon_core.scripts.ocio_wrapper import _get_version_data
-
- CachedData.config_version_data[config_path] = \
- _get_version_data(config_path)
-
- else:
- # python environment is not compatible with PyOpenColorIO
- # needs to be run in subprocess
- CachedData.config_version_data[config_path] = \
- _get_wrapped_with_subprocess(
- "config", "get_version", config_path=config_path
- )
+ version_data = get_config_version_data(config_path)
# check major version
- if CachedData.config_version_data[config_path]["major"] != major:
+ if version_data["major"] != major:
return False
# check minor version
- if minor and CachedData.config_version_data[config_path]["minor"] != minor:
+ if minor is not None and version_data["minor"] != minor:
return False
- # compatible
return True
@@ -514,23 +490,19 @@ def get_ocio_config_colorspaces(config_path):
Returns:
dict: colorspace and family in couple
+
"""
- if not CachedData.ocio_config_colorspaces.get(config_path):
- if not compatibility_check():
- # python environment is not compatible with PyOpenColorIO
- # needs to be run in subprocess
- CachedData.ocio_config_colorspaces[config_path] = \
- _get_wrapped_with_subprocess(
- "config", "get_colorspace", in_path=config_path
- )
+ if config_path not in CachedData.ocio_config_colorspaces:
+ if has_compatible_ocio_package():
+ config_colorspaces = _get_ocio_config_colorspaces(config_path)
else:
- # TODO: refactor this so it is not imported but part of this file
- from ayon_core.scripts.ocio_wrapper import _get_colorspace_data
+ config_colorspaces = _get_wrapped_with_subprocess(
+ "get_ocio_config_colorspaces",
+ config_path=config_path
+ )
+ CachedData.ocio_config_colorspaces[config_path] = config_colorspaces
- CachedData.ocio_config_colorspaces[config_path] = \
- _get_colorspace_data(config_path)
-
- return CachedData.ocio_config_colorspaces[config_path]
+ return deepcopy(CachedData.ocio_config_colorspaces[config_path])
def convert_colorspace_enumerator_item(
@@ -540,11 +512,12 @@ def convert_colorspace_enumerator_item(
"""Convert colorspace enumerator item to dictionary
Args:
- colorspace_item (str): colorspace and family in couple
- config_items (dict[str,dict]): colorspace data
+ colorspace_enum_item (str): Colorspace and family in couple.
+ config_items (dict[str,dict]): Colorspace data.
Returns:
dict: colorspace data
+
"""
if "::" not in colorspace_enum_item:
return None
@@ -603,16 +576,18 @@ def get_colorspaces_enumerator_items(
Families can be used for building menu and submenus in gui.
Args:
- config_items (dict[str,dict]): colorspace data coming from
- `get_ocio_config_colorspaces` function
- include_aliases (bool): include aliases in result
- include_looks (bool): include looks in result
- include_roles (bool): include roles in result
+ config_items (dict[str,dict]): Colorspace data coming from
+ `get_ocio_config_colorspaces` function.
+ include_aliases (Optional[bool]): Include aliases in result.
+ include_looks (Optional[bool]): Include looks in result.
+ include_roles (Optional[bool]): Include roles in result.
+ include_display_views (Optional[bool]): Include display views
+ in result.
Returns:
- list[tuple[str,str]]: colorspace and family in couple
+ list[tuple[str, str]]: Colorspace and family in couples.
+
"""
- labeled_colorspaces = []
aliases = set()
colorspaces = set()
looks = set()
@@ -622,86 +597,86 @@ def get_colorspaces_enumerator_items(
if items_type == "colorspaces":
for color_name, color_data in colorspace_items.items():
if color_data.get("aliases"):
- aliases.update([
+ aliases.update({
(
"aliases::{}".format(alias_name),
"[alias] {} ({})".format(alias_name, color_name)
)
for alias_name in color_data["aliases"]
- ])
+ })
colorspaces.add((
"{}::{}".format(items_type, color_name),
"[colorspace] {}".format(color_name)
))
elif items_type == "looks":
- looks.update([
+ looks.update({
(
"{}::{}".format(items_type, name),
"[look] {} ({})".format(name, role_data["process_space"])
)
for name, role_data in colorspace_items.items()
- ])
+ })
elif items_type == "displays_views":
- display_views.update([
+ display_views.update({
(
"{}::{}".format(items_type, name),
"[view (display)] {}".format(name)
)
for name, _ in colorspace_items.items()
- ])
+ })
elif items_type == "roles":
- roles.update([
+ roles.update({
(
"{}::{}".format(items_type, name),
"[role] {} ({})".format(name, role_data["colorspace"])
)
for name, role_data in colorspace_items.items()
- ])
+ })
- if roles and include_roles:
- roles = sorted(roles, key=lambda x: x[0])
- labeled_colorspaces.extend(roles)
+ def _sort_key_getter(item):
+ """Use colorspace for sorting.
- # add colorspaces as second so it is not first in menu
- colorspaces = sorted(colorspaces, key=lambda x: x[0])
- labeled_colorspaces.extend(colorspaces)
+ Args:
+ item (tuple[str, str]): Item with colorspace and label.
- if aliases and include_aliases:
- aliases = sorted(aliases, key=lambda x: x[0])
- labeled_colorspaces.extend(aliases)
+ Returns:
+ str: Colorspace.
- if looks and include_looks:
- looks = sorted(looks, key=lambda x: x[0])
- labeled_colorspaces.extend(looks)
+ """
+ return item[0]
- if display_views and include_display_views:
- display_views = sorted(display_views, key=lambda x: x[0])
- labeled_colorspaces.extend(display_views)
+ labeled_colorspaces = []
+ if include_roles:
+ labeled_colorspaces.extend(
+ sorted(roles, key=_sort_key_getter)
+ )
+
+ # Add colorspaces after roles, so it is not first in menu
+ labeled_colorspaces.extend(
+ sorted(colorspaces, key=_sort_key_getter)
+ )
+
+ if include_aliases:
+ labeled_colorspaces.extend(
+ sorted(aliases, key=_sort_key_getter)
+ )
+
+ if include_looks:
+ labeled_colorspaces.extend(
+ sorted(looks, key=_sort_key_getter)
+ )
+
+ if include_display_views:
+ labeled_colorspaces.extend(
+ sorted(display_views, key=_sort_key_getter)
+ )
return labeled_colorspaces
-# TODO: remove this in future - backward compatibility
-@deprecated("_get_wrapped_with_subprocess")
-def get_colorspace_data_subprocess(config_path):
- """[Deprecated] Get colorspace data via subprocess
-
- Wrapper for Python 2 hosts.
-
- Args:
- config_path (str): path leading to config.ocio file
-
- Returns:
- dict: colorspace and family in couple
- """
- return _get_wrapped_with_subprocess(
- "config", "get_colorspace", in_path=config_path
- )
-
-
def get_ocio_config_views(config_path):
"""Get all viewer data
@@ -713,212 +688,346 @@ def get_ocio_config_views(config_path):
Returns:
dict: `display/viewer` and viewer data
+
"""
- if not compatibility_check():
- # python environment is not compatible with PyOpenColorIO
- # needs to be run in subprocess
- return _get_wrapped_with_subprocess(
- "config", "get_views", in_path=config_path
- )
+ if has_compatible_ocio_package():
+ return _get_ocio_config_views(config_path)
- # TODO: refactor this so it is not imported but part of this file
- from ayon_core.scripts.ocio_wrapper import _get_views_data
-
- return _get_views_data(config_path)
-
-
-# TODO: remove this in future - backward compatibility
-@deprecated("_get_wrapped_with_subprocess")
-def get_views_data_subprocess(config_path):
- """[Deprecated] Get viewers data via subprocess
-
- Wrapper for Python 2 hosts.
-
- Args:
- config_path (str): path leading to config.ocio file
-
- Returns:
- dict: `display/viewer` and viewer data
- """
return _get_wrapped_with_subprocess(
- "config", "get_views", in_path=config_path
+ "get_ocio_config_views",
+ config_path=config_path
)
-def get_imageio_config(
+def _get_global_config_data(
project_name,
host_name,
- project_settings=None,
- anatomy_data=None,
+ anatomy,
+ template_data,
+ imageio_global,
+ folder_id,
+ log,
+):
+ """Get global config data.
+
+ Global config from core settings is using profiles that are based on
+ host name, task name and task type. The filtered profile can define 3
+ types of config sources:
+ 1. AYON ocio addon configs.
+ 2. Custom path to ocio config.
+ 3. Path to 'ocioconfig' representation on product. Name of product can be
+ defined in settings. Product name can be regex but exact match is
+ always preferred.
+
+ None is returned when no profile is found, or when the resolved
+ config path does not exist.
+
+ Args:
+ project_name (str): Project name.
+ host_name (str): Host name.
+ anatomy (Anatomy): Project anatomy object.
+ template_data (dict[str, Any]): Template data.
+ imageio_global (dict[str, Any]): Core imageio settings.
+ folder_id (Union[str, None]): Folder id.
+ log (logging.Logger): Logger object.
+
+ Returns:
+ Union[dict[str, str], None]: Config data with path and template
+ or None.
+
+ """
+ task_name = task_type = None
+ task_data = template_data.get("task")
+ if task_data:
+ task_name = task_data["name"]
+ task_type = task_data["type"]
+
+ filter_values = {
+ "task_names": task_name,
+ "task_types": task_type,
+ "host_names": host_name,
+ }
+ profile = filter_profiles(
+ imageio_global["ocio_config_profiles"], filter_values
+ )
+ if profile is None:
+ log.info(f"No config profile matched filters {str(filter_values)}")
+ return None
+
+ profile_type = profile["type"]
+ if profile_type in ("builtin_path", "custom_path"):
+ template = profile[profile_type]
+ result = StringTemplate.format_strict_template(
+ template, template_data
+ )
+ normalized_path = str(result.normalized())
+ if not os.path.exists(normalized_path):
+ log.warning(f"Path was not found '{normalized_path}'.")
+ return None
+
+ return {
+ "path": normalized_path,
+ "template": template
+ }
+
+ # TODO decide if this is the right name for representation
+ repre_name = "ocioconfig"
+
+ folder_info = template_data.get("folder")
+ if not folder_info:
+ log.warning("Folder info is missing.")
+ return None
+ folder_path = folder_info["path"]
+
+ product_name = profile["product_name"]
+ if folder_id is None:
+ folder_entity = ayon_api.get_folder_by_path(
+ project_name, folder_path, fields={"id"}
+ )
+ if not folder_entity:
+ log.warning(f"Folder entity '{folder_path}' was not found..")
+ return None
+ folder_id = folder_entity["id"]
+
+ product_entities_by_name = {
+ product_entity["name"]: product_entity
+ for product_entity in ayon_api.get_products(
+ project_name,
+ folder_ids={folder_id},
+ product_name_regex=product_name,
+ fields={"id", "name"}
+ )
+ }
+ if not product_entities_by_name:
+ log.debug(
+ f"No product entities were found for folder '{folder_path}' with"
+ f" product name filter '{product_name}'."
+ )
+ return None
+
+ # Try to use exact match first, otherwise use first available product
+ product_entity = product_entities_by_name.get(product_name)
+ if product_entity is None:
+ product_entity = next(iter(product_entities_by_name.values()))
+
+ product_name = product_entity["name"]
+ # Find last product version
+ version_entity = ayon_api.get_last_version_by_product_id(
+ project_name,
+ product_id=product_entity["id"],
+ fields={"id"}
+ )
+ if not version_entity:
+ log.info(
+ f"Product '{product_name}' does not have available any versions."
+ )
+ return None
+
+ # Find 'ocioconfig' representation entity
+ repre_entity = ayon_api.get_representation_by_name(
+ project_name,
+ representation_name=repre_name,
+ version_id=version_entity["id"],
+ )
+ if not repre_entity:
+ log.debug(
+ f"Representation '{repre_name}'"
+ f" not found on product '{product_name}'."
+ )
+ return None
+
+ path = get_representation_path_with_anatomy(repre_entity, anatomy)
+ template = repre_entity["attrib"]["template"]
+ return {
+ "path": path,
+ "template": template,
+ }
+
+
+def get_imageio_config_preset(
+ project_name,
+ folder_path,
+ task_name,
+ host_name,
anatomy=None,
- env=None
+ project_settings=None,
+ template_data=None,
+ env=None,
+ folder_id=None,
):
"""Returns config data from settings
- Config path is formatted in `path` key
- and original settings input is saved into `template` key.
+ Output contains 'path' key and 'template' key holds its template.
+
+ Template data can be prepared with 'get_template_data'.
Args:
- project_name (str): project name
- host_name (str): host name
+ project_name (str): Project name.
+ folder_path (str): Folder path.
+ task_name (str): Task name.
+ host_name (str): Host name.
+ anatomy (Optional[Anatomy]): Project anatomy object.
project_settings (Optional[dict]): Project settings.
- anatomy_data (Optional[dict]): anatomy formatting data.
- anatomy (Optional[Anatomy]): Anatomy object.
- env (Optional[dict]): Environment variables.
+ template_data (Optional[dict]): Template data used for
+ template formatting.
+ env (Optional[dict]): Environment variables. Environments are used
+ for template formatting too. Values from 'os.environ' are used
+ when not provided.
+ folder_id (Optional[str]): Folder id. Is used only when config path
+ is received from published representation. Is autofilled when
+ not provided.
Returns:
dict: config path data or empty dict
+
"""
- project_settings = project_settings or get_project_settings(project_name)
- anatomy = anatomy or Anatomy(project_name)
-
- if not anatomy_data:
- from ayon_core.pipeline.context_tools import (
- get_current_context_template_data)
- anatomy_data = get_current_context_template_data()
-
- formatting_data = deepcopy(anatomy_data)
-
- # Add project roots to anatomy data
- formatting_data["root"] = anatomy.roots
- formatting_data["platform"] = platform.system().lower()
+ if not project_settings:
+ project_settings = get_project_settings(project_name)
# Get colorspace settings
imageio_global, imageio_host = _get_imageio_settings(
- project_settings, host_name)
+ project_settings, host_name
+ )
+ # Global color management must be enabled to be able to use host settings
+ if not imageio_global["activate_global_color_management"]:
+ log.info("Colorspace management is disabled globally.")
+ return {}
# Host 'ocio_config' is optional
host_ocio_config = imageio_host.get("ocio_config") or {}
-
- # Global color management must be enabled to be able to use host settings
- activate_color_management = imageio_global.get(
- "activate_global_color_management")
- # TODO: remove this in future - backward compatibility
- # For already saved overrides from previous version look for 'enabled'
- # on host settings.
- if activate_color_management is None:
- activate_color_management = host_ocio_config.get("enabled", False)
-
- if not activate_color_management:
- # if global settings are disabled return empty dict because
- # it is expected that no colorspace management is needed
- log.info("Colorspace management is disabled globally.")
- return {}
+ # TODO remove
+ # - backward compatibility when host settings had only 'enabled' flag
+ # the flag was split into 'activate_global_color_management'
+ # and 'override_global_config'
+ host_ocio_config_enabled = host_ocio_config.get("enabled", False)
# Check if host settings group is having 'activate_host_color_management'
# - if it does not have activation key then default it to True so it uses
# global settings
- # This is for backward compatibility.
- # TODO: in future rewrite this to be more explicit
activate_host_color_management = imageio_host.get(
- "activate_host_color_management")
-
- # TODO: remove this in future - backward compatibility
+ "activate_host_color_management"
+ )
if activate_host_color_management is None:
- activate_host_color_management = host_ocio_config.get("enabled", False)
+ activate_host_color_management = host_ocio_config_enabled
if not activate_host_color_management:
# if host settings are disabled return False because
# it is expected that no colorspace management is needed
log.info(
- "Colorspace management for host '{}' is disabled.".format(
- host_name)
+ f"Colorspace management for host '{host_name}' is disabled."
)
return {}
- # get config path from either global or host settings
- # depending on override flag
+ project_entity = None
+ if anatomy is None:
+ project_entity = ayon_api.get_project(project_name)
+ anatomy = Anatomy(project_name, project_entity)
+
+ if env is None:
+ env = dict(os.environ.items())
+
+ if template_data:
+ template_data = deepcopy(template_data)
+ else:
+ if not project_entity:
+ project_entity = ayon_api.get_project(project_name)
+
+ folder_entity = task_entity = folder_id = None
+ if folder_path:
+ folder_entity = ayon_api.get_folder_by_path(
+ project_name, folder_path
+ )
+ folder_id = folder_entity["id"]
+
+ if folder_id and task_name:
+ task_entity = ayon_api.get_task_by_name(
+ project_name, folder_id, task_name
+ )
+ template_data = get_template_data(
+ project_entity,
+ folder_entity,
+ task_entity,
+ host_name,
+ project_settings,
+ )
+
+ # Add project roots to anatomy data
+ template_data["root"] = anatomy.roots
+ template_data["platform"] = platform.system().lower()
+
+ # Add environment variables to template data
+ template_data.update(env)
+
+ # Get config path from core or host settings
+ # - based on override flag in host settings
# TODO: in future rewrite this to be more explicit
override_global_config = host_ocio_config.get("override_global_config")
if override_global_config is None:
- # for already saved overrides from previous version
- # TODO: remove this in future - backward compatibility
- override_global_config = host_ocio_config.get("enabled")
+ override_global_config = host_ocio_config_enabled
- if override_global_config:
- config_data = _get_config_data(
- host_ocio_config["filepath"], formatting_data, env
+ if not override_global_config:
+ config_data = _get_global_config_data(
+ project_name,
+ host_name,
+ anatomy,
+ template_data,
+ imageio_global,
+ folder_id,
+ log,
)
else:
- # get config path from global
- config_global = imageio_global["ocio_config"]
- config_data = _get_config_data(
- config_global["filepath"], formatting_data, env
+ config_data = _get_host_config_data(
+ host_ocio_config["filepath"], template_data
)
if not config_data:
raise FileExistsError(
- "No OCIO config found in settings. It is "
- "either missing or there is typo in path inputs"
+ "No OCIO config found in settings. It is"
+ " either missing or there is typo in path inputs"
)
return config_data
-def _get_config_data(path_list, anatomy_data, env=None):
+def _get_host_config_data(templates, template_data):
+    """Return config data for first template which formats to an existing path.
- If template is used in path inputs,
- then it is formatted by anatomy data
- and environment variables
+ Use template data to fill possible formatting in paths.
Args:
- path_list (list[str]): list of abs paths
- anatomy_data (dict): formatting data
- env (Optional[dict]): Environment variables.
+ templates (list[str]): List of templates to config paths.
+ template_data (dict): Template data used to format templates.
Returns:
- dict: config data
+        Union[dict, None]: Config data or 'None' if templates are empty
+        or no existing path was found.
+
"""
- formatting_data = deepcopy(anatomy_data)
-
- environment_vars = env or dict(**os.environ)
-
- # format the path for potential env vars
- formatting_data.update(environment_vars)
-
- # first try host config paths
- for path_ in path_list:
- formatted_path = _format_path(path_, formatting_data)
-
- if not os.path.exists(formatted_path):
+ for template in templates:
+ formatted_path = StringTemplate.format_template(
+ template, template_data
+ )
+ if not formatted_path.solved:
continue
- return {
- "path": os.path.normpath(formatted_path),
- "template": path_
- }
-
-
-def _format_path(template_path, formatting_data):
- """Single template path formatting.
-
- Args:
- template_path (str): template string
- formatting_data (dict): data to be used for
- template formatting
-
- Returns:
- str: absolute formatted path
- """
- # format path for anatomy keys
- formatted_path = StringTemplate(template_path).format(
- formatting_data)
-
- return os.path.abspath(formatted_path)
+ path = os.path.abspath(formatted_path)
+ if os.path.exists(path):
+ return {
+ "path": os.path.normpath(path),
+ "template": template
+ }
def get_imageio_file_rules(project_name, host_name, project_settings=None):
"""Get ImageIO File rules from project settings
Args:
- project_name (str): project name
- host_name (str): host name
- project_settings (dict, optional): project settings.
- Defaults to None.
+ project_name (str): Project name.
+ host_name (str): Host name.
+ project_settings (Optional[dict]): Project settings.
Returns:
list[dict[str, Any]]: file rules data
+
"""
project_settings = project_settings or get_project_settings(project_name)
@@ -960,7 +1069,7 @@ def get_remapped_colorspace_to_native(
"""Return native colorspace name.
Args:
- ocio_colorspace_name (str | None): ocio colorspace name
+ ocio_colorspace_name (str | None): OCIO colorspace name.
host_name (str): Host name.
imageio_host_settings (dict[str, Any]): ImageIO host settings.
@@ -968,16 +1077,15 @@ def get_remapped_colorspace_to_native(
Union[str, None]: native colorspace name defined in remapping or None
"""
- CachedData.remapping.setdefault(host_name, {})
- if CachedData.remapping[host_name].get("to_native") is None:
+ host_mapping = CachedData.remapping.setdefault(host_name, {})
+ if "to_native" not in host_mapping:
remapping_rules = imageio_host_settings["remapping"]["rules"]
- CachedData.remapping[host_name]["to_native"] = {
+ host_mapping["to_native"] = {
rule["ocio_name"]: rule["host_native_name"]
for rule in remapping_rules
}
- return CachedData.remapping[host_name]["to_native"].get(
- ocio_colorspace_name)
+ return host_mapping["to_native"].get(ocio_colorspace_name)
def get_remapped_colorspace_from_native(
@@ -992,30 +1100,29 @@ def get_remapped_colorspace_from_native(
Returns:
Union[str, None]: Ocio colorspace name defined in remapping or None.
- """
- CachedData.remapping.setdefault(host_name, {})
- if CachedData.remapping[host_name].get("from_native") is None:
+ """
+ host_mapping = CachedData.remapping.setdefault(host_name, {})
+ if "from_native" not in host_mapping:
remapping_rules = imageio_host_settings["remapping"]["rules"]
- CachedData.remapping[host_name]["from_native"] = {
+ host_mapping["from_native"] = {
rule["host_native_name"]: rule["ocio_name"]
for rule in remapping_rules
}
- return CachedData.remapping[host_name]["from_native"].get(
- host_native_colorspace_name)
+ return host_mapping["from_native"].get(host_native_colorspace_name)
def _get_imageio_settings(project_settings, host_name):
"""Get ImageIO settings for global and host
Args:
- project_settings (dict): project settings.
- Defaults to None.
- host_name (str): host name
+ project_settings (dict[str, Any]): Project settings.
+ host_name (str): Host name.
Returns:
- tuple[dict, dict]: image io settings for global and host
+ tuple[dict, dict]: Image io settings for global and host.
+
"""
# get image io from global and host_name
imageio_global = project_settings["core"]["imageio"]
@@ -1033,27 +1140,41 @@ def get_colorspace_settings_from_publish_context(context_data):
Returns:
tuple | bool: config, file rules or None
+
"""
if "imageioSettings" in context_data and context_data["imageioSettings"]:
return context_data["imageioSettings"]
project_name = context_data["projectName"]
+ folder_path = context_data["folderPath"]
+ task_name = context_data["task"]
host_name = context_data["hostName"]
- anatomy_data = context_data["anatomyData"]
- project_settings_ = context_data["project_settings"]
+ anatomy = context_data["anatomy"]
+ template_data = context_data["anatomyData"]
+ project_settings = context_data["project_settings"]
+ folder_id = None
+ folder_entity = context_data.get("folderEntity")
+ if folder_entity:
+ folder_id = folder_entity["id"]
- config_data = get_imageio_config(
- project_name, host_name,
- project_settings=project_settings_,
- anatomy_data=anatomy_data
+ config_data = get_imageio_config_preset(
+ project_name,
+ folder_path,
+ task_name,
+ host_name,
+ anatomy=anatomy,
+ project_settings=project_settings,
+ template_data=template_data,
+ folder_id=folder_id,
)
# caching invalid state, so it's not recalculated all the time
file_rules = None
if config_data:
file_rules = get_imageio_file_rules(
- project_name, host_name,
- project_settings=project_settings_
+ project_name,
+ host_name,
+ project_settings=project_settings
)
# caching settings for future instance processing
@@ -1063,18 +1184,13 @@ def get_colorspace_settings_from_publish_context(context_data):
def set_colorspace_data_to_representation(
- representation, context_data,
+ representation,
+ context_data,
colorspace=None,
log=None
):
"""Sets colorspace data to representation.
- Args:
- representation (dict): publishing representation
- context_data (publish.Context.data): publishing context data
- colorspace (str, optional): colorspace name. Defaults to None.
- log (logging.Logger, optional): logger instance. Defaults to None.
-
Example:
```
{
@@ -1089,6 +1205,12 @@ def set_colorspace_data_to_representation(
}
```
+ Args:
+ representation (dict): publishing representation
+ context_data (publish.Context.data): publishing context data
+ colorspace (Optional[str]): Colorspace name.
+ log (Optional[logging.Logger]): logger instance.
+
"""
log = log or Logger.get_logger(__name__)
@@ -1122,12 +1244,15 @@ def set_colorspace_data_to_representation(
filename = filename[0]
# get matching colorspace from rules
- colorspace = colorspace or get_imageio_colorspace_from_filepath(
- filename, host_name, project_name,
- config_data=config_data,
- file_rules=file_rules,
- project_settings=project_settings
- )
+ if colorspace is None:
+ colorspace = get_imageio_file_rules_colorspace_from_filepath(
+ filename,
+ host_name,
+ project_name,
+ config_data=config_data,
+ file_rules=file_rules,
+ project_settings=project_settings
+ )
# infuse data to representation
if colorspace:
@@ -1149,47 +1274,330 @@ def get_display_view_colorspace_name(config_path, display, view):
view (str): view name e.g. "sRGB"
Returns:
- view color space name (str) e.g. "Output - sRGB"
+ str: View color space name. e.g. "Output - sRGB"
+
"""
-
- if not compatibility_check():
- # python environment is not compatible with PyOpenColorIO
- # needs to be run in subprocess
- return get_display_view_colorspace_subprocess(config_path,
- display, view)
-
- from ayon_core.scripts.ocio_wrapper import _get_display_view_colorspace_name # noqa
-
- return _get_display_view_colorspace_name(config_path, display, view)
+ if has_compatible_ocio_package():
+ return _get_display_view_colorspace_name(
+ config_path, display, view
+ )
+ return _get_wrapped_with_subprocess(
+ "get_display_view_colorspace_name",
+ config_path=config_path,
+ display=display,
+ view=view
+ )
-def get_display_view_colorspace_subprocess(config_path, display, view):
- """Returns the colorspace attribute of the (display, view) pair
- via subprocess.
+# --- Implementation of logic using 'PyOpenColorIO' ---
+def _get_ocio_config(config_path):
+ """Helper function to create OCIO config object.
+
+ Args:
+ config_path (str): Path to config.
+
+ Returns:
+        PyOpenColorIO.Config: OCIO config for the config path.
+
+ """
+ import PyOpenColorIO
+
+ config_path = os.path.abspath(config_path)
+
+ if not os.path.isfile(config_path):
+ raise IOError("Input path should be `config.ocio` file")
+
+ return PyOpenColorIO.Config.CreateFromFile(config_path)
+
+
+def _get_config_file_rules_colorspace_from_filepath(config_path, filepath):
+ """Return found colorspace data found in v2 file rules.
+
+ Args:
+ config_path (str): path string leading to config.ocio
+        filepath (str): Path to file to resolve colorspace for.
+
+ Raises:
+ IOError: Input config does not exist.
+
+ Returns:
+        Colorspace matched by the config file rules (see 'Config.getColorSpaceFromFilepath').
+
+ """
+ config = _get_ocio_config(config_path)
+
+ # TODO: use `parseColorSpaceFromString` instead if ocio v1
+ return config.getColorSpaceFromFilepath(str(filepath))
+
+
+def _get_config_version_data(config_path):
+ """Return major and minor version info.
+
+ Args:
+ config_path (str): path string leading to config.ocio
+
+ Raises:
+ IOError: Input config does not exist.
+
+ Returns:
+ dict: minor and major keys with values
+
+ """
+ config = _get_ocio_config(config_path)
+
+ return {
+ "major": config.getMajorVersion(),
+ "minor": config.getMinorVersion()
+ }
+
+
+def _get_display_view_colorspace_name(config_path, display, view):
+ """Returns the colorspace attribute of the (display, view) pair.
Args:
config_path (str): path string leading to config.ocio
display (str): display name e.g. "ACES"
view (str): view name e.g. "sRGB"
+ Raises:
+ IOError: Input config does not exist.
+
Returns:
- view color space name (str) e.g. "Output - sRGB"
+ str: view color space name e.g. "Output - sRGB"
+
+ """
+ config = _get_ocio_config(config_path)
+ return config.getDisplayViewColorSpaceName(display, view)
+
+
+def _get_ocio_config_colorspaces(config_path):
+ """Return all found colorspace data.
+
+ Args:
+ config_path (str): path string leading to config.ocio
+
+ Raises:
+ IOError: Input config does not exist.
+
+ Returns:
+ dict: aggregated available colorspaces
+
+ """
+ config = _get_ocio_config(config_path)
+
+ colorspace_data = {
+ "roles": {},
+ "colorspaces": {
+ color.getName(): {
+ "family": color.getFamily(),
+ "categories": list(color.getCategories()),
+ "aliases": list(color.getAliases()),
+ "equalitygroup": color.getEqualityGroup(),
+ }
+ for color in config.getColorSpaces()
+ },
+ "displays_views": {
+ f"{view} ({display})": {
+ "display": display,
+ "view": view
+
+ }
+ for display in config.getDisplays()
+ for view in config.getViews(display)
+ },
+ "looks": {}
+ }
+
+ # add looks
+ looks = config.getLooks()
+ if looks:
+ colorspace_data["looks"] = {
+ look.getName(): {"process_space": look.getProcessSpace()}
+ for look in looks
+ }
+
+ # add roles
+ roles = config.getRoles()
+ if roles:
+ colorspace_data["roles"] = {
+ role: {"colorspace": colorspace}
+ for (role, colorspace) in roles
+ }
+
+ return colorspace_data
+
+
+def _get_ocio_config_views(config_path):
+ """Return all found viewer data.
+
+ Args:
+ config_path (str): path string leading to config.ocio
+
+ Raises:
+ IOError: Input config does not exist.
+
+ Returns:
+ dict: aggregated available viewers
+
+ """
+ config = _get_ocio_config(config_path)
+
+ output = {}
+ for display in config.getDisplays():
+ for view in config.getViews(display):
+ colorspace = config.getDisplayViewColorSpaceName(display, view)
+ # Special token. See https://opencolorio.readthedocs.io/en/latest/guides/authoring/authoring.html#shared-views # noqa
+ if colorspace == "":
+ colorspace = display
+
+ output[f"{display}/{view}"] = {
+ "display": display,
+ "view": view,
+ "colorspace": colorspace
+ }
+
+ return output
+
+
+# --- Current context functions ---
+def get_current_context_imageio_config_preset(
+ anatomy=None,
+ project_settings=None,
+ template_data=None,
+ env=None,
+):
+ """Get ImageIO config preset for current context.
+
+ Args:
+ anatomy (Optional[Anatomy]): Current project anatomy.
+ project_settings (Optional[dict[str, Any]]): Current project settings.
+ template_data (Optional[dict[str, Any]]): Prepared template data
+ for current context.
+ env (Optional[dict[str, str]]): Custom environment variable values.
+
+ Returns:
+ dict: ImageIO config preset.
+
+ """
+ from .context_tools import get_current_context, get_current_host_name
+
+ context = get_current_context()
+ host_name = get_current_host_name()
+ return get_imageio_config_preset(
+ context["project_name"],
+ context["folder_path"],
+ context["task_name"],
+ host_name,
+ anatomy=anatomy,
+ project_settings=project_settings,
+ template_data=template_data,
+ env=env,
+ )
+
+
+# --- Deprecated functions ---
+@deprecated("has_compatible_ocio_package")
+def compatibility_check():
+ """Making sure PyOpenColorIO is importable
+
+ Deprecated:
+ Deprecated since '0.3.2'. Use `has_compatible_ocio_package` instead.
"""
- with _make_temp_json_file() as tmp_json_path:
- # Prepare subprocess arguments
- args = [
- "run", get_ocio_config_script_path(),
- "config", "get_display_view_colorspace_name",
- "--in_path", config_path,
- "--out_path", tmp_json_path,
- "--display", display,
- "--view", view
- ]
- log.debug("Executing: {}".format(" ".join(args)))
+ return has_compatible_ocio_package()
- run_ayon_launcher_process(*args, logger=log)
- # return default view colorspace name
- with open(tmp_json_path, "r") as f:
- return json.load(f)
+@deprecated("get_imageio_file_rules_colorspace_from_filepath")
+def get_imageio_colorspace_from_filepath(*args, **kwargs):
+ return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs)
+
+
+@deprecated("get_imageio_file_rules_colorspace_from_filepath")
+def get_colorspace_from_filepath(*args, **kwargs):
+ return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs)
+
+
+@deprecated("_get_wrapped_with_subprocess")
+def get_colorspace_data_subprocess(config_path):
+ """[Deprecated] Get colorspace data via subprocess
+
+ Deprecated:
+ Deprecated since OpenPype. Use `_get_wrapped_with_subprocess` instead.
+
+ Args:
+ config_path (str): path leading to config.ocio file
+
+ Returns:
+ dict: colorspace and family in couple
+ """
+ return _get_wrapped_with_subprocess(
+ "get_ocio_config_colorspaces",
+ config_path=config_path
+ )
+
+
+@deprecated("_get_wrapped_with_subprocess")
+def get_views_data_subprocess(config_path):
+ """[Deprecated] Get viewers data via subprocess
+
+ Deprecated:
+ Deprecated since OpenPype. Use `_get_wrapped_with_subprocess` instead.
+
+ Args:
+ config_path (str): path leading to config.ocio file
+
+ Returns:
+ dict: `display/viewer` and viewer data
+
+ """
+ return _get_wrapped_with_subprocess(
+ "get_ocio_config_views",
+ config_path=config_path
+ )
+
+
+@deprecated("get_imageio_config_preset")
+def get_imageio_config(
+ project_name,
+ host_name,
+ project_settings=None,
+ anatomy_data=None,
+ anatomy=None,
+ env=None
+):
+ """Returns config data from settings
+
+ Config path is formatted in `path` key
+ and original settings input is saved into `template` key.
+
+ Deprecated:
+        Deprecated since '0.3.1'. Use `get_imageio_config_preset` instead.
+
+ Args:
+ project_name (str): project name
+ host_name (str): host name
+ project_settings (Optional[dict]): Project settings.
+ anatomy_data (Optional[dict]): anatomy formatting data.
+ anatomy (Optional[Anatomy]): Anatomy object.
+ env (Optional[dict]): Environment variables.
+
+ Returns:
+ dict: config path data or empty dict
+
+ """
+ if not anatomy_data:
+ from .context_tools import get_current_context_template_data
+ anatomy_data = get_current_context_template_data()
+
+ task_name = anatomy_data.get("task", {}).get("name")
+ folder_path = anatomy_data.get("folder", {}).get("path")
+ return get_imageio_config_preset(
+ project_name,
+ folder_path,
+ task_name,
+ host_name,
+ anatomy=anatomy,
+ project_settings=project_settings,
+ template_data=anatomy_data,
+ env=env,
+ )
diff --git a/client/ayon_core/pipeline/context_tools.py b/client/ayon_core/pipeline/context_tools.py
index 33567d7280..c32d04c44c 100644
--- a/client/ayon_core/pipeline/context_tools.py
+++ b/client/ayon_core/pipeline/context_tools.py
@@ -459,36 +459,6 @@ def is_representation_from_latest(representation):
)
-def get_template_data_from_session(session=None, settings=None):
- """Template data for template fill from session keys.
-
- Args:
- session (Union[Dict[str, str], None]): The Session to use. If not
- provided use the currently active global Session.
- settings (Optional[Dict[str, Any]]): Prepared studio or project
- settings.
-
- Returns:
- Dict[str, Any]: All available data from session.
- """
-
- if session is not None:
- project_name = session["AYON_PROJECT_NAME"]
- folder_path = session["AYON_FOLDER_PATH"]
- task_name = session["AYON_TASK_NAME"]
- host_name = session["AYON_HOST_NAME"]
- else:
- context = get_current_context()
- project_name = context["project_name"]
- folder_path = context["folder_path"]
- task_name = context["task_name"]
- host_name = get_current_host_name()
-
- return get_template_data_with_names(
- project_name, folder_path, task_name, host_name, settings
- )
-
-
def get_current_context_template_data(settings=None):
"""Prepare template data for current context.
diff --git a/client/ayon_core/pipeline/create/context.py b/client/ayon_core/pipeline/create/context.py
index b8618738fb..7615ce6aee 100644
--- a/client/ayon_core/pipeline/create/context.py
+++ b/client/ayon_core/pipeline/create/context.py
@@ -1987,12 +1987,12 @@ class CreateContext:
"Folder '{}' was not found".format(folder_path)
)
- task_name = None
if task_entity is None:
- task_name = self.get_current_task_name()
- task_entity = ayon_api.get_task_by_name(
- project_name, folder_entity["id"], task_name
- )
+ current_task_name = self.get_current_task_name()
+ if current_task_name:
+ task_entity = ayon_api.get_task_by_name(
+ project_name, folder_entity["id"], current_task_name
+ )
if pre_create_data is None:
pre_create_data = {}
@@ -2018,7 +2018,7 @@ class CreateContext:
instance_data = {
"folderPath": folder_entity["path"],
- "task": task_name,
+ "task": task_entity["name"] if task_entity else None,
"productType": creator.product_type,
"variant": variant
}
@@ -2053,7 +2053,7 @@ class CreateContext:
exc_info = sys.exc_info()
self.log.warning(error_message.format(identifier, exc_info[1]))
- except:
+ except: # noqa: E722
add_traceback = True
exc_info = sys.exc_info()
self.log.warning(
@@ -2163,7 +2163,7 @@ class CreateContext:
exc_info = sys.exc_info()
self.log.warning(error_message.format(identifier, exc_info[1]))
- except:
+ except: # noqa: E722
failed = True
add_traceback = True
exc_info = sys.exc_info()
@@ -2197,7 +2197,7 @@ class CreateContext:
try:
convertor.find_instances()
- except:
+ except: # noqa: E722
failed_info.append(
prepare_failed_convertor_operation_info(
convertor.identifier, sys.exc_info()
@@ -2373,7 +2373,7 @@ class CreateContext:
exc_info = sys.exc_info()
self.log.warning(error_message.format(identifier, exc_info[1]))
- except:
+ except: # noqa: E722
failed = True
add_traceback = True
exc_info = sys.exc_info()
@@ -2440,7 +2440,7 @@ class CreateContext:
error_message.format(identifier, exc_info[1])
)
- except:
+ except: # noqa: E722
failed = True
add_traceback = True
exc_info = sys.exc_info()
@@ -2546,7 +2546,7 @@ class CreateContext:
try:
self.run_convertor(convertor_identifier)
- except:
+ except: # noqa: E722
failed_info.append(
prepare_failed_convertor_operation_info(
convertor_identifier, sys.exc_info()
diff --git a/client/ayon_core/pipeline/publish/abstract_collect_render.py b/client/ayon_core/pipeline/publish/abstract_collect_render.py
index c50dc16380..17cab876b6 100644
--- a/client/ayon_core/pipeline/publish/abstract_collect_render.py
+++ b/client/ayon_core/pipeline/publish/abstract_collect_render.py
@@ -80,6 +80,7 @@ class RenderInstance(object):
anatomyData = attr.ib(default=None)
outputDir = attr.ib(default=None)
context = attr.ib(default=None)
+ deadline = attr.ib(default=None)
# The source instance the data of this render instance should merge into
source_instance = attr.ib(default=None, type=pyblish.api.Instance)
@@ -215,13 +216,12 @@ class AbstractCollectRender(pyblish.api.ContextPlugin):
# add additional data
data = self.add_additional_data(data)
- render_instance_dict = attr.asdict(render_instance)
- # Merge into source instance if provided, otherwise create instance
- instance = render_instance_dict.pop("source_instance", None)
+ instance = render_instance.source_instance
if instance is None:
instance = context.create_instance(render_instance.name)
+ render_instance_dict = attr.asdict(render_instance)
instance.data.update(render_instance_dict)
instance.data.update(data)
diff --git a/client/ayon_core/pipeline/template_data.py b/client/ayon_core/pipeline/template_data.py
index 526c7d35c5..d5f06d6a59 100644
--- a/client/ayon_core/pipeline/template_data.py
+++ b/client/ayon_core/pipeline/template_data.py
@@ -73,8 +73,8 @@ def get_folder_template_data(folder_entity, project_name):
- 'parent' - direct parent name, project name used if is under
project
- Required document fields:
- Folder: 'path' -> Plan to require: 'folderType'
+ Required entity fields:
+ Folder: 'path', 'folderType'
Args:
folder_entity (Dict[str, Any]): Folder entity.
@@ -101,6 +101,8 @@ def get_folder_template_data(folder_entity, project_name):
return {
"folder": {
"name": folder_name,
+ "type": folder_entity["folderType"],
+ "path": path,
},
"asset": folder_name,
"hierarchy": hierarchy,
diff --git a/client/ayon_core/pipeline/thumbnails.py b/client/ayon_core/pipeline/thumbnails.py
new file mode 100644
index 0000000000..dbb38615d8
--- /dev/null
+++ b/client/ayon_core/pipeline/thumbnails.py
@@ -0,0 +1,263 @@
+import os
+import time
+import collections
+
+import ayon_api
+
+from ayon_core.lib.local_settings import get_ayon_appdirs
+
+
+FileInfo = collections.namedtuple(
+ "FileInfo",
+ ("path", "size", "modification_time")
+)
+
+
+class ThumbnailsCache:
+ """Cache of thumbnails on local storage.
+
+    Thumbnails are cached into a predefined appdirs directory. Each project
+    has its own subfolder with thumbnails, because each project has its own
+    thumbnail id validation; file names are thumbnail ids with a matching
+    extension. Extensions are predefined (.png and .jpeg).
+
+ Cache has cleanup mechanism which is triggered on initialized by default.
+
+ The cleanup has 2 levels:
+    1. soft cleanup which removes all files that are older than 'days_alive'
+    2. max size cleanup which removes files until the thumbnails folder
+       contains less than 'max_filesize'
+ - this is time consuming so it's not triggered automatically
+
+ Args:
+ cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails).
+ """
+
+ # Lifetime of thumbnails (in seconds)
+ # - default 3 days
+ days_alive = 3
+ # Max size of thumbnail directory (in bytes)
+ # - default 2 Gb
+ max_filesize = 2 * 1024 * 1024 * 1024
+
+ def __init__(self, cleanup=True):
+ self._thumbnails_dir = None
+ self._days_alive_secs = self.days_alive * 24 * 60 * 60
+ if cleanup:
+ self.cleanup()
+
+ def get_thumbnails_dir(self):
+ """Root directory where thumbnails are stored.
+
+ Returns:
+ str: Path to thumbnails root.
+ """
+
+ if self._thumbnails_dir is None:
+ self._thumbnails_dir = get_ayon_appdirs("thumbnails")
+ return self._thumbnails_dir
+
+ thumbnails_dir = property(get_thumbnails_dir)
+
+ def get_thumbnails_dir_file_info(self):
+ """Get information about all files in thumbnails directory.
+
+ Returns:
+ List[FileInfo]: List of file information about all files.
+ """
+
+ thumbnails_dir = self.thumbnails_dir
+ files_info = []
+ if not os.path.exists(thumbnails_dir):
+ return files_info
+
+ for root, _, filenames in os.walk(thumbnails_dir):
+ for filename in filenames:
+ path = os.path.join(root, filename)
+ files_info.append(FileInfo(
+ path, os.path.getsize(path), os.path.getmtime(path)
+ ))
+ return files_info
+
+ def get_thumbnails_dir_size(self, files_info=None):
+        """Get full size of thumbnail directory.
+
+ Args:
+ files_info (List[FileInfo]): Prepared file information about
+ files in thumbnail directory.
+
+ Returns:
+ int: File size of all files in thumbnail directory.
+ """
+
+ if files_info is None:
+ files_info = self.get_thumbnails_dir_file_info()
+
+ if not files_info:
+ return 0
+
+ return sum(
+ file_info.size
+ for file_info in files_info
+ )
+
+ def cleanup(self, check_max_size=False):
+ """Cleanup thumbnails directory.
+
+ Args:
+ check_max_size (bool): Also cleanup files to match max size of
+ thumbnails directory.
+ """
+
+ thumbnails_dir = self.get_thumbnails_dir()
+ # Skip if thumbnails dir does not exist yet
+ if not os.path.exists(thumbnails_dir):
+ return
+
+ self._soft_cleanup(thumbnails_dir)
+ if check_max_size:
+ self._max_size_cleanup(thumbnails_dir)
+
+ def _soft_cleanup(self, thumbnails_dir):
+ current_time = time.time()
+ for root, _, filenames in os.walk(thumbnails_dir):
+ for filename in filenames:
+ path = os.path.join(root, filename)
+ modification_time = os.path.getmtime(path)
+ if current_time - modification_time > self._days_alive_secs:
+ os.remove(path)
+
+ def _max_size_cleanup(self, thumbnails_dir):
+ files_info = self.get_thumbnails_dir_file_info()
+ size = self.get_thumbnails_dir_size(files_info)
+ if size < self.max_filesize:
+ return
+
+ sorted_file_info = collections.deque(
+ sorted(files_info, key=lambda item: item.modification_time)
+ )
+ diff = size - self.max_filesize
+ while diff > 0:
+ if not sorted_file_info:
+ break
+
+ file_info = sorted_file_info.popleft()
+ diff -= file_info.size
+ os.remove(file_info.path)
+
+ def get_thumbnail_filepath(self, project_name, thumbnail_id):
+ """Get thumbnail by thumbnail id.
+
+ Args:
+ project_name (str): Name of project.
+ thumbnail_id (str): Thumbnail id.
+
+ Returns:
+ Union[str, None]: Path to thumbnail image or None if thumbnail
+ is not cached yet.
+ """
+
+ if not thumbnail_id:
+ return None
+
+ for ext in (
+ ".png",
+ ".jpeg",
+ ):
+ filepath = os.path.join(
+ self.thumbnails_dir, project_name, thumbnail_id + ext
+ )
+ if os.path.exists(filepath):
+ return filepath
+ return None
+
+ def get_project_dir(self, project_name):
+ """Path to root directory for specific project.
+
+ Args:
+ project_name (str): Name of project for which root directory path
+ should be returned.
+
+ Returns:
+ str: Path to root of project's thumbnails.
+ """
+
+ return os.path.join(self.thumbnails_dir, project_name)
+
+ def make_sure_project_dir_exists(self, project_name):
+ project_dir = self.get_project_dir(project_name)
+ if not os.path.exists(project_dir):
+ os.makedirs(project_dir)
+ return project_dir
+
+ def store_thumbnail(self, project_name, thumbnail_id, content, mime_type):
+ """Store thumbnail to cache folder.
+
+ Args:
+ project_name (str): Project where the thumbnail belong to.
+ thumbnail_id (str): Thumbnail id.
+ content (bytes): Byte content of thumbnail file.
+ mime_type (str): Type of content.
+
+ Returns:
+ str: Path to cached thumbnail image file.
+ """
+
+ if mime_type == "image/png":
+ ext = ".png"
+ elif mime_type == "image/jpeg":
+ ext = ".jpeg"
+ else:
+ raise ValueError(
+ "Unknown mime type for thumbnail \"{}\"".format(mime_type))
+
+ project_dir = self.make_sure_project_dir_exists(project_name)
+ thumbnail_path = os.path.join(project_dir, thumbnail_id + ext)
+ with open(thumbnail_path, "wb") as stream:
+ stream.write(content)
+
+ current_time = time.time()
+ os.utime(thumbnail_path, (current_time, current_time))
+
+ return thumbnail_path
+
+
+class _CacheItems:
+ thumbnails_cache = ThumbnailsCache()
+
+
+def get_thumbnail_path(project_name, thumbnail_id):
+ """Get path to thumbnail image.
+
+ Args:
+ project_name (str): Project where thumbnail belongs to.
+ thumbnail_id (Union[str, None]): Thumbnail id.
+
+ Returns:
+ Union[str, None]: Path to thumbnail image or None if thumbnail
+ id is not valid or thumbnail was not possible to receive.
+
+ """
+ if not thumbnail_id:
+ return None
+
+ filepath = _CacheItems.thumbnails_cache.get_thumbnail_filepath(
+ project_name, thumbnail_id
+ )
+ if filepath is not None:
+ return filepath
+
+ # 'ayon_api' had a bug, public function
+ # 'get_thumbnail_by_id' did not return output of
+ # 'ServerAPI' method.
+ con = ayon_api.get_server_api_connection()
+ result = con.get_thumbnail_by_id(project_name, thumbnail_id)
+
+ if result is not None and result.is_valid:
+ return _CacheItems.thumbnails_cache.store_thumbnail(
+ project_name,
+ thumbnail_id,
+ result.content,
+ result.content_type
+ )
+ return None
diff --git a/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py b/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py
index f8cc81e718..ad5a5d43fc 100644
--- a/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py
+++ b/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py
@@ -33,6 +33,7 @@ import collections
import pyblish.api
import ayon_api
+from ayon_core.pipeline.template_data import get_folder_template_data
from ayon_core.pipeline.version_start import get_versioning_start
@@ -383,24 +384,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
# - 'folder', 'hierarchy', 'parent', 'folder'
folder_entity = instance.data.get("folderEntity")
if folder_entity:
- folder_name = folder_entity["name"]
- folder_path = folder_entity["path"]
- hierarchy_parts = folder_path.split("/")
- hierarchy_parts.pop(0)
- hierarchy_parts.pop(-1)
- parent_name = project_entity["name"]
- if hierarchy_parts:
- parent_name = hierarchy_parts[-1]
-
- hierarchy = "/".join(hierarchy_parts)
- anatomy_data.update({
- "asset": folder_name,
- "hierarchy": hierarchy,
- "parent": parent_name,
- "folder": {
- "name": folder_name,
- },
- })
+ folder_data = get_folder_template_data(
+ folder_entity,
+ project_entity["name"]
+ )
+ anatomy_data.update(folder_data)
return
if instance.data.get("newAssetPublishing"):
@@ -418,6 +406,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
"parent": parent_name,
"folder": {
"name": folder_name,
+ "path": instance.data["folderPath"],
+ # TODO get folder type from hierarchy
+ # Using 'Shot' is current default behavior of editorial
+ # (or 'newAssetPublishing') publishing.
+ "type": "Shot",
},
})
diff --git a/client/ayon_core/plugins/publish/integrate.py b/client/ayon_core/plugins/publish/integrate.py
index 764168edd3..865b566e6e 100644
--- a/client/ayon_core/plugins/publish/integrate.py
+++ b/client/ayon_core/plugins/publish/integrate.py
@@ -42,7 +42,7 @@ def prepare_changes(old_entity, new_entity):
Returns:
dict[str, Any]: Changes that have new entity.
-
+
"""
changes = {}
for key in set(new_entity.keys()):
@@ -108,68 +108,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
label = "Integrate Asset"
order = pyblish.api.IntegratorOrder
- families = ["workfile",
- "pointcache",
- "pointcloud",
- "proxyAbc",
- "camera",
- "animation",
- "model",
- "maxScene",
- "mayaAscii",
- "mayaScene",
- "setdress",
- "layout",
- "ass",
- "vdbcache",
- "scene",
- "vrayproxy",
- "vrayscene_layer",
- "render",
- "prerender",
- "imagesequence",
- "review",
- "rendersetup",
- "rig",
- "plate",
- "look",
- "ociolook",
- "audio",
- "yetiRig",
- "yeticache",
- "nukenodes",
- "gizmo",
- "source",
- "matchmove",
- "image",
- "assembly",
- "fbx",
- "gltf",
- "textures",
- "action",
- "harmony.template",
- "harmony.palette",
- "editorial",
- "background",
- "camerarig",
- "redshiftproxy",
- "effect",
- "xgen",
- "hda",
- "usd",
- "staticMesh",
- "skeletalMesh",
- "mvLook",
- "mvUsd",
- "mvUsdComposition",
- "mvUsdOverride",
- "online",
- "uasset",
- "blendScene",
- "yeticacheUE",
- "tycache",
- "csv_ingest_file",
- ]
default_template_name = "publish"
@@ -359,7 +297,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# Compute the resource file infos once (files belonging to the
# version instance instead of an individual representation) so
- # we can re-use those file infos per representation
+ # we can reuse those file infos per representation
resource_file_infos = self.get_files_info(
resource_destinations, anatomy
)
diff --git a/client/ayon_core/scripts/ocio_wrapper.py b/client/ayon_core/scripts/ocio_wrapper.py
index 0a78e33c1f..0414fc59ce 100644
--- a/client/ayon_core/scripts/ocio_wrapper.py
+++ b/client/ayon_core/scripts/ocio_wrapper.py
@@ -1,28 +1,31 @@
"""OpenColorIO Wrapper.
-Only to be interpreted by Python 3. It is run in subprocess in case
-Python 2 hosts needs to use it. Or it is used as module for Python 3
-processing.
-
-Providing functionality:
-- get_colorspace - console command - python 2
- - returning all available color spaces
- found in input config path.
-- _get_colorspace_data - python 3 - module function
- - returning all available colorspaces
- found in input config path.
-- get_views - console command - python 2
- - returning all available viewers
- found in input config path.
-- _get_views_data - python 3 - module function
- - returning all available viewers
- found in input config path.
+Receive OpenColorIO information and store it in JSON format for processes
+that don't have access to OpenColorIO or whose version of OpenColorIO is
+not compatible.
"""
-import click
import json
from pathlib import Path
-import PyOpenColorIO as ocio
+
+import click
+
+from ayon_core.pipeline.colorspace import (
+ has_compatible_ocio_package,
+ get_display_view_colorspace_name,
+ get_config_file_rules_colorspace_from_filepath,
+ get_config_version_data,
+ get_ocio_config_views,
+ get_ocio_config_colorspaces,
+)
+
+
+def _save_output_to_json_file(output, output_path):
+ json_path = Path(output_path)
+ with open(json_path, "w") as stream:
+ json.dump(output, stream)
+
+ print(f"Data are saved to '{json_path}'")
@click.group()
@@ -30,404 +33,185 @@ def main():
pass # noqa: WPS100
-@main.group()
-def config():
- """Config related commands group
-
- Example of use:
- > pyton.exe ./ocio_wrapper.py config *args
- """
- pass # noqa: WPS100
-
-
-@main.group()
-def colorspace():
- """Colorspace related commands group
-
- Example of use:
- > pyton.exe ./ocio_wrapper.py config *args
- """
- pass # noqa: WPS100
-
-
-@config.command(
- name="get_colorspace",
- help=(
- "return all colorspaces from config file "
- "--path input arg is required"
- )
-)
-@click.option("--in_path", required=True,
- help="path where to read ocio config file",
- type=click.Path(exists=True))
-@click.option("--out_path", required=True,
- help="path where to write output json file",
- type=click.Path())
-def get_colorspace(in_path, out_path):
+@main.command(
+ name="get_ocio_config_colorspaces",
+ help="return all colorspaces from config file")
+@click.option(
+ "--config_path",
+ required=True,
+ help="OCIO config path to read ocio config file.",
+ type=click.Path(exists=True))
+@click.option(
+ "--output_path",
+ required=True,
+ help="path where to write output json file",
+ type=click.Path())
+def _get_ocio_config_colorspaces(config_path, output_path):
"""Aggregate all colorspace to file.
- Python 2 wrapped console command
-
Args:
- in_path (str): config file path string
- out_path (str): temp json file path string
+ config_path (str): config file path string
+ output_path (str): temp json file path string
Example of use:
> pyton.exe ./ocio_wrapper.py config get_colorspace
- --in_path= --out_path=
+ --config_path --output_path
"""
- json_path = Path(out_path)
-
- out_data = _get_colorspace_data(in_path)
-
- with open(json_path, "w") as f_:
- json.dump(out_data, f_)
-
- print(f"Colorspace data are saved to '{json_path}'")
-
-
-def _get_colorspace_data(config_path):
- """Return all found colorspace data.
-
- Args:
- config_path (str): path string leading to config.ocio
-
- Raises:
- IOError: Input config does not exist.
-
- Returns:
- dict: aggregated available colorspaces
- """
- config_path = Path(config_path)
-
- if not config_path.is_file():
- raise IOError(
- f"Input path `{config_path}` should be `config.ocio` file")
-
- config = ocio.Config().CreateFromFile(str(config_path))
-
- colorspace_data = {
- "roles": {},
- "colorspaces": {
- color.getName(): {
- "family": color.getFamily(),
- "categories": list(color.getCategories()),
- "aliases": list(color.getAliases()),
- "equalitygroup": color.getEqualityGroup(),
- }
- for color in config.getColorSpaces()
- },
- "displays_views": {
- f"{view} ({display})": {
- "display": display,
- "view": view
-
- }
- for display in config.getDisplays()
- for view in config.getViews(display)
- },
- "looks": {}
- }
-
- # add looks
- looks = config.getLooks()
- if looks:
- colorspace_data["looks"] = {
- look.getName(): {"process_space": look.getProcessSpace()}
- for look in looks
- }
-
- # add roles
- roles = config.getRoles()
- if roles:
- colorspace_data["roles"] = {
- role: {"colorspace": colorspace}
- for (role, colorspace) in roles
- }
-
- return colorspace_data
-
-
-@config.command(
- name="get_views",
- help=(
- "return all viewers from config file "
- "--path input arg is required"
+ _save_output_to_json_file(
+ get_ocio_config_colorspaces(config_path),
+ output_path
)
-)
-@click.option("--in_path", required=True,
- help="path where to read ocio config file",
- type=click.Path(exists=True))
-@click.option("--out_path", required=True,
- help="path where to write output json file",
- type=click.Path())
-def get_views(in_path, out_path):
+
+
+@main.command(
+ name="get_ocio_config_views",
+ help="All viewers from config file")
+@click.option(
+ "--config_path",
+ required=True,
+ help="OCIO config path to read ocio config file.",
+ type=click.Path(exists=True))
+@click.option(
+ "--output_path",
+ required=True,
+ help="path where to write output json file",
+ type=click.Path())
+def _get_ocio_config_views(config_path, output_path):
"""Aggregate all viewers to file.
- Python 2 wrapped console command
-
Args:
- in_path (str): config file path string
- out_path (str): temp json file path string
+ config_path (str): config file path string
+ output_path (str): temp json file path string
Example of use:
> pyton.exe ./ocio_wrapper.py config get_views \
- --in_path= --out_path=
+        --config_path --output_path
"""
- json_path = Path(out_path)
-
- out_data = _get_views_data(in_path)
-
- with open(json_path, "w") as f_:
- json.dump(out_data, f_)
-
- print(f"Viewer data are saved to '{json_path}'")
-
-
-def _get_views_data(config_path):
- """Return all found viewer data.
-
- Args:
- config_path (str): path string leading to config.ocio
-
- Raises:
- IOError: Input config does not exist.
-
- Returns:
- dict: aggregated available viewers
- """
- config_path = Path(config_path)
-
- if not config_path.is_file():
- raise IOError("Input path should be `config.ocio` file")
-
- config = ocio.Config().CreateFromFile(str(config_path))
-
- data_ = {}
- for display in config.getDisplays():
- for view in config.getViews(display):
- colorspace = config.getDisplayViewColorSpaceName(display, view)
- # Special token. See https://opencolorio.readthedocs.io/en/latest/guides/authoring/authoring.html#shared-views # noqa
- if colorspace == "":
- colorspace = display
-
- data_[f"{display}/{view}"] = {
- "display": display,
- "view": view,
- "colorspace": colorspace
- }
-
- return data_
-
-
-@config.command(
- name="get_version",
- help=(
- "return major and minor version from config file "
- "--config_path input arg is required"
- "--out_path input arg is required"
+ _save_output_to_json_file(
+ get_ocio_config_views(config_path),
+ output_path
)
-)
-@click.option("--config_path", required=True,
- help="path where to read ocio config file",
- type=click.Path(exists=True))
-@click.option("--out_path", required=True,
- help="path where to write output json file",
- type=click.Path())
-def get_version(config_path, out_path):
- """Get version of config.
- Python 2 wrapped console command
+
+@main.command(
+ name="get_config_version_data",
+ help="Get major and minor version from config file")
+@click.option(
+ "--config_path",
+ required=True,
+ help="OCIO config path to read ocio config file.",
+ type=click.Path(exists=True))
+@click.option(
+ "--output_path",
+ required=True,
+ help="path where to write output json file",
+ type=click.Path())
+def _get_config_version_data(config_path, output_path):
+ """Get version of config.
Args:
config_path (str): ocio config file path string
- out_path (str): temp json file path string
+ output_path (str): temp json file path string
Example of use:
> pyton.exe ./ocio_wrapper.py config get_version \
- --config_path= --out_path=
+ --config_path --output_path
"""
- json_path = Path(out_path)
-
- out_data = _get_version_data(config_path)
-
- with open(json_path, "w") as f_:
- json.dump(out_data, f_)
-
- print(f"Config version data are saved to '{json_path}'")
-
-
-def _get_version_data(config_path):
- """Return major and minor version info.
-
- Args:
- config_path (str): path string leading to config.ocio
-
- Raises:
- IOError: Input config does not exist.
-
- Returns:
- dict: minor and major keys with values
- """
- config_path = Path(config_path)
-
- if not config_path.is_file():
- raise IOError("Input path should be `config.ocio` file")
-
- config = ocio.Config().CreateFromFile(str(config_path))
-
- return {
- "major": config.getMajorVersion(),
- "minor": config.getMinorVersion()
- }
-
-
-@colorspace.command(
- name="get_config_file_rules_colorspace_from_filepath",
- help=(
- "return colorspace from filepath "
- "--config_path - ocio config file path (input arg is required) "
- "--filepath - any file path (input arg is required) "
- "--out_path - temp json file path (input arg is required)"
+ _save_output_to_json_file(
+ get_config_version_data(config_path),
+ output_path
)
-)
-@click.option("--config_path", required=True,
- help="path where to read ocio config file",
- type=click.Path(exists=True))
-@click.option("--filepath", required=True,
- help="path to file to get colorspace from",
- type=click.Path())
-@click.option("--out_path", required=True,
- help="path where to write output json file",
- type=click.Path())
-def get_config_file_rules_colorspace_from_filepath(
- config_path, filepath, out_path
+
+
+@main.command(
+ name="get_config_file_rules_colorspace_from_filepath",
+ help="Colorspace file rules from filepath")
+@click.option(
+ "--config_path",
+ required=True,
+ help="OCIO config path to read ocio config file.",
+ type=click.Path(exists=True))
+@click.option(
+ "--filepath",
+ required=True,
+ help="Path to file to get colorspace from.",
+ type=click.Path())
+@click.option(
+ "--output_path",
+ required=True,
+ help="Path where to write output json file.",
+ type=click.Path())
+def _get_config_file_rules_colorspace_from_filepath(
+ config_path, filepath, output_path
):
"""Get colorspace from file path wrapper.
- Python 2 wrapped console command
-
Args:
config_path (str): config file path string
filepath (str): path string leading to file
- out_path (str): temp json file path string
+ output_path (str): temp json file path string
Example of use:
- > pyton.exe ./ocio_wrapper.py \
+ > python.exe ./ocio_wrapper.py \
colorspace get_config_file_rules_colorspace_from_filepath \
- --config_path= --filepath= --out_path=
+ --config_path --filepath --output_path
"""
- json_path = Path(out_path)
-
- colorspace = _get_config_file_rules_colorspace_from_filepath(
- config_path, filepath)
-
- with open(json_path, "w") as f_:
- json.dump(colorspace, f_)
-
- print(f"Colorspace name is saved to '{json_path}'")
+ _save_output_to_json_file(
+ get_config_file_rules_colorspace_from_filepath(config_path, filepath),
+ output_path
+ )
-def _get_config_file_rules_colorspace_from_filepath(config_path, filepath):
- """Return found colorspace data found in v2 file rules.
-
- Args:
- config_path (str): path string leading to config.ocio
- filepath (str): path string leading to v2 file rules
-
- Raises:
- IOError: Input config does not exist.
-
- Returns:
- dict: aggregated available colorspaces
- """
- config_path = Path(config_path)
-
- if not config_path.is_file():
- raise IOError(
- f"Input path `{config_path}` should be `config.ocio` file")
-
- config = ocio.Config().CreateFromFile(str(config_path))
-
- # TODO: use `parseColorSpaceFromString` instead if ocio v1
- colorspace = config.getColorSpaceFromFilepath(str(filepath))
-
- return colorspace
-
-
-def _get_display_view_colorspace_name(config_path, display, view):
- """Returns the colorspace attribute of the (display, view) pair.
-
- Args:
- config_path (str): path string leading to config.ocio
- display (str): display name e.g. "ACES"
- view (str): view name e.g. "sRGB"
-
-
- Raises:
- IOError: Input config does not exist.
-
- Returns:
- view color space name (str) e.g. "Output - sRGB"
- """
-
- config_path = Path(config_path)
-
- if not config_path.is_file():
- raise IOError("Input path should be `config.ocio` file")
-
- config = ocio.Config.CreateFromFile(str(config_path))
- colorspace = config.getDisplayViewColorSpaceName(display, view)
-
- return colorspace
-
-
-@config.command(
+@main.command(
name="get_display_view_colorspace_name",
help=(
- "return default view colorspace name "
- "for the given display and view "
- "--path input arg is required"
- )
-)
-@click.option("--in_path", required=True,
- help="path where to read ocio config file",
- type=click.Path(exists=True))
-@click.option("--out_path", required=True,
- help="path where to write output json file",
- type=click.Path())
-@click.option("--display", required=True,
- help="display name",
- type=click.STRING)
-@click.option("--view", required=True,
- help="view name",
- type=click.STRING)
-def get_display_view_colorspace_name(in_path, out_path,
- display, view):
+ "Default view colorspace name for the given display and view"
+ ))
+@click.option(
+ "--config_path",
+ required=True,
+ help="path where to read ocio config file",
+ type=click.Path(exists=True))
+@click.option(
+ "--display",
+ required=True,
+ help="Display name",
+ type=click.STRING)
+@click.option(
+ "--view",
+ required=True,
+ help="view name",
+ type=click.STRING)
+@click.option(
+ "--output_path",
+ required=True,
+ help="path where to write output json file",
+ type=click.Path())
+def _get_display_view_colorspace_name(
+ config_path, display, view, output_path
+):
"""Aggregate view colorspace name to file.
Wrapper command for processes without access to OpenColorIO
Args:
- in_path (str): config file path string
- out_path (str): temp json file path string
+ config_path (str): config file path string
+ output_path (str): temp json file path string
display (str): display name e.g. "ACES"
view (str): view name e.g. "sRGB"
Example of use:
> pyton.exe ./ocio_wrapper.py config \
- get_display_view_colorspace_name --in_path= \
- --out_path= --display= --view=
+ get_display_view_colorspace_name --config_path \
+ --output_path --display --view
"""
+ _save_output_to_json_file(
+ get_display_view_colorspace_name(config_path, display, view),
+ output_path
+ )
- out_data = _get_display_view_colorspace_name(in_path,
- display,
- view)
- with open(out_path, "w") as f:
- json.dump(out_data, f)
-
- print(f"Display view colorspace saved to '{out_path}'")
-
-if __name__ == '__main__':
+if __name__ == "__main__":
+ if not has_compatible_ocio_package():
+ raise RuntimeError("OpenColorIO is not available.")
main()
diff --git a/client/ayon_core/tools/adobe_webserver/app.py b/client/ayon_core/tools/adobe_webserver/app.py
index 7d97d7d66d..26bf638c91 100644
--- a/client/ayon_core/tools/adobe_webserver/app.py
+++ b/client/ayon_core/tools/adobe_webserver/app.py
@@ -104,14 +104,11 @@ class WebServerTool:
again. In that case, use existing running webserver.
Check here is easier than capturing exception from thread.
"""
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- result = True
- try:
- sock.bind((host_name, port))
- result = False
- except:
- print("Port is in use")
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
+ result = con.connect_ex((host_name, port)) == 0
+ if result:
+ print(f"Port {port} is already in use")
return result
def call(self, func):
diff --git a/client/ayon_core/tools/common_models/thumbnails.py b/client/ayon_core/tools/common_models/thumbnails.py
index 6d14783b9a..2fa1e36e5c 100644
--- a/client/ayon_core/tools/common_models/thumbnails.py
+++ b/client/ayon_core/tools/common_models/thumbnails.py
@@ -1,234 +1,15 @@
-import os
-import time
import collections
import ayon_api
-import appdirs
from ayon_core.lib import NestedCacheItem
-
-FileInfo = collections.namedtuple(
- "FileInfo",
- ("path", "size", "modification_time")
-)
-
-
-class ThumbnailsCache:
- """Cache of thumbnails on local storage.
-
- Thumbnails are cached to appdirs to predefined directory. Each project has
- own subfolder with thumbnails -> that's because each project has own
- thumbnail id validation and file names are thumbnail ids with matching
- extension. Extensions are predefined (.png and .jpeg).
-
- Cache has cleanup mechanism which is triggered on initialized by default.
-
- The cleanup has 2 levels:
- 1. soft cleanup which remove all files that are older then 'days_alive'
- 2. max size cleanup which remove all files until the thumbnails folder
- contains less then 'max_filesize'
- - this is time consuming so it's not triggered automatically
-
- Args:
- cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails).
- """
-
- # Lifetime of thumbnails (in seconds)
- # - default 3 days
- days_alive = 3
- # Max size of thumbnail directory (in bytes)
- # - default 2 Gb
- max_filesize = 2 * 1024 * 1024 * 1024
-
- def __init__(self, cleanup=True):
- self._thumbnails_dir = None
- self._days_alive_secs = self.days_alive * 24 * 60 * 60
- if cleanup:
- self.cleanup()
-
- def get_thumbnails_dir(self):
- """Root directory where thumbnails are stored.
-
- Returns:
- str: Path to thumbnails root.
- """
-
- if self._thumbnails_dir is None:
- # TODO use generic function
- directory = appdirs.user_data_dir("AYON", "Ynput")
- self._thumbnails_dir = os.path.join(directory, "thumbnails")
- return self._thumbnails_dir
-
- thumbnails_dir = property(get_thumbnails_dir)
-
- def get_thumbnails_dir_file_info(self):
- """Get information about all files in thumbnails directory.
-
- Returns:
- List[FileInfo]: List of file information about all files.
- """
-
- thumbnails_dir = self.thumbnails_dir
- files_info = []
- if not os.path.exists(thumbnails_dir):
- return files_info
-
- for root, _, filenames in os.walk(thumbnails_dir):
- for filename in filenames:
- path = os.path.join(root, filename)
- files_info.append(FileInfo(
- path, os.path.getsize(path), os.path.getmtime(path)
- ))
- return files_info
-
- def get_thumbnails_dir_size(self, files_info=None):
- """Got full size of thumbnail directory.
-
- Args:
- files_info (List[FileInfo]): Prepared file information about
- files in thumbnail directory.
-
- Returns:
- int: File size of all files in thumbnail directory.
- """
-
- if files_info is None:
- files_info = self.get_thumbnails_dir_file_info()
-
- if not files_info:
- return 0
-
- return sum(
- file_info.size
- for file_info in files_info
- )
-
- def cleanup(self, check_max_size=False):
- """Cleanup thumbnails directory.
-
- Args:
- check_max_size (bool): Also cleanup files to match max size of
- thumbnails directory.
- """
-
- thumbnails_dir = self.get_thumbnails_dir()
- # Skip if thumbnails dir does not exist yet
- if not os.path.exists(thumbnails_dir):
- return
-
- self._soft_cleanup(thumbnails_dir)
- if check_max_size:
- self._max_size_cleanup(thumbnails_dir)
-
- def _soft_cleanup(self, thumbnails_dir):
- current_time = time.time()
- for root, _, filenames in os.walk(thumbnails_dir):
- for filename in filenames:
- path = os.path.join(root, filename)
- modification_time = os.path.getmtime(path)
- if current_time - modification_time > self._days_alive_secs:
- os.remove(path)
-
- def _max_size_cleanup(self, thumbnails_dir):
- files_info = self.get_thumbnails_dir_file_info()
- size = self.get_thumbnails_dir_size(files_info)
- if size < self.max_filesize:
- return
-
- sorted_file_info = collections.deque(
- sorted(files_info, key=lambda item: item.modification_time)
- )
- diff = size - self.max_filesize
- while diff > 0:
- if not sorted_file_info:
- break
-
- file_info = sorted_file_info.popleft()
- diff -= file_info.size
- os.remove(file_info.path)
-
- def get_thumbnail_filepath(self, project_name, thumbnail_id):
- """Get thumbnail by thumbnail id.
-
- Args:
- project_name (str): Name of project.
- thumbnail_id (str): Thumbnail id.
-
- Returns:
- Union[str, None]: Path to thumbnail image or None if thumbnail
- is not cached yet.
- """
-
- if not thumbnail_id:
- return None
-
- for ext in (
- ".png",
- ".jpeg",
- ):
- filepath = os.path.join(
- self.thumbnails_dir, project_name, thumbnail_id + ext
- )
- if os.path.exists(filepath):
- return filepath
- return None
-
- def get_project_dir(self, project_name):
- """Path to root directory for specific project.
-
- Args:
- project_name (str): Name of project for which root directory path
- should be returned.
-
- Returns:
- str: Path to root of project's thumbnails.
- """
-
- return os.path.join(self.thumbnails_dir, project_name)
-
- def make_sure_project_dir_exists(self, project_name):
- project_dir = self.get_project_dir(project_name)
- if not os.path.exists(project_dir):
- os.makedirs(project_dir)
- return project_dir
-
- def store_thumbnail(self, project_name, thumbnail_id, content, mime_type):
- """Store thumbnail to cache folder.
-
- Args:
- project_name (str): Project where the thumbnail belong to.
- thumbnail_id (str): Id of thumbnail.
- content (bytes): Byte content of thumbnail file.
- mime_data (str): Type of content.
-
- Returns:
- str: Path to cached thumbnail image file.
- """
-
- if mime_type == "image/png":
- ext = ".png"
- elif mime_type == "image/jpeg":
- ext = ".jpeg"
- else:
- raise ValueError(
- "Unknown mime type for thumbnail \"{}\"".format(mime_type))
-
- project_dir = self.make_sure_project_dir_exists(project_name)
- thumbnail_path = os.path.join(project_dir, thumbnail_id + ext)
- with open(thumbnail_path, "wb") as stream:
- stream.write(content)
-
- current_time = time.time()
- os.utime(thumbnail_path, (current_time, current_time))
-
- return thumbnail_path
+from ayon_core.pipeline.thumbnails import get_thumbnail_path
class ThumbnailsModel:
entity_cache_lifetime = 240 # In seconds
def __init__(self):
- self._thumbnail_cache = ThumbnailsCache()
self._paths_cache = collections.defaultdict(dict)
self._folders_cache = NestedCacheItem(
levels=2, lifetime=self.entity_cache_lifetime)
@@ -283,28 +64,7 @@ class ThumbnailsModel:
if thumbnail_id in project_cache:
return project_cache[thumbnail_id]
- filepath = self._thumbnail_cache.get_thumbnail_filepath(
- project_name, thumbnail_id
- )
- if filepath is not None:
- project_cache[thumbnail_id] = filepath
- return filepath
-
- # 'ayon_api' had a bug, public function
- # 'get_thumbnail_by_id' did not return output of
- # 'ServerAPI' method.
- con = ayon_api.get_server_api_connection()
- result = con.get_thumbnail_by_id(project_name, thumbnail_id)
- if result is None:
- pass
-
- elif result.is_valid:
- filepath = self._thumbnail_cache.store_thumbnail(
- project_name,
- thumbnail_id,
- result.content,
- result.content_type
- )
+ filepath = get_thumbnail_path(project_name, thumbnail_id)
project_cache[thumbnail_id] = filepath
return filepath
diff --git a/client/ayon_core/tools/loader/ui/window.py b/client/ayon_core/tools/loader/ui/window.py
index 3a6f4679fa..8529a53b06 100644
--- a/client/ayon_core/tools/loader/ui/window.py
+++ b/client/ayon_core/tools/loader/ui/window.py
@@ -335,9 +335,7 @@ class LoaderWindow(QtWidgets.QWidget):
def closeEvent(self, event):
super(LoaderWindow, self).closeEvent(event)
- # Deselect project so current context will be selected
- # on next 'showEvent'
- self._controller.set_selected_project(None)
+
self._reset_on_show = True
def keyPressEvent(self, event):
diff --git a/client/ayon_core/tools/publisher/widgets/card_view_widgets.py b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py
index 47c5399cf7..4e34f9b58c 100644
--- a/client/ayon_core/tools/publisher/widgets/card_view_widgets.py
+++ b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py
@@ -52,6 +52,7 @@ class SelectionTypes:
class BaseGroupWidget(QtWidgets.QWidget):
selected = QtCore.Signal(str, str, str)
removed_selected = QtCore.Signal()
+ double_clicked = QtCore.Signal()
def __init__(self, group_name, parent):
super(BaseGroupWidget, self).__init__(parent)
@@ -192,6 +193,7 @@ class ConvertorItemsGroupWidget(BaseGroupWidget):
else:
widget = ConvertorItemCardWidget(item, self)
widget.selected.connect(self._on_widget_selection)
+            widget.double_clicked.connect(self.double_clicked)
self._widgets_by_id[item.id] = widget
self._content_layout.insertWidget(widget_idx, widget)
widget_idx += 1
@@ -254,6 +256,7 @@ class InstanceGroupWidget(BaseGroupWidget):
)
widget.selected.connect(self._on_widget_selection)
widget.active_changed.connect(self._on_active_changed)
+ widget.double_clicked.connect(self.double_clicked)
self._widgets_by_id[instance.id] = widget
self._content_layout.insertWidget(widget_idx, widget)
widget_idx += 1
@@ -271,6 +274,7 @@ class CardWidget(BaseClickableFrame):
# Group identifier of card
# - this must be set because if send when mouse is released with card id
_group_identifier = None
+ double_clicked = QtCore.Signal()
def __init__(self, parent):
super(CardWidget, self).__init__(parent)
@@ -279,6 +283,11 @@ class CardWidget(BaseClickableFrame):
self._selected = False
self._id = None
+ def mouseDoubleClickEvent(self, event):
+ super(CardWidget, self).mouseDoubleClickEvent(event)
+ if self._is_valid_double_click(event):
+ self.double_clicked.emit()
+
@property
def id(self):
"""Id of card."""
@@ -312,6 +321,9 @@ class CardWidget(BaseClickableFrame):
self.selected.emit(self._id, self._group_identifier, selection_type)
+ def _is_valid_double_click(self, event):
+ return True
+
class ContextCardWidget(CardWidget):
"""Card for global context.
@@ -527,6 +539,15 @@ class InstanceCardWidget(CardWidget):
def _on_expend_clicked(self):
self._set_expanded()
+ def _is_valid_double_click(self, event):
+ widget = self.childAt(event.pos())
+ if (
+ widget is self._active_checkbox
+ or widget is self._expand_btn
+ ):
+ return False
+ return True
+
class InstanceCardView(AbstractInstanceView):
"""Publish access to card view.
@@ -534,6 +555,8 @@ class InstanceCardView(AbstractInstanceView):
Wrapper of all widgets in card view.
"""
+ double_clicked = QtCore.Signal()
+
def __init__(self, controller, parent):
super(InstanceCardView, self).__init__(parent)
@@ -715,6 +738,7 @@ class InstanceCardView(AbstractInstanceView):
)
group_widget.active_changed.connect(self._on_active_changed)
group_widget.selected.connect(self._on_widget_selection)
+ group_widget.double_clicked.connect(self.double_clicked)
self._content_layout.insertWidget(widget_idx, group_widget)
self._widgets_by_group[group_name] = group_widget
@@ -755,6 +779,7 @@ class InstanceCardView(AbstractInstanceView):
widget = ContextCardWidget(self._content_widget)
widget.selected.connect(self._on_widget_selection)
+ widget.double_clicked.connect(self.double_clicked)
self._context_widget = widget
@@ -778,6 +803,7 @@ class InstanceCardView(AbstractInstanceView):
CONVERTOR_ITEM_GROUP, self._content_widget
)
group_widget.selected.connect(self._on_widget_selection)
+ group_widget.double_clicked.connect(self.double_clicked)
self._content_layout.insertWidget(1, group_widget)
self._convertor_items_group = group_widget
diff --git a/client/ayon_core/tools/publisher/widgets/list_view_widgets.py b/client/ayon_core/tools/publisher/widgets/list_view_widgets.py
index 3322a73be6..71be0ab1a4 100644
--- a/client/ayon_core/tools/publisher/widgets/list_view_widgets.py
+++ b/client/ayon_core/tools/publisher/widgets/list_view_widgets.py
@@ -110,6 +110,7 @@ class InstanceListItemWidget(QtWidgets.QWidget):
This is required to be able use custom checkbox on custom place.
"""
active_changed = QtCore.Signal(str, bool)
+ double_clicked = QtCore.Signal()
def __init__(self, instance, parent):
super(InstanceListItemWidget, self).__init__(parent)
@@ -149,6 +150,12 @@ class InstanceListItemWidget(QtWidgets.QWidget):
self._set_valid_property(instance.has_valid_context)
+ def mouseDoubleClickEvent(self, event):
+ widget = self.childAt(event.pos())
+ super(InstanceListItemWidget, self).mouseDoubleClickEvent(event)
+ if widget is not self._active_checkbox:
+ self.double_clicked.emit()
+
def _set_valid_property(self, valid):
if self._has_valid_context == valid:
return
@@ -209,6 +216,8 @@ class InstanceListItemWidget(QtWidgets.QWidget):
class ListContextWidget(QtWidgets.QFrame):
"""Context (or global attributes) widget."""
+ double_clicked = QtCore.Signal()
+
def __init__(self, parent):
super(ListContextWidget, self).__init__(parent)
@@ -225,6 +234,10 @@ class ListContextWidget(QtWidgets.QFrame):
self.label_widget = label_widget
+ def mouseDoubleClickEvent(self, event):
+ super(ListContextWidget, self).mouseDoubleClickEvent(event)
+ self.double_clicked.emit()
+
class InstanceListGroupWidget(QtWidgets.QFrame):
"""Widget representing group of instances.
@@ -317,6 +330,7 @@ class InstanceListGroupWidget(QtWidgets.QFrame):
class InstanceTreeView(QtWidgets.QTreeView):
"""View showing instances and their groups."""
toggle_requested = QtCore.Signal(int)
+ double_clicked = QtCore.Signal()
def __init__(self, *args, **kwargs):
super(InstanceTreeView, self).__init__(*args, **kwargs)
@@ -425,6 +439,9 @@ class InstanceListView(AbstractInstanceView):
This is public access to and from list view.
"""
+
+ double_clicked = QtCore.Signal()
+
def __init__(self, controller, parent):
super(InstanceListView, self).__init__(parent)
@@ -454,6 +471,7 @@ class InstanceListView(AbstractInstanceView):
instance_view.collapsed.connect(self._on_collapse)
instance_view.expanded.connect(self._on_expand)
instance_view.toggle_requested.connect(self._on_toggle_request)
+ instance_view.double_clicked.connect(self.double_clicked)
self._group_items = {}
self._group_widgets = {}
@@ -687,6 +705,7 @@ class InstanceListView(AbstractInstanceView):
self._active_toggle_enabled
)
widget.active_changed.connect(self._on_active_changed)
+ widget.double_clicked.connect(self.double_clicked)
self._instance_view.setIndexWidget(proxy_index, widget)
self._widgets_by_id[instance.id] = widget
@@ -717,6 +736,7 @@ class InstanceListView(AbstractInstanceView):
)
proxy_index = self._proxy_model.mapFromSource(index)
widget = ListContextWidget(self._instance_view)
+ widget.double_clicked.connect(self.double_clicked)
self._instance_view.setIndexWidget(proxy_index, widget)
self._context_widget = widget
diff --git a/client/ayon_core/tools/publisher/widgets/overview_widget.py b/client/ayon_core/tools/publisher/widgets/overview_widget.py
index dd82185830..cedf52ae01 100644
--- a/client/ayon_core/tools/publisher/widgets/overview_widget.py
+++ b/client/ayon_core/tools/publisher/widgets/overview_widget.py
@@ -18,6 +18,7 @@ class OverviewWidget(QtWidgets.QFrame):
instance_context_changed = QtCore.Signal()
create_requested = QtCore.Signal()
convert_requested = QtCore.Signal()
+ publish_tab_requested = QtCore.Signal()
anim_end_value = 200
anim_duration = 200
@@ -113,9 +114,15 @@ class OverviewWidget(QtWidgets.QFrame):
product_list_view.selection_changed.connect(
self._on_product_change
)
+ product_list_view.double_clicked.connect(
+ self.publish_tab_requested
+ )
product_view_cards.selection_changed.connect(
self._on_product_change
)
+ product_view_cards.double_clicked.connect(
+ self.publish_tab_requested
+ )
# Active instances changed
product_list_view.active_changed.connect(
self._on_active_changed
diff --git a/client/ayon_core/tools/publisher/window.py b/client/ayon_core/tools/publisher/window.py
index 123864ff6c..1b13ced317 100644
--- a/client/ayon_core/tools/publisher/window.py
+++ b/client/ayon_core/tools/publisher/window.py
@@ -258,6 +258,9 @@ class PublisherWindow(QtWidgets.QDialog):
overview_widget.convert_requested.connect(
self._on_convert_requested
)
+ overview_widget.publish_tab_requested.connect(
+ self._go_to_publish_tab
+ )
save_btn.clicked.connect(self._on_save_clicked)
reset_btn.clicked.connect(self._on_reset_clicked)
diff --git a/client/ayon_core/version.py b/client/ayon_core/version.py
index a60de0493a..275e1b1dd6 100644
--- a/client/ayon_core/version.py
+++ b/client/ayon_core/version.py
@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON core addon version."""
-__version__ = "0.3.1-dev.1"
+__version__ = "0.3.2-dev.1"
diff --git a/client/pyproject.toml b/client/pyproject.toml
index 1a0ad7e5f2..5e811321f8 100644
--- a/client/pyproject.toml
+++ b/client/pyproject.toml
@@ -16,7 +16,7 @@ aiohttp_json_rpc = "*" # TVPaint server
aiohttp-middlewares = "^2.0.0"
wsrpc_aiohttp = "^3.1.1" # websocket server
Click = "^8"
-OpenTimelineIO = "0.14.1"
+OpenTimelineIO = "0.16.0"
opencolorio = "2.2.1"
Pillow = "9.5.0"
pynput = "^1.7.2" # Timers manager - TODO remove
diff --git a/package.py b/package.py
index 79450d029f..b7b8d2dae6 100644
--- a/package.py
+++ b/package.py
@@ -1,11 +1,12 @@
name = "core"
title = "Core"
-version = "0.3.1-dev.1"
+version = "0.3.2-dev.1"
client_dir = "ayon_core"
plugin_for = ["ayon_server"]
-requires = [
- "~ayon_server-1.0.3+<2.0.0",
-]
+ayon_server_version = ">=1.0.3,<2.0.0"
+ayon_launcher_version = ">=1.0.2"
+ayon_required_addons = {}
+ayon_compatible_addons = {}
diff --git a/server/__init__.py b/server/__init__.py
index 152cc77218..79f505ccd5 100644
--- a/server/__init__.py
+++ b/server/__init__.py
@@ -1,3 +1,5 @@
+from typing import Any
+
from ayon_server.addons import BaseServerAddon
from .settings import CoreSettings, DEFAULT_VALUES
@@ -9,3 +11,53 @@ class CoreAddon(BaseServerAddon):
async def get_default_settings(self):
settings_model_cls = self.get_settings_model()
return settings_model_cls(**DEFAULT_VALUES)
+
+ async def convert_settings_overrides(
+ self,
+ source_version: str,
+ overrides: dict[str, Any],
+ ) -> dict[str, Any]:
+ self._convert_imageio_configs_0_3_1(overrides)
+ # Use super conversion
+ return await super().convert_settings_overrides(
+ source_version, overrides
+ )
+
+ def _convert_imageio_configs_0_3_1(self, overrides):
+ """Imageio config settings changed to profiles since 0.3.1."""
+ imageio_overrides = overrides.get("imageio") or {}
+ if (
+ "ocio_config" not in imageio_overrides
+ or "filepath" not in imageio_overrides["ocio_config"]
+ ):
+ return
+
+ ocio_config = imageio_overrides.pop("ocio_config")
+
+ filepath = ocio_config["filepath"]
+ if not filepath:
+ return
+ first_filepath = filepath[0]
+ ocio_config_profiles = imageio_overrides.setdefault(
+ "ocio_config_profiles", []
+ )
+ base_value = {
+ "type": "builtin_path",
+ "product_name": "",
+ "host_names": [],
+ "task_names": [],
+ "task_types": [],
+ "custom_path": "",
+ "builtin_path": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio"
+ }
+ if first_filepath in (
+ "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
+ "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio",
+ ):
+ base_value["type"] = "builtin_path"
+ base_value["builtin_path"] = first_filepath
+ else:
+ base_value["type"] = "custom_path"
+ base_value["custom_path"] = first_filepath
+
+ ocio_config_profiles.append(base_value)
diff --git a/server/settings/main.py b/server/settings/main.py
index 28a69e182d..40e16e7e91 100644
--- a/server/settings/main.py
+++ b/server/settings/main.py
@@ -54,9 +54,67 @@ class CoreImageIOFileRulesModel(BaseSettingsModel):
return value
-class CoreImageIOConfigModel(BaseSettingsModel):
- filepath: list[str] = SettingsField(
- default_factory=list, title="Config path"
+def _ocio_config_profile_types():
+ return [
+ {"value": "builtin_path", "label": "AYON built-in OCIO config"},
+ {"value": "custom_path", "label": "Path to OCIO config"},
+ {"value": "product_name", "label": "Published product"},
+ ]
+
+
+def _ocio_built_in_paths():
+ return [
+ {
+ "value": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
+ "label": "ACES 1.2",
+ "description": "Aces 1.2 OCIO config file."
+ },
+ {
+ "value": "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio",
+ "label": "Nuke default",
+ },
+ ]
+
+
+class CoreImageIOConfigProfilesModel(BaseSettingsModel):
+ _layout = "expanded"
+ host_names: list[str] = SettingsField(
+ default_factory=list,
+ title="Host names"
+ )
+ task_types: list[str] = SettingsField(
+ default_factory=list,
+ title="Task types",
+ enum_resolver=task_types_enum
+ )
+ task_names: list[str] = SettingsField(
+ default_factory=list,
+ title="Task names"
+ )
+ type: str = SettingsField(
+ title="Profile type",
+ enum_resolver=_ocio_config_profile_types,
+ conditionalEnum=True,
+ default="builtin_path",
+ section="---",
+ )
+ builtin_path: str = SettingsField(
+ "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
+ title="Built-in OCIO config",
+ enum_resolver=_ocio_built_in_paths,
+ )
+ custom_path: str = SettingsField(
+ "",
+ title="OCIO config path",
+ description="Path to OCIO config. Anatomy formatting is supported.",
+ )
+ product_name: str = SettingsField(
+ "",
+ title="Product name",
+ description=(
+ "Published product name to get OCIO config from. "
+ "Partial match is supported."
+ ),
)
@@ -65,9 +123,8 @@ class CoreImageIOBaseModel(BaseSettingsModel):
False,
title="Enable Color Management"
)
- ocio_config: CoreImageIOConfigModel = SettingsField(
- default_factory=CoreImageIOConfigModel,
- title="OCIO config"
+ ocio_config_profiles: list[CoreImageIOConfigProfilesModel] = SettingsField(
+ default_factory=list, title="OCIO config profiles"
)
file_rules: CoreImageIOFileRulesModel = SettingsField(
default_factory=CoreImageIOFileRulesModel,
@@ -186,12 +243,17 @@ class CoreSettings(BaseSettingsModel):
DEFAULT_VALUES = {
"imageio": {
"activate_global_color_management": False,
- "ocio_config": {
- "filepath": [
- "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
- "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio"
- ]
- },
+ "ocio_config_profiles": [
+ {
+ "host_names": [],
+ "task_types": [],
+ "task_names": [],
+ "type": "builtin_path",
+ "builtin_path": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
+ "custom_path": "",
+ "product_name": "",
+ }
+ ],
"file_rules": {
"activate_global_file_rules": False,
"rules": [
@@ -199,42 +261,57 @@ DEFAULT_VALUES = {
"name": "example",
"pattern": ".*(beauty).*",
"colorspace": "ACES - ACEScg",
- "ext": "exr"
+ "ext": "exr",
}
- ]
- }
+ ],
+ },
},
"studio_name": "",
"studio_code": "",
- "environments": "{\n\"STUDIO_SW\": {\n \"darwin\": \"/mnt/REPO_SW\",\n \"linux\": \"/mnt/REPO_SW\",\n \"windows\": \"P:/REPO_SW\"\n }\n}",
+ "environments": json.dumps(
+ {
+ "STUDIO_SW": {
+ "darwin": "/mnt/REPO_SW",
+ "linux": "/mnt/REPO_SW",
+ "windows": "P:/REPO_SW"
+ }
+ },
+ indent=4
+ ),
"tools": DEFAULT_TOOLS_VALUES,
"version_start_category": {
"profiles": []
},
"publish": DEFAULT_PUBLISH_VALUES,
- "project_folder_structure": json.dumps({
- "__project_root__": {
- "prod": {},
- "resources": {
- "footage": {
- "plates": {},
- "offline": {}
+ "project_folder_structure": json.dumps(
+ {
+ "__project_root__": {
+ "prod": {},
+ "resources": {
+ "footage": {
+ "plates": {},
+ "offline": {}
+ },
+ "audio": {},
+ "art_dept": {}
},
- "audio": {},
- "art_dept": {}
- },
- "editorial": {},
- "assets": {
- "characters": {},
- "locations": {}
- },
- "shots": {}
- }
- }, indent=4),
+ "editorial": {},
+ "assets": {
+ "characters": {},
+ "locations": {}
+ },
+ "shots": {}
+ }
+ },
+ indent=4
+ ),
"project_plugins": {
"windows": [],
"darwin": [],
"linux": []
},
- "project_environments": "{}"
+ "project_environments": json.dumps(
+ {},
+ indent=4
+ )
}
diff --git a/server_addon/applications/client/ayon_applications/utils.py b/server_addon/applications/client/ayon_applications/utils.py
index 234fa6c683..185779a949 100644
--- a/server_addon/applications/client/ayon_applications/utils.py
+++ b/server_addon/applications/client/ayon_applications/utils.py
@@ -281,13 +281,20 @@ def prepare_app_environments(
app.environment
]
+ task_entity = data.get("task_entity")
folder_entity = data.get("folder_entity")
# Add tools environments
groups_by_name = {}
tool_by_group_name = collections.defaultdict(dict)
- if folder_entity:
- # Make sure each tool group can be added only once
- for key in folder_entity["attrib"].get("tools") or []:
+ tools = None
+ if task_entity:
+ tools = task_entity["attrib"].get("tools")
+
+ if tools is None and folder_entity:
+ tools = folder_entity["attrib"].get("tools")
+
+ if tools:
+ for key in tools:
tool = app.manager.tools.get(key)
if not tool or not tool.is_valid_for_app(app):
continue
diff --git a/server_addon/applications/package.py b/server_addon/applications/package.py
index 500f609fc6..983749355e 100644
--- a/server_addon/applications/package.py
+++ b/server_addon/applications/package.py
@@ -1,6 +1,6 @@
name = "applications"
title = "Applications"
-version = "0.2.1"
+version = "0.2.2"
ayon_server_version = ">=1.0.7"
ayon_launcher_version = ">=1.0.2"
diff --git a/server_addon/deadline/server/__init__.py b/server_addon/deadline/server/__init__.py
index e7dcb7d347..8d2dc152cd 100644
--- a/server_addon/deadline/server/__init__.py
+++ b/server_addon/deadline/server/__init__.py
@@ -2,11 +2,13 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
-from .settings import DeadlineSettings, DEFAULT_VALUES
+from .settings import DeadlineSettings, DEFAULT_VALUES, DeadlineSiteSettings
class Deadline(BaseServerAddon):
settings_model: Type[DeadlineSettings] = DeadlineSettings
+ site_settings_model: Type[DeadlineSiteSettings] = DeadlineSiteSettings
+
async def get_default_settings(self):
settings_model_cls = self.get_settings_model()
diff --git a/server_addon/deadline/server/settings/__init__.py b/server_addon/deadline/server/settings/__init__.py
index 0307862afa..d25c0fb330 100644
--- a/server_addon/deadline/server/settings/__init__.py
+++ b/server_addon/deadline/server/settings/__init__.py
@@ -2,9 +2,11 @@ from .main import (
DeadlineSettings,
DEFAULT_VALUES,
)
+from .site_settings import DeadlineSiteSettings
__all__ = (
"DeadlineSettings",
+ "DeadlineSiteSettings",
"DEFAULT_VALUES",
)
diff --git a/server_addon/deadline/server/settings/main.py b/server_addon/deadline/server/settings/main.py
index 21a314cd2f..5d42b9b1ef 100644
--- a/server_addon/deadline/server/settings/main.py
+++ b/server_addon/deadline/server/settings/main.py
@@ -15,12 +15,6 @@ from .publish_plugins import (
)
-class ServerListSubmodel(BaseSettingsModel):
- _layout = "compact"
- name: str = SettingsField(title="Name")
- value: str = SettingsField(title="Value")
-
-
async def defined_deadline_ws_name_enum_resolver(
addon: "BaseServerAddon",
settings_variant: str = "production",
@@ -32,25 +26,40 @@ async def defined_deadline_ws_name_enum_resolver(
settings = await addon.get_studio_settings(variant=settings_variant)
- ws_urls = []
+ ws_server_name = []
for deadline_url_item in settings.deadline_urls:
- ws_urls.append(deadline_url_item.name)
+ ws_server_name.append(deadline_url_item.name)
- return ws_urls
+ return ws_server_name
+
+class ServerItemSubmodel(BaseSettingsModel):
+ """Connection info about configured DL servers."""
+ _layout = "compact"
+ name: str = SettingsField(title="Name")
+ value: str = SettingsField(title="Url")
+ require_authentication: bool = SettingsField(
+ False,
+ title="Require authentication")
+ ssl: bool = SettingsField(False,
+ title="SSL")
class DeadlineSettings(BaseSettingsModel):
- deadline_urls: list[ServerListSubmodel] = SettingsField(
+ # configured DL servers
+ deadline_urls: list[ServerItemSubmodel] = SettingsField(
default_factory=list,
- title="System Deadline Webservice URLs",
+ title="System Deadline Webservice Info",
scope=["studio"],
)
+
+ # name(key) of selected server for project
deadline_server: str = SettingsField(
- title="Project deadline server",
+ title="Project Deadline server name",
section="---",
scope=["project"],
enum_resolver=defined_deadline_ws_name_enum_resolver
)
+
publish: PublishPluginsModel = SettingsField(
default_factory=PublishPluginsModel,
title="Publish Plugins",
@@ -62,11 +71,14 @@ class DeadlineSettings(BaseSettingsModel):
return value
+
DEFAULT_VALUES = {
"deadline_urls": [
{
"name": "default",
- "value": "http://127.0.0.1:8082"
+ "value": "http://127.0.0.1:8082",
+ "require_authentication": False,
+ "ssl": False
}
],
"deadline_server": "default",
diff --git a/server_addon/deadline/server/settings/site_settings.py b/server_addon/deadline/server/settings/site_settings.py
new file mode 100644
index 0000000000..a77a6edc7e
--- /dev/null
+++ b/server_addon/deadline/server/settings/site_settings.py
@@ -0,0 +1,26 @@
+from ayon_server.settings import (
+ BaseSettingsModel,
+ SettingsField,
+)
+from .main import defined_deadline_ws_name_enum_resolver
+
+
+class CredentialPerServerModel(BaseSettingsModel):
+ """Provide credentials for configured DL servers"""
+ _layout = "expanded"
+ server_name: str = SettingsField("",
+ title="DL server name",
+ enum_resolver=defined_deadline_ws_name_enum_resolver)
+ username: str = SettingsField("",
+ title="Username")
+ password: str = SettingsField("",
+ title="Password")
+
+
+class DeadlineSiteSettings(BaseSettingsModel):
+ local_settings: list[CredentialPerServerModel] = SettingsField(
+ default_factory=list,
+ title="Local settings",
+ description="Please provide credentials for configured Deadline servers",
+ )
+
diff --git a/server_addon/houdini/package.py b/server_addon/houdini/package.py
index 4e441c76ae..6c81eba439 100644
--- a/server_addon/houdini/package.py
+++ b/server_addon/houdini/package.py
@@ -1,3 +1,3 @@
name = "houdini"
title = "Houdini"
-version = "0.2.13"
+version = "0.2.14"
diff --git a/server_addon/houdini/server/settings/publish.py b/server_addon/houdini/server/settings/publish.py
index 8e0e7f7795..9e8e796aff 100644
--- a/server_addon/houdini/server/settings/publish.py
+++ b/server_addon/houdini/server/settings/publish.py
@@ -1,4 +1,7 @@
-from ayon_server.settings import BaseSettingsModel, SettingsField
+from ayon_server.settings import (
+ BaseSettingsModel,
+ SettingsField
+)
# Publish Plugins
@@ -20,6 +23,27 @@ class CollectChunkSizeModel(BaseSettingsModel):
title="Frames Per Task")
+class AOVFilterSubmodel(BaseSettingsModel):
+ """You should use the same host name you are using for Houdini."""
+ host_name: str = SettingsField("", title="Houdini Host name")
+ value: list[str] = SettingsField(
+ default_factory=list,
+ title="AOV regex"
+ )
+
+class CollectLocalRenderInstancesModel(BaseSettingsModel):
+
+ use_deadline_aov_filter: bool = SettingsField(
+ False,
+ title="Use Deadline AOV Filter"
+ )
+
+ aov_filter: AOVFilterSubmodel = SettingsField(
+ default_factory=AOVFilterSubmodel,
+ title="Reviewable products filter"
+ )
+
+
class ValidateWorkfilePathsModel(BaseSettingsModel):
enabled: bool = SettingsField(title="Enabled")
optional: bool = SettingsField(title="Optional")
@@ -49,6 +73,10 @@ class PublishPluginsModel(BaseSettingsModel):
default_factory=CollectChunkSizeModel,
title="Collect Chunk Size."
)
+ CollectLocalRenderInstances: CollectLocalRenderInstancesModel = SettingsField(
+ default_factory=CollectLocalRenderInstancesModel,
+ title="Collect Local Render Instances."
+ )
ValidateContainers: BasicValidateModel = SettingsField(
default_factory=BasicValidateModel,
title="Validate Latest Containers.",
@@ -82,6 +110,15 @@ DEFAULT_HOUDINI_PUBLISH_SETTINGS = {
"optional": True,
"chunk_size": 999999
},
+ "CollectLocalRenderInstances": {
+ "use_deadline_aov_filter": False,
+ "aov_filter" : {
+ "host_name": "houdini",
+ "value": [
+ ".*([Bb]eauty).*"
+ ]
+ }
+ },
"ValidateContainers": {
"enabled": True,
"optional": True,
diff --git a/server_addon/maya/package.py b/server_addon/maya/package.py
index 5c6ce923aa..fe3e3039f5 100644
--- a/server_addon/maya/package.py
+++ b/server_addon/maya/package.py
@@ -1,3 +1,3 @@
name = "maya"
title = "Maya"
-version = "0.1.17"
+version = "0.1.18"
diff --git a/server_addon/maya/server/settings/publishers.py b/server_addon/maya/server/settings/publishers.py
index bc38d5f746..20523b2ca9 100644
--- a/server_addon/maya/server/settings/publishers.py
+++ b/server_addon/maya/server/settings/publishers.py
@@ -46,7 +46,6 @@ def extract_alembic_overrides_enum():
return [
{"label": "Custom Attributes", "value": "attr"},
{"label": "Custom Attributes Prefix", "value": "attrPrefix"},
- {"label": "Auto Subd", "value": "autoSubd"},
{"label": "Data Format", "value": "dataFormat"},
{"label": "Euler Filter", "value": "eulerFilter"},
{"label": "Mel Per Frame Callback", "value": "melPerFrameCallback"},
@@ -347,17 +346,6 @@ class ExtractAlembicModel(BaseSettingsModel):
families: list[str] = SettingsField(
default_factory=list,
title="Families")
- autoSubd: bool = SettingsField(
- title="Auto Subd",
- description=(
- "If this flag is present and the mesh has crease edges, crease "
- "vertices or holes, the mesh (OPolyMesh) would now be written out "
- "as an OSubD and crease info will be stored in the Alembic file. "
- "Otherwise, creases info won't be preserved in Alembic file unless"
- " a custom Boolean attribute SubDivisionMesh has been added to "
- "mesh node and its value is true."
- )
- )
eulerFilter: bool = SettingsField(
title="Euler Filter",
description="Apply Euler filter while sampling rotations."
@@ -409,6 +397,10 @@ class ExtractAlembicModel(BaseSettingsModel):
title="Write Color Sets",
description="Write vertex colors with the geometry."
)
+ writeCreases: bool = SettingsField(
+ title="Write Creases",
+ description="Write the geometry's edge and vertex crease information."
+ )
writeFaceSets: bool = SettingsField(
title="Write Face Sets",
description="Write face sets with the geometry."
@@ -1617,7 +1609,6 @@ DEFAULT_PUBLISH_SETTINGS = {
],
"attr": "",
"attrPrefix": "",
- "autoSubd": False,
"bake_attributes": [],
"bake_attribute_prefixes": [],
"dataFormat": "ogawa",
@@ -1641,7 +1632,7 @@ DEFAULT_PUBLISH_SETTINGS = {
"renderableOnly": False,
"stripNamespaces": True,
"uvsOnly": False,
- "uvWrite": False,
+ "uvWrite": True,
"userAttr": "",
"userAttrPrefix": "",
"verbose": False,
@@ -1649,6 +1640,7 @@ DEFAULT_PUBLISH_SETTINGS = {
"wholeFrameGeo": False,
"worldSpace": True,
"writeColorSets": False,
+ "writeCreases": False,
"writeFaceSets": False,
"writeNormals": True,
"writeUVSets": False,
diff --git a/server_addon/nuke/package.py b/server_addon/nuke/package.py
index bf03c4e7e7..e522b9fb5d 100644
--- a/server_addon/nuke/package.py
+++ b/server_addon/nuke/package.py
@@ -1,3 +1,3 @@
name = "nuke"
title = "Nuke"
-version = "0.1.11"
+version = "0.1.12"
diff --git a/server_addon/nuke/server/settings/publish_plugins.py b/server_addon/nuke/server/settings/publish_plugins.py
index d5b05d8715..e67f7be24f 100644
--- a/server_addon/nuke/server/settings/publish_plugins.py
+++ b/server_addon/nuke/server/settings/publish_plugins.py
@@ -125,6 +125,7 @@ class ReformatNodesConfigModel(BaseSettingsModel):
class IntermediateOutputModel(BaseSettingsModel):
name: str = SettingsField(title="Output name")
+ publish: bool = SettingsField(title="Publish")
filter: BakingStreamFilterModel = SettingsField(
title="Filter", default_factory=BakingStreamFilterModel)
read_raw: bool = SettingsField(
@@ -346,6 +347,7 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = {
"outputs": [
{
"name": "baking",
+ "publish": False,
"filter": {
"task_types": [],
"product_types": [],
@@ -401,6 +403,7 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = {
"outputs": [
{
"name": "baking",
+ "publish": False,
"filter": {
"task_types": [],
"product_types": [],
diff --git a/server_addon/traypublisher/package.py b/server_addon/traypublisher/package.py
index 4ca8ae9fd3..c138a2296d 100644
--- a/server_addon/traypublisher/package.py
+++ b/server_addon/traypublisher/package.py
@@ -1,3 +1,3 @@
name = "traypublisher"
title = "TrayPublisher"
-version = "0.1.4"
+version = "0.1.5"
diff --git a/server_addon/traypublisher/server/settings/publish_plugins.py b/server_addon/traypublisher/server/settings/publish_plugins.py
index f413c86227..99a0bbf107 100644
--- a/server_addon/traypublisher/server/settings/publish_plugins.py
+++ b/server_addon/traypublisher/server/settings/publish_plugins.py
@@ -1,4 +1,7 @@
-from ayon_server.settings import BaseSettingsModel, SettingsField
+from ayon_server.settings import (
+ BaseSettingsModel,
+ SettingsField,
+)
class ValidatePluginModel(BaseSettingsModel):
@@ -14,6 +17,45 @@ class ValidateFrameRangeModel(ValidatePluginModel):
'my_asset_to_publish.mov')"""
+class ExtractEditorialPckgFFmpegModel(BaseSettingsModel):
+ video_filters: list[str] = SettingsField(
+ default_factory=list,
+ title="Video filters"
+ )
+ audio_filters: list[str] = SettingsField(
+ default_factory=list,
+ title="Audio filters"
+ )
+ input: list[str] = SettingsField(
+ default_factory=list,
+ title="Input arguments"
+ )
+ output: list[str] = SettingsField(
+ default_factory=list,
+ title="Output arguments"
+ )
+
+
+class ExtractEditorialPckgOutputDefModel(BaseSettingsModel):
+ _layout = "expanded"
+ ext: str = SettingsField("", title="Output extension")
+
+ ffmpeg_args: ExtractEditorialPckgFFmpegModel = SettingsField(
+ default_factory=ExtractEditorialPckgFFmpegModel,
+ title="FFmpeg arguments"
+ )
+
+
+class ExtractEditorialPckgConversionModel(BaseSettingsModel):
+ """Set output definition if resource files should be converted."""
+ conversion_enabled: bool = SettingsField(True,
+ title="Conversion enabled")
+ output: ExtractEditorialPckgOutputDefModel = SettingsField(
+ default_factory=ExtractEditorialPckgOutputDefModel,
+ title="Output Definitions",
+ )
+
+
class TrayPublisherPublishPlugins(BaseSettingsModel):
CollectFrameDataFromAssetEntity: ValidatePluginModel = SettingsField(
default_factory=ValidatePluginModel,
@@ -28,6 +70,13 @@ class TrayPublisherPublishPlugins(BaseSettingsModel):
default_factory=ValidatePluginModel,
)
+ ExtractEditorialPckgConversion: ExtractEditorialPckgConversionModel = (
+ SettingsField(
+ default_factory=ExtractEditorialPckgConversionModel,
+ title="Extract Editorial Package Conversion"
+ )
+ )
+
DEFAULT_PUBLISH_PLUGINS = {
"CollectFrameDataFromAssetEntity": {
@@ -44,5 +93,24 @@ DEFAULT_PUBLISH_PLUGINS = {
"enabled": True,
"optional": True,
"active": True
+ },
+ "ExtractEditorialPckgConversion": {
+ "optional": False,
+ "conversion_enabled": True,
+ "output": {
+ "ext": "",
+ "ffmpeg_args": {
+ "video_filters": [],
+ "audio_filters": [],
+ "input": [
+ "-apply_trc gamma22"
+ ],
+ "output": [
+ "-pix_fmt yuv420p",
+ "-crf 18",
+ "-intra"
+ ]
+ }
+ }
}
}