Merge branch 'develop' into enhancement/render-product-names-templated

Mustafa Taher 2024-05-22 15:05:42 +03:00 committed by GitHub
commit ad61ced159
163 changed files with 4429 additions and 2243 deletions


@ -1,7 +1,7 @@
from ayon_applications import PreLaunchHook
from ayon_core.pipeline.colorspace import get_imageio_config
from ayon_core.pipeline.template_data import get_template_data_with_names
from ayon_core.pipeline.colorspace import get_imageio_config_preset
from ayon_core.pipeline.template_data import get_template_data
class OCIOEnvHook(PreLaunchHook):
@ -26,32 +26,38 @@ class OCIOEnvHook(PreLaunchHook):
def execute(self):
"""Hook entry method."""
template_data = get_template_data_with_names(
project_name=self.data["project_name"],
folder_path=self.data["folder_path"],
task_name=self.data["task_name"],
folder_entity = self.data["folder_entity"]
template_data = get_template_data(
self.data["project_entity"],
folder_entity=folder_entity,
task_entity=self.data["task_entity"],
host_name=self.host_name,
settings=self.data["project_settings"]
settings=self.data["project_settings"],
)
config_data = get_imageio_config(
project_name=self.data["project_name"],
host_name=self.host_name,
project_settings=self.data["project_settings"],
anatomy_data=template_data,
config_data = get_imageio_config_preset(
self.data["project_name"],
self.data["folder_path"],
self.data["task_name"],
self.host_name,
anatomy=self.data["anatomy"],
project_settings=self.data["project_settings"],
template_data=template_data,
env=self.launch_context.env,
folder_id=folder_entity["id"],
)
if config_data:
ocio_path = config_data["path"]
if self.host_name in ["nuke", "hiero"]:
ocio_path = ocio_path.replace("\\", "/")
self.log.info(
f"Setting OCIO environment to config path: {ocio_path}")
self.launch_context.env["OCIO"] = ocio_path
else:
if not config_data:
self.log.debug("OCIO not set or enabled")
return
ocio_path = config_data["path"]
if self.host_name in ["nuke", "hiero"]:
ocio_path = ocio_path.replace("\\", "/")
self.log.info(
f"Setting OCIO environment to config path: {ocio_path}")
self.launch_context.env["OCIO"] = ocio_path
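The hook above now expects prepared entities (project, folder, task) in `self.data`. Where those are not already collected, they could be fetched with `ayon_api` — a minimal hedged sketch (the project, folder and task names below are placeholder assumptions, not part of this change):
import ayon_api
# Fetch the entities that get_template_data() consumes.
project_entity = ayon_api.get_project("myProject")
folder_entity = ayon_api.get_folder_by_path("myProject", "/shots/sh010")
task_entity = ayon_api.get_task_by_name(
    "myProject", folder_entity["id"], "compositing"
)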


@ -60,7 +60,7 @@ def main(*subprocess_args):
)
)
elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True):
elif os.environ.get("AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH", True):
save = False
if os.getenv("WORKFILES_SAVE_AS"):
save = True


@ -24,7 +24,7 @@ class AERenderInstance(RenderInstance):
class CollectAERender(publish.AbstractCollectRender):
order = pyblish.api.CollectorOrder + 0.405
order = pyblish.api.CollectorOrder + 0.100
label = "Collect After Effects Render Layers"
hosts = ["aftereffects"]
@ -145,6 +145,7 @@ class CollectAERender(publish.AbstractCollectRender):
if "review" in instance.families:
# to skip ExtractReview locally
instance.families.remove("review")
instance.deadline = inst.data.get("deadline")
instances.append(instance)


@ -33,7 +33,7 @@ def load_scripts(paths):
if register:
try:
register()
except:
except: # noqa E722
traceback.print_exc()
else:
print("\nWarning! '%s' has no register function, "
@ -45,7 +45,7 @@ def load_scripts(paths):
if unregister:
try:
unregister()
except:
except: # noqa E722
traceback.print_exc()
def test_reload(mod):
@ -57,7 +57,7 @@ def load_scripts(paths):
try:
return importlib.reload(mod)
except:
except: # noqa E722
traceback.print_exc()
def test_register(mod):


@ -143,13 +143,19 @@ def deselect_all():
if obj.mode != 'OBJECT':
modes.append((obj, obj.mode))
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode='OBJECT')
context_override = create_blender_context(active=obj)
with bpy.context.temp_override(**context_override):
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
context_override = create_blender_context()
with bpy.context.temp_override(**context_override):
bpy.ops.object.select_all(action='DESELECT')
for p in modes:
bpy.context.view_layer.objects.active = p[0]
bpy.ops.object.mode_set(mode=p[1])
context_override = create_blender_context(active=p[0])
with bpy.context.temp_override(**context_override):
bpy.ops.object.mode_set(mode=p[1])
bpy.context.view_layer.objects.active = active
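The change above wraps every `bpy.ops` call in a context override, which newer Blender versions (3.2+) expect to be done through `bpy.context.temp_override()`. A minimal standalone sketch of the pattern — the override keys are assumptions; the repository's own `create_blender_context()` helper builds a richer override:
import bpy
def set_object_mode(obj, mode="OBJECT"):
    # Run the mode_set operator with an explicit context override,
    # mirroring the pattern used in deselect_all() above.
    override = {
        "object": obj,
        "active_object": obj,
        "selected_objects": [obj],
        "window": bpy.context.window,
        "screen": bpy.context.screen,
    }
    with bpy.context.temp_override(**override):
        bpy.ops.object.mode_set(mode=mode)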


@ -43,7 +43,10 @@ class AbcCameraLoader(plugin.AssetLoader):
def _process(self, libpath, asset_group, group_name):
plugin.deselect_all()
bpy.ops.wm.alembic_import(filepath=libpath)
# Force the creation of the transform cache even if the camera
# doesn't have an animation. We use the cache to update the camera.
bpy.ops.wm.alembic_import(
filepath=libpath, always_add_cache_reader=True)
objects = lib.get_selection()
@ -178,12 +181,33 @@ class AbcCameraLoader(plugin.AssetLoader):
self.log.info("Library already loaded, not updating...")
return
mat = asset_group.matrix_basis.copy()
for obj in asset_group.children:
found = False
for constraint in obj.constraints:
if constraint.type == "TRANSFORM_CACHE":
constraint.cache_file.filepath = libpath.as_posix()
found = True
break
if not found:
# This is to keep compatibility with cameras loaded with
# the old loader
# Create a new constraint for the cache file
constraint = obj.constraints.new("TRANSFORM_CACHE")
bpy.ops.cachefile.open(filepath=libpath.as_posix())
constraint.cache_file = bpy.data.cache_files[-1]
constraint.cache_file.scale = 1.0
self._remove(asset_group)
self._process(str(libpath), asset_group, object_name)
# This is a workaround to set the object path. Blender doesn't
# load the list of object paths until the object is evaluated.
# This is a hack to force the object to be evaluated.
# The modifier doesn't need to be removed because camera
# objects don't have modifiers.
obj.modifiers.new(
name='MeshSequenceCache', type='MESH_SEQUENCE_CACHE')
bpy.context.evaluated_depsgraph_get()
asset_group.matrix_basis = mat
constraint.object_path = (
constraint.cache_file.object_paths[0].path)
metadata["libpath"] = str(libpath)
metadata["representation"] = repre_entity["id"]


@ -58,3 +58,55 @@ class SelectInvalidAction(pyblish.api.Action):
self.log.info(
"Selecting invalid tools: %s" % ", ".join(sorted(names))
)
class SelectToolAction(pyblish.api.Action):
"""Select invalid output tool in Fusion when plug-in failed.
"""
label = "Select saver"
on = "failed" # This action is only available on a failed plug-in
icon = "search" # Icon from Awesome Icon
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(
context,
plugin=plugin,
)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
tools = []
for instance in errored_instances:
tool = instance.data.get("tool")
if tool is not None:
tools.append(tool)
else:
self.log.warning(
"Plug-in returned to be invalid, "
f"but has no saver for instance {instance.name}."
)
if not tools:
# Assume relevant comp is current comp and clear selection
self.log.info("No invalid tools found.")
comp = get_current_comp()
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
return
# Assume a single comp
first_tool = tools[0]
comp = first_tool.Comp()
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
names = set()
for tool in tools:
flow.Select(tool, True)
comp.SetActiveTool(tool)
names.add(tool.Name)
self.log.info(
"Selecting invalid tools: %s" % ", ".join(sorted(names))
)


@ -169,7 +169,7 @@ def validate_comp_prefs(comp=None, force_repair=False):
def _on_repair():
attributes = dict()
for key, comp_key, _label in validations:
value = folder_value[key]
value = folder_attributes[key]
comp_key_full = "Comp.FrameFormat.{}".format(comp_key)
attributes[comp_key_full] = value
comp.SetPrefs(attributes)


@ -52,7 +52,7 @@ class CollectFusionRender(
if product_type not in ["render", "image"]:
continue
task_name = context.data["task"]
task_name = inst.data["task"]
tool = inst.data["transientData"]["tool"]
instance_families = inst.data.get("families", [])
@ -115,6 +115,7 @@ class CollectFusionRender(
if "review" in instance.families:
# to skip ExtractReview locally
instance.families.remove("review")
instance.deadline = inst.data.get("deadline")
instances.append(instance)


@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-
"""Validate if instance context is the same as publish context."""
import pyblish.api
from ayon_core.hosts.fusion.api.action import SelectToolAction
from ayon_core.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError,
OptionalPyblishPluginMixin
)
class ValidateInstanceInContextFusion(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Validator to check if instance context matches context of publish.
When working in per-shot style you always publish data in context of
current asset (shot). This validator checks if this is so. It is optional
so it can be disabled when needed.
"""
# Similar to maya and houdini-equivalent `ValidateInstanceInContext`
order = ValidateContentsOrder
label = "Instance in same Context"
optional = True
hosts = ["fusion"]
actions = [SelectToolAction, RepairAction]
def process(self, instance):
if not self.is_active(instance.data):
return
instance_context = self.get_context(instance.data)
context = self.get_context(instance.context.data)
if instance_context != context:
context_label = "{} > {}".format(*context)
instance_label = "{} > {}".format(*instance_context)
raise PublishValidationError(
message=(
"Instance '{}' publishes to different asset than current "
"context: {}. Current context: {}".format(
instance.name, instance_label, context_label
)
),
description=(
"## Publishing to a different asset\n"
"There are publish instances present which are publishing "
"into a different asset than your current context.\n\n"
"Usually this is not what you want but there can be cases "
"where you might want to publish into another asset or "
"shot. If that's the case you can disable the validation "
"on the instance to ignore it."
)
)
@classmethod
def repair(cls, instance):
create_context = instance.context.data["create_context"]
instance_id = instance.data.get("instance_id")
created_instance = create_context.get_instance_by_id(
instance_id
)
if created_instance is None:
raise RuntimeError(
f"No CreatedInstances found with id '{instance_id} "
f"in {create_context.instances_by_id}"
)
context_asset, context_task = cls.get_context(instance.context.data)
created_instance["folderPath"] = context_asset
created_instance["task"] = context_task
create_context.save_changes()
@staticmethod
def get_context(data):
"""Return asset, task from publishing context data"""
return data["folderPath"], data["task"]


@ -177,7 +177,10 @@ class CollectFarmRender(publish.AbstractCollectRender):
outputFormat=info[1],
outputStartFrame=info[3],
leadingZeros=info[2],
ignoreFrameHandleCheck=True
ignoreFrameHandleCheck=True,
# TODO: inst is not available, must be determined; fix when
# reworking to Publisher
# deadline=inst.data.get("deadline")
)
render_instance.context = context


@ -1110,10 +1110,7 @@ def apply_colorspace_project():
'''
# backward compatibility layer
# TODO: remove this after some time
config_data = get_imageio_config(
project_name=get_current_project_name(),
host_name="hiero"
)
config_data = get_current_context_imageio_config_preset()
if config_data:
presets.update({


@ -51,13 +51,12 @@ def open_file(filepath):
project = hiero.core.projects()[-1]
# open project file
hiero.core.openProject(filepath.replace(os.path.sep, "/"))
# close previous project
project.close()
# Close previous project if its different to the current project.
filepath = filepath.replace(os.path.sep, "/")
if project.path().replace(os.path.sep, "/") != filepath:
# open project file
hiero.core.openProject(filepath)
project.close()
return True


@ -13,11 +13,17 @@ class CreateArnoldRop(plugin.HoudiniCreator):
# Default extension
ext = "exr"
# Default to split export and render jobs
export_job = True
# Default render target
render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
import hou
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in ["render_target", "review"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
# Remove the active, we are checking the bypass flag of the nodes
instance_data.pop("active", None)
@ -25,8 +31,6 @@ class CreateArnoldRop(plugin.HoudiniCreator):
# Add chunk size attribute
instance_data["chunkSize"] = 1
# Submit for job publishing
instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateArnoldRop, self).create(
product_name,
@ -51,7 +55,7 @@ class CreateArnoldRop(plugin.HoudiniCreator):
"ar_exr_half_precision": 1 # half precision
}
if pre_create_data.get("export_job"):
if pre_create_data.get("render_target") == "farm_split":
ass_filepath = \
"{export_dir}{product_name}/{product_name}.$F4.ass".format(
export_dir=hou.text.expandString("$HIP/pyblish/ass/"),
@ -66,23 +70,41 @@ class CreateArnoldRop(plugin.HoudiniCreator):
to_lock = ["productType", "id"]
self.lock_parameters(instance_node, to_lock)
def get_pre_create_attr_defs(self):
attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs()
def get_instance_attr_defs(self):
"""get instance attribute definitions.
Attributes defined in this method are exposed in
publish tab in the publisher UI.
"""
render_target_items = {
"local": "Local machine rendering",
"local_no_render": "Use existing frames (local)",
"farm": "Farm Rendering",
"farm_split": "Farm Rendering - Split export & render jobs",
}
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
EnumDef("render_target",
items=render_target_items,
label="Render target",
default=self.render_target),
]
def get_pre_create_attr_defs(self):
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("export_job",
label="Split export and render jobs",
default=self.export_job),
attrs = [
EnumDef("image_format",
image_format_enum,
default=self.ext,
label="Image Format Options")
label="Image Format Options"),
]
return attrs + self.get_instance_attr_defs()


@ -11,15 +11,23 @@ class CreateKarmaROP(plugin.HoudiniCreator):
product_type = "karma_rop"
icon = "magic"
# Default render target
render_target = "farm"
def create(self, product_name, instance_data, pre_create_data):
import hou # noqa
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in ["render_target", "review"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "karma"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateKarmaROP, self).create(
product_name,
@ -86,18 +94,40 @@ class CreateKarmaROP(plugin.HoudiniCreator):
to_lock = ["productType", "id"]
self.lock_parameters(instance_node, to_lock)
def get_pre_create_attr_defs(self):
attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
def get_instance_attr_defs(self):
"""get instance attribute definitions.
Attributes defined in this method are exposed in
publish tab in the publisher UI.
"""
render_target_items = {
"local": "Local machine rendering",
"local_no_render": "Use existing frames (local)",
"farm": "Farm Rendering",
}
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
EnumDef("render_target",
items=render_target_items,
label="Render target",
default=self.render_target)
]
def get_pre_create_attr_defs(self):
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
BoolDef("farm",
label="Submitting to Farm",
default=True),
attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
attrs += [
EnumDef("image_format",
image_format_enum,
default="exr",
@ -112,5 +142,6 @@ class CreateKarmaROP(plugin.HoudiniCreator):
decimals=0),
BoolDef("cam_res",
label="Camera Resolution",
default=False)
default=False),
]
return attrs + self.get_instance_attr_defs()


@ -11,18 +11,22 @@ class CreateMantraROP(plugin.HoudiniCreator):
product_type = "mantra_rop"
icon = "magic"
# Default to split export and render jobs
export_job = True
# Default render target
render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
import hou # noqa
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in ["render_target", "review"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "ifd"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateMantraROP, self).create(
product_name,
@ -46,7 +50,7 @@ class CreateMantraROP(plugin.HoudiniCreator):
"vm_picture": filepath,
}
if pre_create_data.get("export_job"):
if pre_create_data.get("render_target") == "farm_split":
ifd_filepath = \
"{export_dir}{product_name}/{product_name}.$F4.ifd".format(
export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),
@ -77,21 +81,40 @@ class CreateMantraROP(plugin.HoudiniCreator):
to_lock = ["productType", "id"]
self.lock_parameters(instance_node, to_lock)
def get_pre_create_attr_defs(self):
attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
def get_instance_attr_defs(self):
"""get instance attribute definitions.
Attributes defined in this method are exposed in
publish tab in the publisher UI.
"""
render_target_items = {
"local": "Local machine rendering",
"local_no_render": "Use existing frames (local)",
"farm": "Farm Rendering",
"farm_split": "Farm Rendering - Split export & render jobs",
}
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
EnumDef("render_target",
items=render_target_items,
label="Render target",
default=self.render_target)
]
def get_pre_create_attr_defs(self):
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("export_job",
label="Split export and render jobs",
default=self.export_job),
attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
attrs += [
EnumDef("image_format",
image_format_enum,
default="exr",
@ -100,5 +123,6 @@ class CreateMantraROP(plugin.HoudiniCreator):
label="Override Camera Resolution",
tooltip="Override the current camera "
"resolution, recommended for IPR.",
default=False)
default=False),
]
return attrs + self.get_instance_attr_defs()


@ -17,17 +17,21 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
ext = "exr"
multi_layered_mode = "No Multi-Layered EXR File"
# Default to split export and render jobs
split_render = True
# Default render target
render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in ["render_target", "review"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "Redshift_ROP"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateRedshiftROP, self).create(
product_name,
@ -99,7 +103,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
rs_filepath = f"{export_dir}{product_name}/{product_name}.$F4.rs"
parms["RS_archive_file"] = rs_filepath
if pre_create_data.get("split_render", self.split_render):
if pre_create_data.get("render_target") == "farm_split":
parms["RS_archive_enable"] = 1
instance_node.setParms(parms)
@ -118,24 +122,44 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
return super(CreateRedshiftROP, self).remove_instances(instances)
def get_instance_attr_defs(self):
"""get instance attribute definitions.
Attributes defined in this method are exposed in
publish tab in the publisher UI.
"""
render_target_items = {
"local": "Local machine rendering",
"local_no_render": "Use existing frames (local)",
"farm": "Farm Rendering",
"farm_split": "Farm Rendering - Split export & render jobs",
}
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
EnumDef("render_target",
items=render_target_items,
label="Render target",
default=self.render_target)
]
def get_pre_create_attr_defs(self):
attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
image_format_enum = [
"exr", "tif", "jpg", "png",
]
multi_layered_mode = [
"No Multi-Layered EXR File",
"Full Multi-Layered EXR File"
]
return attrs + [
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("split_render",
label="Split export and render jobs",
default=self.split_render),
attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
attrs += [
EnumDef("image_format",
image_format_enum,
default=self.ext,
@ -143,5 +167,6 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
EnumDef("multi_layered_mode",
multi_layered_mode,
default=self.multi_layered_mode,
label="Multi-Layered EXR")
label="Multi-Layered EXR"),
]
return attrs + self.get_instance_attr_defs()


@ -16,17 +16,21 @@ class CreateVrayROP(plugin.HoudiniCreator):
icon = "magic"
ext = "exr"
# Default to split export and render jobs
export_job = True
# Default render target
render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in ["render_target", "review"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "vray_renderer"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateVrayROP, self).create(
product_name,
@ -55,7 +59,7 @@ class CreateVrayROP(plugin.HoudiniCreator):
"SettingsEXR_bits_per_channel": "16" # half precision
}
if pre_create_data.get("export_job"):
if pre_create_data.get("render_target") == "farm_split":
scene_filepath = \
"{export_dir}{product_name}/{product_name}.$F4.vrscene".format(
export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
@ -143,20 +147,41 @@ class CreateVrayROP(plugin.HoudiniCreator):
return super(CreateVrayROP, self).remove_instances(instances)
def get_instance_attr_defs(self):
"""get instance attribute definitions.
Attributes defined in this method are exposed in
publish tab in the publisher UI.
"""
render_target_items = {
"local": "Local machine rendering",
"local_no_render": "Use existing frames (local)",
"farm": "Farm Rendering",
"farm_split": "Farm Rendering - Split export & render jobs",
}
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
EnumDef("render_target",
items=render_target_items,
label="Render target",
default=self.render_target)
]
def get_pre_create_attr_defs(self):
attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("export_job",
label="Split export and render jobs",
default=self.export_job),
attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
attrs += [
EnumDef("image_format",
image_format_enum,
default=self.ext,
@ -172,3 +197,4 @@ class CreateVrayROP(plugin.HoudiniCreator):
"if enabled",
default=False)
]
return attrs + self.get_instance_attr_defs()


@ -95,7 +95,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
# write workfile information to context container.
op_ctx = hou.node(CONTEXT_CONTAINER)
if not op_ctx:
op_ctx = self.create_context_node()
op_ctx = self.host.create_context_node()
workfile_data = {"workfile": current_instance.data_to_store()}
imprint(op_ctx, workfile_data)


@ -40,12 +40,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "ar_picture")
render_products = []
# Store whether we are splitting the render job (export + render)
split_render = bool(rop.parm("ar_ass_export_enable").eval())
instance.data["splitRender"] = split_render
export_prefix = None
export_products = []
if split_render:
if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "ar_ass_file", pad_character="0"
)
@ -68,7 +65,12 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
"": self.generate_expected_files(instance, beauty_product)
}
# Assume it's a multipartExr Render.
multipartExr = True
num_aovs = rop.evalParm("ar_aovs")
# TODO: Check the following logic,
# as it always assumes that AOVs are not merged.
for index in range(1, num_aovs + 1):
# Skip disabled AOVs
if not rop.evalParm("ar_enable_aov{}".format(index)):
@ -85,6 +87,14 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
files_by_aov[label] = self.generate_expected_files(instance,
aov_product)
# Set to False as soon as we have a separated aov.
multipartExr = False
# Review logic expects this key to exist and to be True
# when the render is a multipart EXR. It stays True only
# while no separate AOV products are written.
instance.data["multipartExr"] = multipartExr
for product in render_products:
self.log.debug("Found render product: {}".format(product))


@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api import lib
class CollectDataforCache(pyblish.api.InstancePlugin):
"""Collect data for caching to Deadline."""
order = pyblish.api.CollectorOrder + 0.04
# Run after Collect Frames
order = pyblish.api.CollectorOrder + 0.11
families = ["ass", "pointcache",
"mantraifd", "redshiftproxy",
"vdbcache"]


@ -0,0 +1,35 @@
import pyblish.api
class CollectFarmInstances(pyblish.api.InstancePlugin):
"""Collect instances for farm render."""
order = pyblish.api.CollectorOrder
families = ["mantra_rop",
"karma_rop",
"redshift_rop",
"arnold_rop",
"vray_rop"]
hosts = ["houdini"]
targets = ["local", "remote"]
label = "Collect farm instances"
def process(self, instance):
creator_attribute = instance.data["creator_attributes"]
# Collect Render Target
if creator_attribute.get("render_target") not in {
"farm_split", "farm"
}:
instance.data["farm"] = False
instance.data["splitRender"] = False
self.log.debug("Render on farm is disabled. "
"Skipping farm collecting.")
return
instance.data["farm"] = True
instance.data["splitRender"] = (
creator_attribute.get("render_target") == "farm_split"
)


@ -17,7 +17,7 @@ class CollectFrames(pyblish.api.InstancePlugin):
label = "Collect Frames"
families = ["vdbcache", "imagesequence", "ass",
"mantraifd", "redshiftproxy", "review",
"bgeo"]
"pointcache"]
def process(self, instance):


@ -57,6 +57,12 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
beauty_product)
}
# Review logic expects this key to exist and to be True
# when the render is a multipart EXR.
# Karma renders are multipart EXRs by default.
instance.data["multipartExr"] = True
filenames = list(render_products)
instance.data["files"] = filenames
instance.data["renderProducts"] = colorspace.ARenderProduct()


@ -0,0 +1,137 @@
import os
import pyblish.api
from ayon_core.pipeline.create import get_product_name
from ayon_core.pipeline.farm.patterning import match_aov_pattern
from ayon_core.pipeline.publish import (
get_plugin_settings,
apply_plugin_settings_automatically
)
class CollectLocalRenderInstances(pyblish.api.InstancePlugin):
"""Collect instances for local render.
Agnostic Local Render Collector.
"""
# this plugin runs after Collect Render Products
order = pyblish.api.CollectorOrder + 0.12
families = ["mantra_rop",
"karma_rop",
"redshift_rop",
"arnold_rop",
"vray_rop"]
hosts = ["houdini"]
label = "Collect local render instances"
use_deadline_aov_filter = False
aov_filter = {"host_name": "houdini",
"value": [".*([Bb]eauty).*"]}
@classmethod
def apply_settings(cls, project_settings):
# Preserve automatic settings applying logic
settings = get_plugin_settings(plugin=cls,
project_settings=project_settings,
log=cls.log,
category="houdini")
apply_plugin_settings_automatically(cls, settings, logger=cls.log)
if not cls.use_deadline_aov_filter:
# get aov_filter from collector settings
# and restructure it as match_aov_pattern requires.
cls.aov_filter = {
cls.aov_filter["host_name"]: cls.aov_filter["value"]
}
else:
# get aov_filter from deadline settings
cls.aov_filter = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"]["aov_filter"]
cls.aov_filter = {
item["name"]: item["value"]
for item in cls.aov_filter
}
def process(self, instance):
if instance.data["farm"]:
self.log.debug("Render on farm is enabled. "
"Skipping local render collecting.")
return
# Create Instance for each AOV.
context = instance.context
expectedFiles = next(iter(instance.data["expectedFiles"]), {})
product_type = "render" # is always render
product_group = get_product_name(
context.data["projectName"],
context.data["taskEntity"]["name"],
context.data["taskEntity"]["taskType"],
context.data["hostName"],
product_type,
instance.data["productName"]
)
for aov_name, aov_filepaths in expectedFiles.items():
product_name = product_group
if aov_name:
product_name = "{}_{}".format(product_name, aov_name)
# Create instance for each AOV
aov_instance = context.create_instance(product_name)
# Prepare Representation for each AOV
aov_filenames = [os.path.basename(path) for path in aov_filepaths]
staging_dir = os.path.dirname(aov_filepaths[0])
ext = aov_filepaths[0].split(".")[-1]
# Decide if instance is reviewable
preview = False
if instance.data.get("multipartExr", False):
# Add preview tag because its multipartExr.
preview = True
else:
# Add Preview tag if the AOV matches the filter.
preview = match_aov_pattern(
"houdini", self.aov_filter, aov_filenames[0]
)
preview = preview and instance.data.get("review", False)
# Support Single frame.
# The integrator wants single files to be a single
# filename instead of a list.
# More info: https://github.com/ynput/ayon-core/issues/238
if len(aov_filenames) == 1:
aov_filenames = aov_filenames[0]
aov_instance.data.update({
# 'label': label,
"task": instance.data["task"],
"folderPath": instance.data["folderPath"],
"frameStart": instance.data["frameStartHandle"],
"frameEnd": instance.data["frameEndHandle"],
"productType": product_type,
"family": product_type,
"productName": product_name,
"productGroup": product_group,
"families": ["render.local.hou", "review"],
"instance_node": instance.data["instance_node"],
"representations": [
{
"stagingDir": staging_dir,
"ext": ext,
"name": ext,
"tags": ["review"] if preview else [],
"files": aov_filenames,
"frameStart": instance.data["frameStartHandle"],
"frameEnd": instance.data["frameEndHandle"]
}
]
})
# Remove original render instance
# I can't remove it here as I still need it to trigger the render.
# context.remove(instance)


@ -46,12 +46,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "vm_picture")
render_products = []
# Store whether we are splitting the render job (export + render)
split_render = bool(rop.parm("soho_outputmode").eval())
instance.data["splitRender"] = split_render
export_prefix = None
export_products = []
if split_render:
if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "soho_diskfile", pad_character="0"
)
@ -76,6 +73,11 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
beauty_product)
}
# Assume it's a multipartExr Render.
multipartExr = True
# TODO: This logic doesn't take into considerations
# cryptomatte defined in 'Images > Cryptomatte'
aov_numbers = rop.evalParm("vm_numaux")
if aov_numbers > 0:
# get the filenames of the AOVs
@ -95,6 +97,14 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa
# Set to False as soon as we have a separated aov.
multipartExr = False
# Review logic expects this key to exist and to be True
# when the render is a multipart EXR. It stays True only
# while no separate AOV products are written.
instance.data["multipartExr"] = multipartExr
for product in render_products:
self.log.debug("Found render product: %s" % product)


@ -53,11 +53,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix")
beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix")
# Store whether we are splitting the render job (export + render)
split_render = bool(rop.parm("RS_archive_enable").eval())
instance.data["splitRender"] = split_render
export_products = []
if split_render:
if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "RS_archive_file", pad_character="0"
)
@ -77,6 +75,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
# As this is what the rop does.
beauty_suffix = ""
# Assume it's a multipartExr Render.
multipartExr = True
# Default beauty/main layer AOV
beauty_product = self.get_render_product_name(
prefix=default_prefix, suffix=beauty_suffix
@ -116,6 +117,14 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
files_by_aov[aov_suffix] = self.generate_expected_files(instance,
aov_product) # noqa
# Set to False as soon as we have a separated aov.
multipartExr = False
# Review logic expects this key to exist and to be True
# when the render is a multipart EXR. It stays True only
# while no separate AOV products are written.
instance.data["multipartExr"] = multipartExr
for product in render_products:
self.log.debug("Found render product: %s" % product)


@ -8,7 +8,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
label = "Collect Review Data"
# This specific order value is used so that
# this plugin runs after CollectRopFrameRange
order = pyblish.api.CollectorOrder + 0.1
# Also after CollectLocalRenderInstances
order = pyblish.api.CollectorOrder + 0.13
hosts = ["houdini"]
families = ["review"]
@ -28,7 +29,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
ropnode_path = instance.data["instance_node"]
ropnode = hou.node(ropnode_path)
camera_path = ropnode.parm("camera").eval()
# Get camera based on the instance_node type.
camera_path = self._get_camera_path(ropnode)
camera_node = hou.node(camera_path)
if not camera_node:
self.log.warning("No valid camera node found on review node: "
@ -55,3 +57,29 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
# Store focal length in `burninDataMembers`
burnin_members = instance.data.setdefault("burninDataMembers", {})
burnin_members["focalLength"] = focal_length
def _get_camera_path(self, ropnode):
"""Get the camera path associated with the given rop node.
This function evaluates the camera parameter according to the
type of the given rop node.
Returns:
Union[str, None]: Camera path or None.
An empty string may be returned when the camera
parameter itself is empty (i.e. no camera path is set).
"""
if ropnode.type().name() in {
"opengl", "karma", "ifd", "arnold"
}:
return ropnode.parm("camera").eval()
elif ropnode.type().name() == "Redshift_ROP":
return ropnode.parm("RS_renderCamera").eval()
elif ropnode.type().name() == "vray_renderer":
return ropnode.parm("render_camera").eval()
return None


@ -0,0 +1,22 @@
import pyblish.api
class CollectReviewableInstances(pyblish.api.InstancePlugin):
"""Collect Reviewable Instances.
Collects all instances of the specified families and sets their
"review" flag from creator_attributes["review"].
"""
order = pyblish.api.CollectorOrder
label = "Collect Reviewable Instances"
families = ["mantra_rop",
"karma_rop",
"redshift_rop",
"arnold_rop",
"vray_rop"]
def process(self, instance):
creator_attribute = instance.data["creator_attributes"]
instance.data["review"] = creator_attribute.get("review", False)


@ -47,12 +47,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
render_products = []
# TODO: add render elements if render element
# Store whether we are splitting the render job in an export + render
split_render = rop.parm("render_export_mode").eval() == "2"
instance.data["splitRender"] = split_render
export_prefix = None
export_products = []
if split_render:
if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "render_export_filepath", pad_character="0"
)
@ -72,6 +69,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
"": self.generate_expected_files(instance,
beauty_product)}
# Assume it's a multipartExr Render.
multipartExr = True
if instance.data.get("RenderElement", True):
render_element = self.get_render_element_name(rop, default_prefix)
if render_element:
@ -79,7 +79,13 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
render_products.append(renderpass)
files_by_aov[aov] = self.generate_expected_files(
instance, renderpass)
# Set to False as soon as we have a separated aov.
multipartExr = False
# Review logic expects this key to exist and to be True
# when the render is a multipart EXR. It stays True only
# while no separate AOV products are written.
instance.data["multipartExr"] = multipartExr
for product in render_products:
self.log.debug("Found render product: %s" % product)


@ -28,10 +28,15 @@ class ExtractAlembic(publish.Extractor):
staging_dir = os.path.dirname(output)
instance.data["stagingDir"] = staging_dir
file_name = os.path.basename(output)
if instance.data.get("frames"):
# list of files
files = instance.data["frames"]
else:
# single file
files = os.path.basename(output)
# We run the render
self.log.info("Writing alembic '%s' to '%s'" % (file_name,
self.log.info("Writing alembic '%s' to '%s'" % (files,
staging_dir))
render_rop(ropnode)
@ -42,7 +47,7 @@ class ExtractAlembic(publish.Extractor):
representation = {
'name': 'abc',
'ext': 'abc',
'files': file_name,
'files': files,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)


@ -19,6 +19,16 @@ class ExtractOpenGL(publish.Extractor,
def process(self, instance):
ropnode = hou.node(instance.data.get("instance_node"))
# This plugin is triggered when marking render as reviewable.
# Therefore, this plugin may also run on the wrong instances.
# TODO: Don't run this plugin on wrong instances.
# This plugin should run only on review product type
# with instance node of opengl type.
if ropnode.type().name() != "opengl":
self.log.debug("Skipping OpenGl extraction. Rop node {} "
"is not an OpenGl node.".format(ropnode.path()))
return
output = ropnode.evalParm("picture")
staging_dir = os.path.normpath(os.path.dirname(output))
instance.data["stagingDir"] = staging_dir


@ -0,0 +1,74 @@
import pyblish.api
from ayon_core.pipeline import publish
from ayon_core.hosts.houdini.api.lib import render_rop
import hou
import os
class ExtractRender(publish.Extractor):
order = pyblish.api.ExtractorOrder
label = "Extract Render"
hosts = ["houdini"]
families = ["mantra_rop",
"karma_rop",
"redshift_rop",
"arnold_rop",
"vray_rop"]
def process(self, instance):
creator_attribute = instance.data["creator_attributes"]
product_type = instance.data["productType"]
rop_node = hou.node(instance.data.get("instance_node"))
# Align split parameter value on rop node to the render target.
if instance.data["splitRender"]:
if product_type == "arnold_rop":
rop_node.setParms({"ar_ass_export_enable": 1})
elif product_type == "mantra_rop":
rop_node.setParms({"soho_outputmode": 1})
elif product_type == "redshift_rop":
rop_node.setParms({"RS_archive_enable": 1})
elif product_type == "vray_rop":
rop_node.setParms({"render_export_mode": "2"})
else:
if product_type == "arnold_rop":
rop_node.setParms({"ar_ass_export_enable": 0})
elif product_type == "mantra_rop":
rop_node.setParms({"soho_outputmode": 0})
elif product_type == "redshift_rop":
rop_node.setParms({"RS_archive_enable": 0})
elif product_type == "vray_rop":
rop_node.setParms({"render_export_mode": "1"})
if instance.data.get("farm"):
self.log.debug("Render should be processed on farm, skipping local render.")
return
if creator_attribute.get("render_target") == "local":
ropnode = hou.node(instance.data.get("instance_node"))
render_rop(ropnode)
# `ExpectedFiles` is a list that includes one dict.
expected_files = instance.data["expectedFiles"][0]
# Each key in that dict is a list of files.
# Combine lists of files into one big list.
all_frames = []
for value in expected_files.values():
if isinstance(value, str):
all_frames.append(value)
elif isinstance(value, list):
all_frames.extend(value)
# Check missing frames.
# Frames won't exist if user cancels the render.
missing_frames = [
frame
for frame in all_frames
if not os.path.exists(frame)
]
if missing_frames:
# TODO: Use user friendly error reporting.
raise RuntimeError("Failed to complete render extraction. "
"Missing output files: {}".format(
missing_frames))


@ -17,11 +17,13 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["houdini"]
families = ["workfile",
"redshift_rop",
"arnold_rop",
"usdrender",
"mantra_rop",
"karma_rop",
"usdrender",
"redshift_rop",
"arnold_rop",
"vray_rop",
"render.local.hou",
"publish.hou"]
optional = True


@ -56,6 +56,18 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
def process(self, instance):
rop_node = hou.node(instance.data["instance_node"])
# This plugin is triggered when marking render as reviewable.
# Therefore, this plugin may also run on the wrong instances.
# TODO: Don't run this plugin on wrong instances.
# This plugin should run only on review product type
# with instance node of opengl type.
if rop_node.type().name() != "opengl":
self.log.debug("Skipping Validation. Rop node {} "
"is not an OpenGl node.".format(rop_node.path()))
return
if not self.is_active(instance.data):
return
@ -66,7 +78,6 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
)
return
rop_node = hou.node(instance.data["instance_node"])
if rop_node.evalParm("colorcorrect") != 2:
# any colorspace settings other than default requires
# 'Color Correct' parm to be set to 'OpenColorIO'


@ -20,6 +20,16 @@ class ValidateSceneReview(pyblish.api.InstancePlugin):
report = []
instance_node = hou.node(instance.data.get("instance_node"))
# This plugin is triggered when marking render as reviewable.
# Therefore, this plugin may also run on the wrong instances.
# TODO: Don't run this plugin on wrong instances.
# This plugin should run only on review product type
# with instance node of opengl type.
if instance_node.type().name() != "opengl":
self.log.debug("Skipping Validation. Rop node {} "
"is not an OpenGl node.".format(instance_node.path()))
return
invalid = self.get_invalid_scene_path(instance_node)
if invalid:
report.append(invalid)


@ -0,0 +1,29 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- OPMenu Stencil.
It's used to extend the OPMenu.
-->
<menuDocument>
<menu>
<!-- Operator type and asset options. -->
<subMenu id="opmenu.vhda_options_create">
<insertBefore>opmenu.unsynchronize</insertBefore>
<scriptItem id="opmenu.vhda_create_ayon">
<insertAfter>opmenu.vhda_create</insertAfter>
<label>Create New (AYON)...</label>
<context>
</context>
<scriptCode>
<![CDATA[
from ayon_core.hosts.houdini.api.creator_node_shelves import create_interactive
node = kwargs["node"]
if node not in hou.selectedNodes():
node.setSelected(True)
create_interactive("io.openpype.creators.houdini.hda", **kwargs)
]]>
</scriptCode>
</scriptItem>
</subMenu>
</menu>
</menuDocument>


@ -6,12 +6,9 @@ import json
from typing import Any, Dict, Union
import six
import ayon_api
from ayon_core.pipeline import (
get_current_project_name,
get_current_folder_path,
get_current_task_name,
colorspace
)
from ayon_core.settings import get_project_settings
@ -372,12 +369,8 @@ def reset_colorspace():
"""
if int(get_max_version()) < 2024:
return
project_name = get_current_project_name()
colorspace_mgr = rt.ColorPipelineMgr
project_settings = get_project_settings(project_name)
max_config_data = colorspace.get_imageio_config(
project_name, "max", project_settings)
max_config_data = colorspace.get_current_context_imageio_config_preset()
if max_config_data:
ocio_config_path = max_config_data["path"]
colorspace_mgr = rt.ColorPipelineMgr
@ -392,10 +385,7 @@ def check_colorspace():
"because Max main window can't be found.")
if int(get_max_version()) >= 2024:
color_mgr = rt.ColorPipelineMgr
project_name = get_current_project_name()
project_settings = get_project_settings(project_name)
max_config_data = colorspace.get_imageio_config(
project_name, "max", project_settings)
max_config_data = colorspace.get_current_context_imageio_config_preset()
if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"):
if not is_headless():
from ayon_core.tools.utils import SimplePopup


@ -52,11 +52,7 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
self._has_been_setup = True
def context_setting():
return lib.set_context_setting()
rt.callbacks.addScript(rt.Name('systemPostNew'),
context_setting)
rt.callbacks.addScript(rt.Name('systemPostNew'), on_new)
rt.callbacks.addScript(rt.Name('filePostOpen'),
lib.check_colorspace)
@ -163,6 +159,14 @@ def ls() -> list:
yield lib.read(container)
def on_new():
lib.set_context_setting()
if rt.checkForSave():
rt.resetMaxFile(rt.Name("noPrompt"))
rt.clearUndoBuffer()
rt.redrawViews()
def containerise(name: str, nodes: list, context,
namespace=None, loader=None, suffix="_CON"):
data = {


@ -22,7 +22,6 @@ ALEMBIC_ARGS = {
"melPostJobCallback": str,
"noNormals": bool,
"preRoll": bool,
"preRollStartFrame": int,
"pythonPerFrameCallback": str,
"pythonPostJobCallback": str,
"renderableOnly": bool,
@ -54,15 +53,22 @@ def extract_alembic(
endFrame=None,
eulerFilter=True,
frameRange="",
melPerFrameCallback=None,
melPostJobCallback=None,
noNormals=False,
preRoll=False,
preRollStartFrame=0,
pythonPerFrameCallback=None,
pythonPostJobCallback=None,
renderableOnly=False,
root=None,
selection=True,
startFrame=None,
step=1.0,
stripNamespaces=True,
userAttr=None,
userAttrPrefix=None,
uvsOnly=False,
uvWrite=True,
verbose=False,
wholeFrameGeo=False,
@ -102,6 +108,11 @@ def extract_alembic(
string formatted as: "startFrame endFrame". This argument
overrides `startFrame` and `endFrame` arguments.
melPerFrameCallback (Optional[str]): MEL callback run per frame.
melPostJobCallback (Optional[str]): MEL callback after last frame is
written.
noNormals (bool): When on, normal data from the original polygon
objects is not included in the exported Alembic cache file.
@ -113,6 +124,11 @@ def extract_alembic(
dependent translations and can be used to evaluate run-up that
isn't actually translated. Defaults to 0.
pythonPerFrameCallback (Optional[str]): Python callback run per frame.
pythonPostJobCallback (Optional[str]): Python callback after last frame
is written.
renderableOnly (bool): When on, any non-renderable nodes or hierarchy,
such as hidden objects, are not included in the Alembic file.
Defaults to False.
@ -137,6 +153,15 @@ def extract_alembic(
object with the namespace taco:foo:bar appears as bar in the
Alembic file.
userAttr (list of str, optional): A specific user defined attribute to
write out. Defaults to [].
userAttrPrefix (list of str, optional): Prefix filter for determining
which user defined attributes to write out. Defaults to [].
uvsOnly (bool): When on, only uv data for PolyMesh and SubD shapes
will be written to the Alembic file.
uvWrite (bool): When on, UV data from polygon meshes and subdivision
objects are written to the Alembic file. Only the current UV map is
included.
@ -183,6 +208,8 @@ def extract_alembic(
# Ensure list arguments are valid.
attr = attr or []
attrPrefix = attrPrefix or []
userAttr = userAttr or []
userAttrPrefix = userAttrPrefix or []
root = root or []
# Pass the start and end frame on as `frameRange` so that it
@ -213,8 +240,10 @@ def extract_alembic(
"eulerFilter": eulerFilter,
"noNormals": noNormals,
"preRoll": preRoll,
"root": root,
"renderableOnly": renderableOnly,
"uvWrite": uvWrite,
"uvsOnly": uvsOnly,
"writeColorSets": writeColorSets,
"writeFaceSets": writeFaceSets,
"wholeFrameGeo": wholeFrameGeo,
@ -226,9 +255,10 @@ def extract_alembic(
"step": step,
"attr": attr,
"attrPrefix": attrPrefix,
"userAttr": userAttr,
"userAttrPrefix": userAttrPrefix,
"stripNamespaces": stripNamespaces,
"verbose": verbose,
"preRollStartFrame": preRollStartFrame
"verbose": verbose
}
# Validate options
@ -264,6 +294,17 @@ def extract_alembic(
if maya_version >= 2018:
options['autoSubd'] = options.pop('writeCreases', False)
# Only add callbacks if they are set so that we're not passing `None`
callbacks = {
"melPerFrameCallback": melPerFrameCallback,
"melPostJobCallback": melPostJobCallback,
"pythonPerFrameCallback": pythonPerFrameCallback,
"pythonPostJobCallback": pythonPostJobCallback,
}
for key, callback in callbacks.items():
if callback:
options[key] = str(callback)
# Format the job string from options
job_args = list()
for key, value in options.items():
@ -297,7 +338,11 @@ def extract_alembic(
# exports are made. (PLN-31)
# TODO: Make sure this actually fixes the issues
with evaluation("off"):
cmds.AbcExport(j=job_str, verbose=verbose)
cmds.AbcExport(
j=job_str,
verbose=verbose,
preRollStartFrame=preRollStartFrame
)
if verbose:
log.debug("Extracted Alembic to: %s", file)


@ -47,7 +47,7 @@ class FBXExtractor:
"smoothMesh": bool,
"instances": bool,
# "referencedContainersContent": bool, # deprecated in Maya 2016+
"bakeComplexAnimation": int,
"bakeComplexAnimation": bool,
"bakeComplexStart": int,
"bakeComplexEnd": int,
"bakeComplexStep": int,
@ -59,6 +59,7 @@ class FBXExtractor:
"constraints": bool,
"lights": bool,
"embeddedTextures": bool,
"includeChildren": bool,
"inputConnections": bool,
"upAxis": str, # x, y or z,
"triangulate": bool,
@ -102,6 +103,7 @@ class FBXExtractor:
"constraints": False,
"lights": True,
"embeddedTextures": False,
"includeChildren": True,
"inputConnections": True,
"upAxis": "y",
"triangulate": False,


@ -1299,7 +1299,7 @@ def is_visible(node,
override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node))
override_visibility = cmds.getAttr('{}.overrideVisibility'.format(
node))
if override_enabled and override_visibility:
if override_enabled and not override_visibility:
return False
if parentHidden:
@ -4212,3 +4212,23 @@ def create_rig_animation_instance(
variant=namespace,
pre_create_data={"use_selection": True}
)
def get_node_index_under_parent(node: str) -> int:
"""Return the index of a DAG node under its parent.
Arguments:
node (str): A DAG Node path.
Returns:
int: The DAG node's index under its parent, or among the world
(assembly) nodes when it has no parent.
"""
node = cmds.ls(node, long=True)[0] # enforce long names
parent = node.rsplit("|", 1)[0]
if not parent:
return cmds.ls(assemblies=True, long=True).index(node)
else:
return cmds.listRelatives(parent,
children=True,
fullPath=True).index(node)
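A small hedged usage example; the scene nodes below are assumptions created only for illustration:
from maya import cmds
# Build |group1|pCube1 and |group1|pCube2, then query the second cube.
cube1 = cmds.polyCube(name="pCube1")[0]
cube2 = cmds.polyCube(name="pCube2")[0]
cmds.group(cube1, name="group1")
cmds.parent(cube2, "group1")
print(get_node_index_under_parent("|group1|pCube2"))  # -> 1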


@ -1,3 +1,5 @@
import json
from maya import cmds
from ayon_core.pipeline import (
@ -8,13 +10,15 @@ from ayon_core.pipeline import (
)
from ayon_core.pipeline.workfile.workfile_template_builder import (
TemplateAlreadyImported,
AbstractTemplateBuilder
AbstractTemplateBuilder,
PlaceholderPlugin,
PlaceholderItem,
)
from ayon_core.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from .lib import get_main_window
from .lib import read, imprint, get_main_window
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
@ -86,6 +90,162 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
return True
class MayaPlaceholderPlugin(PlaceholderPlugin):
"""Base Placeholder Plugin for Maya with one unified cache.
Creates a locator as the placeholder node. During populate it
provides all of the attributes defined on the locator's transform
in `placeholder.data`, with `placeholder.scene_identifier` being
the full path to the node.
Inherited classes must still implement `populate_placeholder`.
"""
use_selection_as_parent = True
item_class = PlaceholderItem
def _create_placeholder_name(self, placeholder_data):
return self.identifier.replace(".", "_")
def _collect_scene_placeholders(self):
nodes_by_identifier = self.builder.get_shared_populate_data(
"placeholder_nodes"
)
if nodes_by_identifier is None:
# Cache placeholder data to shared data
nodes = cmds.ls("*.plugin_identifier", long=True, objectsOnly=True)
nodes_by_identifier = {}
for node in nodes:
identifier = cmds.getAttr("{}.plugin_identifier".format(node))
nodes_by_identifier.setdefault(identifier, []).append(node)
# Set the cache
self.builder.set_shared_populate_data(
"placeholder_nodes", nodes_by_identifier
)
return nodes_by_identifier
def create_placeholder(self, placeholder_data):
parent = None
if self.use_selection_as_parent:
selection = cmds.ls(selection=True)
if len(selection) > 1:
raise ValueError(
"More than one node is selected. "
"Please select only one to define the parent."
)
parent = selection[0] if selection else None
placeholder_data["plugin_identifier"] = self.identifier
placeholder_name = self._create_placeholder_name(placeholder_data)
placeholder = cmds.spaceLocator(name=placeholder_name)[0]
if parent:
placeholder = cmds.parent(placeholder, selection[0])[0]
self.imprint(placeholder, placeholder_data)
def update_placeholder(self, placeholder_item, placeholder_data):
node_name = placeholder_item.scene_identifier
changed_values = {}
for key, value in placeholder_data.items():
if value != placeholder_item.data.get(key):
changed_values[key] = value
# Delete attributes to ensure we imprint new data with correct type
for key, value in changed_values.items():
placeholder_item.data[key] = value
if cmds.attributeQuery(key, node=node_name, exists=True):
attribute = "{}.{}".format(node_name, key)
cmds.deleteAttr(attribute)
self.imprint(node_name, changed_values)
def collect_placeholders(self):
placeholders = []
nodes_by_identifier = self._collect_scene_placeholders()
for node in nodes_by_identifier.get(self.identifier, []):
# TODO do data validations and maybe upgrades if they are invalid
placeholder_data = self.read(node)
placeholders.append(
self.item_class(scene_identifier=node,
data=placeholder_data,
plugin=self)
)
return placeholders
def post_placeholder_process(self, placeholder, failed):
"""Cleanup placeholder after load of its corresponding representations.
Hide the placeholder and add it to the placeholder set.
Used only by PlaceholderCreateMixin and PlaceholderLoadMixin
Args:
placeholder (PlaceholderItem): Item which was just used to load
representation.
failed (bool): Loading of representation failed.
"""
# Hide placeholder and add them to placeholder set
node = placeholder.scene_identifier
# If we just populate the placeholders from current scene, the
# placeholder set will not be created so account for that.
if not cmds.objExists(PLACEHOLDER_SET):
cmds.sets(name=PLACEHOLDER_SET, empty=True)
cmds.sets(node, addElement=PLACEHOLDER_SET)
cmds.hide(node)
cmds.setAttr("{}.hiddenInOutliner".format(node), True)
def delete_placeholder(self, placeholder):
"""Remove placeholder if building was successful
Used only by PlaceholderCreateMixin and PlaceholderLoadMixin.
"""
node = placeholder.scene_identifier
# Deleting a placeholder node can cause Maya to also delete any
# objectSets it was the last member of, so first remove the node
# from all sets it belongs to. This way the `PLACEHOLDERS_SET`
# will survive long enough.
sets = cmds.listSets(o=node) or []
for object_set in sets:
cmds.sets(node, remove=object_set)
cmds.delete(node)
def imprint(self, node, data):
"""Imprint call for placeholder node"""
# Complicated data that can't be represented as flat maya attributes
# we write to json strings, e.g. multiselection EnumDef
for key, value in data.items():
if isinstance(value, (list, tuple, dict)):
data[key] = "JSON::{}".format(json.dumps(value))
imprint(node, data)
def read(self, node):
"""Read call for placeholder node"""
data = read(node)
# Complicated data that can't be represented as flat maya attributes
# we read from json strings, e.g. multiselection EnumDef
for key, value in data.items():
if isinstance(value, str) and value.startswith("JSON::"):
value = value[len("JSON::"):] # strip of JSON:: prefix
data[key] = json.loads(value)
return data
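A minimal standalone sketch (not part of this diff) of the `JSON::` round trip used by `imprint` and `read` above, with a hypothetical multiselection value:
# Illustration only: how a complex value survives the flat-attribute round trip.
import json

value = ["alembic", "fbx"]  # hypothetical multiselection EnumDef value
stored = "JSON::{}".format(json.dumps(value))   # what imprint() writes to the node
restored = json.loads(stored[len("JSON::"):])   # what read() returns after stripping the prefix
assert restored == value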
def build_workfile_template(*args):
builder = MayaTemplateBuilder(registered_host())
builder.build_template()

View file

@ -6,7 +6,6 @@ from ayon_core.lib import (
BoolDef,
NumberDef,
)
from ayon_core.pipeline import CreatedInstance
def _get_animation_attr_defs(cls):

View file

@ -1,3 +1,5 @@
from maya import cmds
from ayon_core.hosts.maya.api import (
lib,
plugin
@ -87,16 +89,24 @@ class CreateArnoldSceneSource(plugin.MayaCreator):
return defs
class CreateArnoldSceneSourceProxy(CreateArnoldSceneSource):
"""Arnold Scene Source Proxy
This product type facilitates working with proxy geometry in the viewport.
"""
identifier = "io.openpype.creators.maya.assproxy"
label = "Arnold Scene Source Proxy"
product_type = "assProxy"
icon = "cube"
def create(self, product_name, instance_data, pre_create_data):
from maya import cmds
instance = super(CreateArnoldSceneSource, self).create(
product_name, instance_data, pre_create_data
)
instance_node = instance.get("instance_node")
content = cmds.sets(name=instance_node + "_content_SET", empty=True)
proxy = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
cmds.sets([content, proxy], forceElement=instance_node)
cmds.sets([proxy], forceElement=instance_node)

View file

@ -12,6 +12,7 @@ from ayon_core.hosts.maya.api.lib import (
unique_namespace,
get_attribute_input,
maintained_selection,
get_fps_for_current_context
)
from ayon_core.hosts.maya.api.pipeline import containerise
from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type
@ -29,7 +30,13 @@ class ArnoldStandinLoader(load.LoaderPlugin):
"""Load as Arnold standin"""
product_types = {
"ass", "animation", "model", "proxyAbc", "pointcache", "usd"
"ass",
"assProxy",
"animation",
"model",
"proxyAbc",
"pointcache",
"usd"
}
representations = {"ass", "abc", "usda", "usdc", "usd"}
@ -95,8 +102,10 @@ class ArnoldStandinLoader(load.LoaderPlugin):
sequence = is_sequence(os.listdir(os.path.dirname(repre_path)))
cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
fps = float(version_attributes.get("fps")) or 25
cmds.setAttr(standin_shape + ".abcFPS", fps)
fps = (
version_attributes.get("fps") or get_fps_for_current_context()
)
cmds.setAttr(standin_shape + ".abcFPS", float(fps))
nodes = [root, standin, standin_shape]
if operator is not None:
@ -128,6 +137,18 @@ class ArnoldStandinLoader(load.LoaderPlugin):
proxy_path = "/".join([os.path.dirname(path), proxy_basename])
return proxy_basename, proxy_path
def _update_operators(self, string_replace_operator, proxy_basename, path):
cmds.setAttr(
string_replace_operator + ".match",
proxy_basename.split(".")[0],
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path).split(".")[0],
type="string"
)
def _setup_proxy(self, shape, path, namespace):
proxy_basename, proxy_path = self._get_proxy_path(path)
@ -150,16 +171,7 @@ class ArnoldStandinLoader(load.LoaderPlugin):
"*.(@node=='{}')".format(node_type),
type="string"
)
cmds.setAttr(
string_replace_operator + ".match",
proxy_basename,
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path),
type="string"
)
self._update_operators(string_replace_operator, proxy_basename, path)
cmds.connectAttr(
string_replace_operator + ".out",
@ -194,18 +206,9 @@ class ArnoldStandinLoader(load.LoaderPlugin):
path = get_representation_path(repre_entity)
proxy_basename, proxy_path = self._get_proxy_path(path)
# Whether there is proxy or so, we still update the string operator.
# Whether there is proxy or not, we still update the string operator.
# If no proxy exists, the string operator won't replace anything.
cmds.setAttr(
string_replace_operator + ".match",
proxy_basename,
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path),
type="string"
)
self._update_operators(string_replace_operator, proxy_basename, path)
dso_path = path
if os.path.exists(proxy_path):

View file

@ -8,7 +8,7 @@ from ayon_core.pipeline import (
from ayon_core.pipeline.load.utils import get_representation_path_from_context
from ayon_core.pipeline.colorspace import (
get_imageio_file_rules_colorspace_from_filepath,
get_imageio_config,
get_current_context_imageio_config_preset,
get_imageio_file_rules
)
from ayon_core.settings import get_project_settings
@ -270,8 +270,7 @@ class FileNodeLoader(load.LoaderPlugin):
host_name = get_current_host_name()
project_settings = get_project_settings(project_name)
config_data = get_imageio_config(
project_name, host_name,
config_data = get_current_context_imageio_config_preset(
project_settings=project_settings
)
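A minimal sketch of the replacement call pattern these hunks migrate to; the no-argument form, the optional `project_settings` keyword, and the `"path"` key are taken only from the call sites visible in this commit:
# Sketch only; mirrors the call sites shown in this diff.
from ayon_core.pipeline.colorspace import get_current_context_imageio_config_preset

config_data = get_current_context_imageio_config_preset()  # project settings resolved from context
if config_data:
    ocio_config_path = config_data["path"]  # consumers in this commit read the "path" key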

View file

@ -10,21 +10,23 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
# Offset to be after renderable camera collection.
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Arnold Scene Source"
families = ["ass"]
families = ["ass", "assProxy"]
def process(self, instance):
objsets = instance.data["setMembers"]
instance.data["members"] = []
for set_member in instance.data["setMembers"]:
if cmds.nodeType(set_member) != "objectSet":
instance.data["members"].extend(self.get_hierarchy(set_member))
continue
for objset in objsets:
objset = str(objset)
members = cmds.sets(objset, query=True)
members = cmds.sets(set_member, query=True)
members = cmds.ls(members, long=True)
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
self.log.warning(
"Skipped empty instance: \"%s\" " % set_member
)
continue
if objset.endswith("content_SET"):
instance.data["contentMembers"] = self.get_hierarchy(members)
if objset.endswith("proxy_SET"):
if set_member.endswith("proxy_SET"):
instance.data["proxy"] = self.get_hierarchy(members)
# Use camera in object set if present else default to render globals
@ -33,7 +35,7 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)]
if renderable:
camera = renderable[0]
for node in instance.data["contentMembers"]:
for node in instance.data["members"]:
camera_shapes = cmds.listRelatives(
node, shapes=True, type="camera"
)
@ -46,18 +48,11 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
self.log.debug("data: {}".format(instance.data))
def get_hierarchy(self, nodes):
"""Return nodes with all their children.
Arguments:
nodes (List[str]): List of nodes to collect children hierarchy for
Returns:
list: Input nodes with their children hierarchy
"""
"""Return nodes with all their children"""
nodes = cmds.ls(nodes, long=True)
if not nodes:
return []
children = get_all_children(nodes, ignore_intermediate_objects=True)
return list(children.union(nodes))
children = get_all_children(nodes)
# Make sure nodes merged with children only
# contains unique entries
return list(set(nodes + list(children)))

View file

@ -17,8 +17,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
families = ["ass"]
asciiAss = False
def process(self, instance):
staging_dir = self.staging_dir(instance)
def _pre_process(self, instance, staging_dir):
file_path = os.path.join(staging_dir, "{}.ass".format(instance.name))
# Mask
@ -70,24 +69,38 @@ class ExtractArnoldSceneSource(publish.Extractor):
"mask": mask
}
filenames, nodes_by_id = self._extract(
instance.data["contentMembers"], attribute_data, kwargs
)
if "representations" not in instance.data:
instance.data["representations"] = []
return attribute_data, kwargs
def process(self, instance):
staging_dir = self.staging_dir(instance)
attribute_data, kwargs = self._pre_process(instance, staging_dir)
filenames = self._extract(
instance.data["members"], attribute_data, kwargs
)
self._post_process(
instance, filenames, staging_dir, kwargs["startFrame"]
)
def _post_process(self, instance, filenames, staging_dir, frame_start):
nodes_by_id = self._nodes_by_id(instance[:])
representation = {
"name": "ass",
"ext": "ass",
"files": filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
"frameStart": kwargs["startFrame"]
"frameStart": frame_start
}
instance.data["representations"].append(representation)
json_path = os.path.join(staging_dir, "{}.json".format(instance.name))
json_path = os.path.join(
staging_dir, "{}.json".format(instance.name)
)
with open(json_path, "w") as f:
json.dump(nodes_by_id, f)
@ -104,13 +117,68 @@ class ExtractArnoldSceneSource(publish.Extractor):
"Extracted instance {} to: {}".format(instance.name, staging_dir)
)
# Extract proxy.
if not instance.data.get("proxy", []):
return
def _nodes_by_id(self, nodes):
nodes_by_id = defaultdict(list)
kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
for node in nodes:
id = lib.get_id(node)
filenames, _ = self._extract(
if id is None:
continue
# Converting Maya hierarchy separator "|" to Arnold separator "/".
nodes_by_id[id].append(node.replace("|", "/"))
return nodes_by_id
def _extract(self, nodes, attribute_data, kwargs):
filenames = []
with lib.attribute_values(attribute_data):
with lib.maintained_selection():
self.log.debug(
"Writing: {}".format(nodes)
)
cmds.select(nodes, noExpand=True)
self.log.debug(
"Extracting ass sequence with: {}".format(kwargs)
)
exported_files = cmds.arnoldExportAss(**kwargs)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.debug("Exported: {}".format(filenames))
return filenames
class ExtractArnoldSceneSourceProxy(ExtractArnoldSceneSource):
"""Extract the content of the instance to an Arnold Scene Source file."""
label = "Extract Arnold Scene Source Proxy"
hosts = ["maya"]
families = ["assProxy"]
asciiAss = True
def process(self, instance):
staging_dir = self.staging_dir(instance)
attribute_data, kwargs = self._pre_process(instance, staging_dir)
filenames, _ = self._duplicate_extract(
instance.data["members"], attribute_data, kwargs
)
self._post_process(
instance, filenames, staging_dir, kwargs["startFrame"]
)
kwargs["filename"] = os.path.join(
staging_dir, "{}_proxy.ass".format(instance.name)
)
filenames, _ = self._duplicate_extract(
instance.data["proxy"], attribute_data, kwargs
)
@ -125,12 +193,11 @@ class ExtractArnoldSceneSource(publish.Extractor):
instance.data["representations"].append(representation)
def _extract(self, nodes, attribute_data, kwargs):
def _duplicate_extract(self, nodes, attribute_data, kwargs):
self.log.debug(
"Writing {} with:\n{}".format(kwargs["filename"], kwargs)
)
filenames = []
nodes_by_id = defaultdict(list)
# Duplicating nodes so they are direct children of the world. This
# makes the hierarchy of any exported ass file the same.
with lib.delete_after() as delete_bin:
@ -147,7 +214,9 @@ class ExtractArnoldSceneSource(publish.Extractor):
if not shapes:
continue
duplicate_transform = cmds.duplicate(node)[0]
basename = cmds.duplicate(node)[0]
parents = cmds.ls(node, long=True)[0].split("|")[:-1]
duplicate_transform = "|".join(parents + [basename])
if cmds.listRelatives(duplicate_transform, parent=True):
duplicate_transform = cmds.parent(
@ -172,28 +241,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
duplicate_nodes.extend(shapes)
delete_bin.append(duplicate_transform)
# Copy cbId to mtoa_constant.
for node in duplicate_nodes:
# Converting Maya hierarchy separator "|" to Arnold
# separator "/".
nodes_by_id[lib.get_id(node)].append(node.replace("|", "/"))
with lib.attribute_values(attribute_data):
with lib.maintained_selection():
self.log.debug(
"Writing: {}".format(duplicate_nodes)
)
cmds.select(duplicate_nodes, noExpand=True)
self.log.debug(
"Extracting ass sequence with: {}".format(kwargs)
)
exported_files = cmds.arnoldExportAss(**kwargs)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.debug("Exported: {}".format(filenames))
nodes_by_id = self._nodes_by_id(duplicate_nodes)
filenames = self._extract(duplicate_nodes, attribute_data, kwargs)
return filenames, nodes_by_id

View file

@ -2,7 +2,7 @@ import os
import json
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.lib import extract_alembic
from ayon_core.hosts.maya.api.alembic import extract_alembic
from maya import cmds

View file

@ -35,7 +35,8 @@ class ExtractFBXAnimation(publish.Extractor):
fbx_exporter = fbx.FBXExtractor(log=self.log)
out_members = instance.data.get("animated_skeleton", [])
# Export
instance.data["constraints"] = True
# TODO: expose options so users can set the flags they intend to export
instance.data["skeletonDefinitions"] = True
instance.data["referencedAssetsContent"] = True
fbx_exporter.set_options_from_instance(instance)

View file

@ -6,6 +6,7 @@ from maya import cmds
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
get_all_children,
suspended_refresh,
maintained_selection,
iter_visible_nodes_in_range
@ -40,7 +41,6 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
# From settings
attr = []
attrPrefix = []
autoSubd = False
bake_attributes = []
bake_attribute_prefixes = []
dataFormat = "ogawa"
@ -63,6 +63,7 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
wholeFrameGeo = False
worldSpace = True
writeColorSets = False
writeCreases = False
writeFaceSets = False
writeNormals = True
writeUVSets = False
@ -173,15 +174,9 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
"writeVisibility": attribute_values.get(
"writeVisibility", self.writeVisibility
),
"autoSubd": attribute_values.get(
"autoSubd", self.autoSubd
),
"uvsOnly": attribute_values.get(
"uvsOnly", self.uvsOnly
),
"writeNormals": attribute_values.get(
"writeNormals", self.writeNormals
),
"melPerFrameCallback": attribute_values.get(
"melPerFrameCallback", self.melPerFrameCallback
),
@ -193,7 +188,12 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
),
"pythonPostJobCallback": attribute_values.get(
"pythonPostJobCallback", self.pythonPostJobCallback
)
),
# Note that this converts `writeNormals` to `noNormals` for the
# `AbcExport` equivalent in `extract_alembic`
"noNormals": not attribute_values.get(
"writeNormals", self.writeNormals
),
}
if instance.data.get("visibleOnly", False):
@ -249,7 +249,6 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
with maintained_selection():
cmds.select(instance.data["proxy"])
extract_alembic(**kwargs)
representation = {
"name": "proxy",
"ext": "abc",
@ -268,20 +267,6 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
return []
override_defs = OrderedDict({
"autoSubd": BoolDef(
"autoSubd",
label="Auto Subd",
default=cls.autoSubd,
tooltip=(
"If this flag is present and the mesh has crease edges, "
"crease vertices or holes, the mesh (OPolyMesh) would now "
"be written out as an OSubD and crease info will be stored"
" in the Alembic file. Otherwise, creases info won't be "
"preserved in Alembic file unless a custom Boolean "
"attribute SubDivisionMesh has been added to mesh node and"
" its value is true."
)
),
"eulerFilter": BoolDef(
"eulerFilter",
label="Euler Filter",
@ -354,6 +339,13 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
default=cls.writeColorSets,
tooltip="Write vertex colors with the geometry."
),
"writeCreases": BoolDef(
"writeCreases",
label="Write Creases",
default=cls.writeCreases,
tooltip="Write the geometry's edge and vertex crease "
"information."
),
"writeFaceSets": BoolDef(
"writeFaceSets",
label="Write Face Sets",
@ -527,9 +519,7 @@ class ExtractAnimation(ExtractAlembic):
roots = cmds.sets(out_set, query=True) or []
# Include all descendants
nodes = roots
nodes += cmds.listRelatives(
roots, allDescendents=True, fullPath=True
) or []
nodes = roots.copy()
nodes.extend(get_all_children(roots, ignore_intermediate_objects=True))
return nodes, roots

View file

@ -3,8 +3,8 @@ import os
from maya import cmds
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
extract_alembic,
suspended_refresh,
maintained_selection,
iter_visible_nodes_in_range

View file

@ -5,8 +5,8 @@ import os
from maya import cmds # noqa
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
extract_alembic,
suspended_refresh,
maintained_selection
)

View file

@ -1,3 +1,4 @@
import inspect
import pyblish.api
from ayon_core.pipeline import OptionalPyblishPluginMixin
@ -29,29 +30,28 @@ class ValidateAlembicDefaultsPointcache(
@classmethod
def _get_publish_attributes(cls, instance):
attributes = instance.data["publish_attributes"][
cls.plugin_name(
instance.data["publish_attributes"]
)
]
return attributes
return instance.data["publish_attributes"][cls.plugin_name]
def process(self, instance):
if not self.is_active(instance.data):
return
settings = self._get_settings(instance.context)
attributes = self._get_publish_attributes(instance)
msg = (
"Alembic Extract setting \"{}\" is not the default value:"
"\nCurrent: {}"
"\nDefault Value: {}\n"
)
errors = []
invalid = {}
for key, value in attributes.items():
if key not in settings:
# This may occur if attributes have changed over time and an
# existing instance has older legacy attributes that do not
# match the current settings definition.
self.log.warning(
"Publish attribute %s not found in Alembic Export "
"default settings. Ignoring validation for attribute.",
key
)
continue
default_value = settings[key]
# Lists are best compared sorted since we can't rely on the order
@ -61,10 +61,35 @@ class ValidateAlembicDefaultsPointcache(
default_value = sorted(default_value)
if value != default_value:
errors.append(msg.format(key, value, default_value))
invalid[key] = value, default_value
if errors:
raise PublishValidationError("\n".join(errors))
if invalid:
non_defaults = "\n".join(
f"- {key}: {value} \t(default: {default_value})"
for key, (value, default_value) in invalid.items()
)
raise PublishValidationError(
"Alembic extract options differ from default values:\n"
f"{non_defaults}",
description=self.get_description()
)
@staticmethod
def get_description():
return inspect.cleandoc(
"""### Alembic Extract settings differ from defaults
The alembic export options differ from the project default values.
If this is intentional you can disable this validation by
disabling **Validate Alembic Options Default**.
If not you may use the "Repair" action to revert all the options to
their default values.
"""
)
@classmethod
def repair(cls, instance):
@ -75,13 +100,20 @@ class ValidateAlembicDefaultsPointcache(
)
# Set the settings values on the create context then save to workfile.
publish_attributes = instance.data["publish_attributes"]
plugin_name = cls.plugin_name(publish_attributes)
attributes = cls._get_publish_attributes(instance)
settings = cls._get_settings(instance.context)
create_publish_attributes = create_instance.data["publish_attributes"]
attributes = cls._get_publish_attributes(create_instance)
for key in attributes:
create_publish_attributes[plugin_name][key] = settings[key]
if key not in settings:
# This may occur if attributes have changed over time and an
# existing instance has older legacy attributes that do not
# match the current settings definition.
cls.log.warning(
"Publish attribute %s not found in Alembic Export "
"default settings. Ignoring repair for attribute.",
key
)
continue
attributes[key] = settings[key]
create_context.save_changes()
@ -93,6 +125,6 @@ class ValidateAlembicDefaultsAnimation(
The defaults are defined in the project settings.
"""
label = "Validate Alembic Options Defaults"
label = "Validate Alembic Options Defaults"
families = ["animation"]
plugin_name = "ExtractAnimation"

View file

@ -1,71 +0,0 @@
import pyblish.api
import ayon_core.hosts.maya.api.action
from ayon_core.pipeline.publish import (
PublishValidationError,
ValidateContentsOrder,
OptionalPyblishPluginMixin
)
from maya import cmds
class ValidateAnimatedReferenceRig(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Validate all nodes in skeletonAnim_SET are referenced"""
order = ValidateContentsOrder
hosts = ["maya"]
families = ["animation.fbx"]
label = "Animated Reference Rig"
accepted_controllers = ["transform", "locator"]
actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction]
optional = False
def process(self, instance):
if not self.is_active(instance.data):
return
animated_sets = instance.data.get("animated_skeleton", [])
if not animated_sets:
self.log.debug(
"No nodes found in skeletonAnim_SET. "
"Skipping validation of animated reference rig..."
)
return
for animated_reference in animated_sets:
is_referenced = cmds.referenceQuery(
animated_reference, isNodeReferenced=True)
if not bool(is_referenced):
raise PublishValidationError(
"All the content in skeletonAnim_SET"
" should be referenced nodes"
)
invalid_controls = self.validate_controls(animated_sets)
if invalid_controls:
raise PublishValidationError(
"All the content in skeletonAnim_SET"
" should be transforms"
)
@classmethod
def validate_controls(self, set_members):
"""Check if the controller set contains only accepted node types.
Checks if all its set members are within the hierarchy of the root
Checks if the node types of the set members valid
Args:
set_members: list of nodes of the skeleton_anim_set
hierarchy: list of nodes which reside under the root node
Returns:
errors (list)
"""
# Validate control types
invalid = []
set_members = cmds.ls(set_members, long=True)
for node in set_members:
if cmds.nodeType(node) not in self.accepted_controllers:
invalid.append(node)
return invalid

View file

@ -1,30 +1,56 @@
from maya import cmds
import pyblish.api
from ayon_core.pipeline.publish import (
ValidateContentsOrder, PublishValidationError
)
from ayon_core.hosts.maya.api.lib import is_visible
class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
"""Validate Arnold Scene Source.
We require at least 1 root node/parent for the meshes. This is to ensure we
can duplicate the nodes and preserve the names.
Ensure no nodes are hidden.
"""
If using proxies we need the nodes to share the same names and not be
order = ValidateContentsOrder
hosts = ["maya"]
families = ["ass", "assProxy"]
label = "Validate Arnold Scene Source"
def process(self, instance):
# Validate against having nodes hidden, which will result in the
# extraction to ignore the node.
nodes = instance.data["members"] + instance.data.get("proxy", [])
nodes = [x for x in nodes if cmds.objectType(x, isAType='dagNode')]
hidden_nodes = [
x for x in nodes if not is_visible(x, intermediateObject=False)
]
if hidden_nodes:
raise PublishValidationError(
"Found hidden nodes:\n\n{}\n\nPlease unhide for"
" publishing.".format("\n".join(hidden_nodes))
)
class ValidateArnoldSceneSourceProxy(pyblish.api.InstancePlugin):
"""Validate Arnold Scene Source Proxy.
When using proxies we need the nodes to share the same names and not be
parented to the world. This ends up requiring at least two groups: one with
the content nodes and another with the proxy nodes.
"""
order = ValidateContentsOrder
hosts = ["maya"]
families = ["ass"]
label = "Validate Arnold Scene Source"
families = ["assProxy"]
label = "Validate Arnold Scene Source Proxy"
def _get_nodes_by_name(self, nodes):
ungrouped_nodes = []
nodes_by_name = {}
parents = []
same_named_nodes = {}
for node in nodes:
node_split = node.split("|")
if len(node_split) == 2:
@ -35,33 +61,16 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
parents.append(parent)
node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
# Check for same-named nodes, which can happen in different
# hierarchies.
if node_name in nodes_by_name:
try:
same_named_nodes[node_name].append(node)
except KeyError:
same_named_nodes[node_name] = [
nodes_by_name[node_name], node
]
nodes_by_name[node_name] = node
if same_named_nodes:
message = "Found nodes with the same name:"
for name, nodes in same_named_nodes.items():
message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes))
raise PublishValidationError(message)
return ungrouped_nodes, nodes_by_name, parents
def process(self, instance):
# Validate against nodes directly parented to world.
ungrouped_nodes = []
nodes, content_nodes_by_name, content_parents = (
self._get_nodes_by_name(instance.data["contentMembers"])
self._get_nodes_by_name(instance.data["members"])
)
ungrouped_nodes.extend(nodes)
@ -70,24 +79,21 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
)
ungrouped_nodes.extend(nodes)
# Validate against nodes directly parented to world.
if ungrouped_nodes:
raise PublishValidationError(
"Found nodes parented to the world: {}\n"
"All nodes need to be grouped.".format(ungrouped_nodes)
)
# Proxy validation.
if not instance.data.get("proxy", []):
return
# Validate for content and proxy nodes amount being the same.
if len(instance.data["contentMembers"]) != len(instance.data["proxy"]):
if len(instance.data["members"]) != len(instance.data["proxy"]):
raise PublishValidationError(
"Amount of content nodes ({}) and proxy nodes ({}) needs to "
"be the same.".format(
len(instance.data["contentMembers"]),
len(instance.data["proxy"])
"be the same.\nContent nodes: {}\nProxy nodes:{}".format(
len(instance.data["members"]),
len(instance.data["proxy"]),
instance.data["members"],
instance.data["proxy"]
)
)
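An illustrative (hypothetical) pair of node lists that would satisfy the proxy validator above: equal counts, matching short names under separate groups, and nothing parented directly to the world:
# Hypothetical example data only; names are illustrative.
members = ["|content_GRP|treeA", "|content_GRP|treeB"]
proxy = ["|proxy_GRP|treeA", "|proxy_GRP|treeB"]

assert len(members) == len(proxy)
assert all(node.count("|") > 1 for node in members + proxy)  # grouped, not world-parented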

View file

@ -17,7 +17,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
order = ValidateContentsOrder
hosts = ["maya"]
families = ["ass"]
families = ["assProxy"]
label = "Validate Arnold Scene Source CBID"
actions = [RepairAction]
optional = False
@ -40,15 +40,11 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
@classmethod
def get_invalid_couples(cls, instance):
content_nodes_by_name = cls._get_nodes_by_name(
instance.data["contentMembers"]
)
proxy_nodes_by_name = cls._get_nodes_by_name(
instance.data.get("proxy", [])
)
nodes_by_name = cls._get_nodes_by_name(instance.data["members"])
proxy_nodes_by_name = cls._get_nodes_by_name(instance.data["proxy"])
invalid_couples = []
for content_name, content_node in content_nodes_by_name.items():
for content_name, content_node in nodes_by_name.items():
proxy_node = proxy_nodes_by_name.get(content_name, None)
if not proxy_node:
@ -70,7 +66,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
if not self.is_active(instance.data):
return
# Proxy validation.
if not instance.data.get("proxy", []):
if not instance.data["proxy"]:
return
# Validate for proxy nodes sharing the same cbId as content nodes.

View file

@ -10,6 +10,7 @@ from ayon_core.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError,
OptionalPyblishPluginMixin
)
from ayon_core.hosts.maya.api import lib
from ayon_core.hosts.maya.api.lib_rendersettings import RenderSettings
@ -37,7 +38,8 @@ def get_redshift_image_format_labels():
return mel.eval("{0}={0}".format(var))
class ValidateRenderSettings(pyblish.api.InstancePlugin):
class ValidateRenderSettings(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Validates the global render settings
* File Name Prefix must start with: `<Scene>`
@ -55,7 +57,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
* Frame Padding must be:
* default: 4
* Animation must be toggle on, in Render Settings - Common tab:
* Animation must be toggled on, in Render Settings - Common tab:
* vray: Animation on standard of specific
* arnold: Frame / Animation ext: Any choice without "(Single Frame)"
* redshift: Animation toggled on
@ -67,10 +69,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
"""
order = ValidateContentsOrder
label = "Render Settings"
label = "Validate Render Settings"
hosts = ["maya"]
families = ["renderlayer"]
actions = [RepairAction]
optional = True
ImagePrefixes = {
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
@ -112,6 +115,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
def process(self, instance):
if not self.is_active(instance.data):
return
invalid = self.get_invalid(instance)
if invalid:

View file

@ -1,87 +1,48 @@
import json
from maya import cmds
from ayon_core.pipeline.workfile.workfile_template_builder import (
PlaceholderPlugin,
LoadPlaceholderItem,
PlaceholderLoadMixin,
LoadPlaceholderItem
)
from ayon_core.hosts.maya.api.lib import (
read,
imprint,
get_reference_node
get_container_transforms,
get_node_parent,
get_node_index_under_parent
)
from ayon_core.hosts.maya.api.workfile_template_builder import (
MayaPlaceholderPlugin,
)
from ayon_core.hosts.maya.api.workfile_template_builder import PLACEHOLDER_SET
class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
class MayaPlaceholderLoadPlugin(MayaPlaceholderPlugin, PlaceholderLoadMixin):
identifier = "maya.load"
label = "Maya load"
def _collect_scene_placeholders(self):
# Cache placeholder data to shared data
placeholder_nodes = self.builder.get_shared_populate_data(
"placeholder_nodes"
)
if placeholder_nodes is None:
attributes = cmds.ls("*.plugin_identifier", long=True)
placeholder_nodes = {}
for attribute in attributes:
node_name = attribute.rpartition(".")[0]
placeholder_nodes[node_name] = (
self._parse_placeholder_node_data(node_name)
)
self.builder.set_shared_populate_data(
"placeholder_nodes", placeholder_nodes
)
return placeholder_nodes
def _parse_placeholder_node_data(self, node_name):
placeholder_data = read(node_name)
parent_name = (
cmds.getAttr(node_name + ".parent", asString=True)
or node_name.rpartition("|")[0]
or ""
)
if parent_name:
siblings = cmds.listRelatives(parent_name, children=True)
else:
siblings = cmds.ls(assemblies=True)
node_shortname = node_name.rpartition("|")[2]
current_index = cmds.getAttr(node_name + ".index", asString=True)
if current_index < 0:
current_index = siblings.index(node_shortname)
placeholder_data.update({
"parent": parent_name,
"index": current_index
})
return placeholder_data
item_class = LoadPlaceholderItem
def _create_placeholder_name(self, placeholder_data):
placeholder_name_parts = placeholder_data["builder_type"].split("_")
pos = 1
# Split builder type: context_assets, linked_assets, all_assets
prefix, suffix = placeholder_data["builder_type"].split("_", 1)
parts = [prefix]
# add family if any
placeholder_product_type = placeholder_data.get("product_type")
if placeholder_product_type is None:
placeholder_product_type = placeholder_data.get("family")
if placeholder_product_type:
placeholder_name_parts.insert(pos, placeholder_product_type)
pos += 1
parts.append(placeholder_product_type)
# add loader arguments if any
loader_args = placeholder_data["loader_args"]
if loader_args:
loader_args = json.loads(loader_args.replace('\'', '\"'))
values = [v for v in loader_args.values()]
for value in values:
placeholder_name_parts.insert(pos, value)
pos += 1
loader_args = eval(loader_args)
for value in loader_args.values():
parts.append(str(value))
placeholder_name = "_".join(placeholder_name_parts)
parts.append(suffix)
placeholder_name = "_".join(parts)
return placeholder_name.capitalize()
@ -104,68 +65,6 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
)
return loaded_representation_ids
def create_placeholder(self, placeholder_data):
selection = cmds.ls(selection=True)
if len(selection) > 1:
raise ValueError("More then one item are selected")
parent = selection[0] if selection else None
placeholder_data["plugin_identifier"] = self.identifier
placeholder_name = self._create_placeholder_name(placeholder_data)
placeholder = cmds.spaceLocator(name=placeholder_name)[0]
if parent:
placeholder = cmds.parent(placeholder, selection[0])[0]
imprint(placeholder, placeholder_data)
# Add helper attributes to keep placeholder info
cmds.addAttr(
placeholder,
longName="parent",
hidden=True,
dataType="string"
)
cmds.addAttr(
placeholder,
longName="index",
hidden=True,
attributeType="short",
defaultValue=-1
)
cmds.setAttr(placeholder + ".parent", "", type="string")
def update_placeholder(self, placeholder_item, placeholder_data):
node_name = placeholder_item.scene_identifier
new_values = {}
for key, value in placeholder_data.items():
placeholder_value = placeholder_item.data.get(key)
if value != placeholder_value:
new_values[key] = value
placeholder_item.data[key] = value
for key in new_values.keys():
cmds.deleteAttr(node_name + "." + key)
imprint(node_name, new_values)
def collect_placeholders(self):
output = []
scene_placeholders = self._collect_scene_placeholders()
for node_name, placeholder_data in scene_placeholders.items():
if placeholder_data.get("plugin_identifier") != self.identifier:
continue
# TODO do data validations and maybe upgrades if they are invalid
output.append(
LoadPlaceholderItem(node_name, placeholder_data, self)
)
return output
def populate_placeholder(self, placeholder):
self.populate_load_placeholder(placeholder)
@ -176,30 +75,6 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def post_placeholder_process(self, placeholder, failed):
"""Cleanup placeholder after load of its corresponding representations.
Args:
placeholder (PlaceholderItem): Item which was just used to load
representation.
failed (bool): Loading of representation failed.
"""
# Hide placeholder and add them to placeholder set
node = placeholder.scene_identifier
# If we just populate the placeholders from current scene, the
# placeholder set will not be created so account for that.
if not cmds.objExists(PLACEHOLDER_SET):
cmds.sets(name=PLACEHOLDER_SET, empty=True)
cmds.sets(node, addElement=PLACEHOLDER_SET)
cmds.hide(node)
cmds.setAttr(node + ".hiddenInOutliner", True)
def delete_placeholder(self, placeholder):
"""Remove placeholder if building was successful"""
cmds.delete(placeholder.scene_identifier)
def load_succeed(self, placeholder, container):
self._parent_in_hierarchy(placeholder, container)
@ -215,56 +90,43 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
if not container:
return
roots = cmds.sets(container, q=True) or []
ref_node = None
try:
ref_node = get_reference_node(roots)
except AssertionError as e:
self.log.info(e.args[0])
# TODO: This currently returns only a single root but a loaded scene
# could technically load more than a single root
container_root = get_container_transforms(container, root=True)
nodes_to_parent = []
for root in roots:
if ref_node:
ref_root = cmds.referenceQuery(root, nodes=True)[0]
ref_root = (
cmds.listRelatives(ref_root, parent=True, path=True) or
[ref_root]
)
nodes_to_parent.extend(ref_root)
continue
if root.endswith("_RN"):
# Backwards compatibility for hardcoded reference names.
refRoot = cmds.referenceQuery(root, n=True)[0]
refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot]
nodes_to_parent.extend(refRoot)
elif root not in cmds.listSets(allSets=True):
nodes_to_parent.append(root)
# Bugfix: The get_container_transforms does not recognize the load
# reference group currently
# TODO: Remove this when it does
parent = get_node_parent(container_root)
if parent:
container_root = parent
roots = [container_root]
elif not cmds.sets(root, q=True):
return
# Add the loaded roots to the holding sets if they exist
holding_sets = cmds.listSets(object=placeholder.scene_identifier) or []
for holding_set in holding_sets:
cmds.sets(roots, forceElement=holding_set)
# Move loaded nodes to correct index in outliner hierarchy
# Parent the roots to the place of the placeholder locator and match
# its matrix
placeholder_form = cmds.xform(
placeholder.scene_identifier,
q=True,
query=True,
matrix=True,
worldSpace=True
)
scene_parent = cmds.listRelatives(
placeholder.scene_identifier, parent=True, fullPath=True
)
for node in set(nodes_to_parent):
cmds.reorder(node, front=True)
cmds.reorder(node, relative=placeholder.data["index"])
cmds.xform(node, matrix=placeholder_form, ws=True)
if scene_parent:
cmds.parent(node, scene_parent)
else:
if cmds.listRelatives(node, parent=True):
cmds.parent(node, world=True)
scene_parent = get_node_parent(placeholder.scene_identifier)
for node in set(roots):
cmds.xform(node, matrix=placeholder_form, worldSpace=True)
holding_sets = cmds.listSets(object=placeholder.scene_identifier)
if not holding_sets:
return
for holding_set in holding_sets:
cmds.sets(roots, forceElement=holding_set)
if scene_parent != get_node_parent(node):
if scene_parent:
node = cmds.parent(node, scene_parent)[0]
else:
node = cmds.parent(node, world=True)[0]
# Move loaded nodes in index order next to their placeholder node
cmds.reorder(node, back=True)
index = get_node_index_under_parent(placeholder.scene_identifier)
cmds.reorder(node, front=True)
cmds.reorder(node, relative=index + 1)

View file

@ -0,0 +1,201 @@
from maya import cmds
from ayon_core.hosts.maya.api.workfile_template_builder import (
MayaPlaceholderPlugin
)
from ayon_core.lib import NumberDef, TextDef, EnumDef
from ayon_core.lib.events import weakref_partial
EXAMPLE_SCRIPT = """
# Access maya commands
from maya import cmds
# Access the placeholder node
placeholder_node = placeholder.scene_identifier
# Access the event callback
if event is None:
print(f"Populating {placeholder}")
else:
if event.topic == "template.depth_processed":
print(f"Processed depth: {event.get('depth')}")
elif event.topic == "template.finished":
print("Build finished.")
""".strip()
class MayaPlaceholderScriptPlugin(MayaPlaceholderPlugin):
"""Execute a script at the given `order` during workfile build.
This is a very low-level placeholder to run Python scripts at a given
point in time during the workfile template build.
It can create either a locator or an objectSet as placeholder node.
It defaults to an objectSet, since allowing to run on e.g. other
placeholder node members can be useful, e.g. using:
>>> members = cmds.sets(placeholder.scene_identifier, query=True)
"""
identifier = "maya.runscript"
label = "Run Python Script"
use_selection_as_parent = False
def get_placeholder_options(self, options=None):
options = options or {}
return [
NumberDef(
"order",
label="Order",
default=options.get("order") or 0,
decimals=0,
minimum=0,
maximum=999,
tooltip=(
"Order"
"\nOrder defines asset loading priority (0 to 999)"
"\nPriority rule is : \"lowest is first to load\"."
)
),
TextDef(
"prepare_script",
label="Run at\nprepare",
tooltip="Run before populate at prepare order",
multiline=True,
default=options.get("prepare_script", "")
),
TextDef(
"populate_script",
label="Run at\npopulate",
tooltip="Run script at populate node order<br>"
"This is the <b>default</b> behavior",
multiline=True,
default=options.get("populate_script", EXAMPLE_SCRIPT)
),
TextDef(
"depth_processed_script",
label="Run after\ndepth\niteration",
tooltip="Run script after every build depth iteration",
multiline=True,
default=options.get("depth_processed_script", "")
),
TextDef(
"finished_script",
label="Run after\nbuild",
tooltip=(
"Run script at build finished.<br>"
"<b>Note</b>: this even runs if other placeholders had "
"errors during the build"
),
multiline=True,
default=options.get("finished_script", "")
),
EnumDef(
"create_nodetype",
label="Nodetype",
items={
"spaceLocator": "Locator",
"objectSet": "ObjectSet"
},
tooltip=(
"The placeholder's node type to be created.<br>"
"<b>Note</b> this only works on create, not on update"
),
default=options.get("create_nodetype", "objectSet")
),
]
def create_placeholder(self, placeholder_data):
nodetype = placeholder_data.get("create_nodetype", "objectSet")
if nodetype == "spaceLocator":
super(MayaPlaceholderScriptPlugin, self).create_placeholder(
placeholder_data
)
elif nodetype == "objectSet":
placeholder_data["plugin_identifier"] = self.identifier
# Create maya objectSet on selection
selection = cmds.ls(selection=True, long=True)
name = self._create_placeholder_name(placeholder_data)
node = cmds.sets(selection, name=name)
self.imprint(node, placeholder_data)
def prepare_placeholders(self, placeholders):
super(MayaPlaceholderScriptPlugin, self).prepare_placeholders(
placeholders
)
for placeholder in placeholders:
prepare_script = placeholder.data.get("prepare_script")
if not prepare_script:
continue
self.run_script(placeholder, prepare_script)
def populate_placeholder(self, placeholder):
populate_script = placeholder.data.get("populate_script")
depth_script = placeholder.data.get("depth_processed_script")
finished_script = placeholder.data.get("finished_script")
# Run now
if populate_script:
self.run_script(placeholder, populate_script)
if not any([depth_script, finished_script]):
# No callback scripts to run
if not placeholder.data.get("keep_placeholder", True):
self.delete_placeholder(placeholder)
return
# Run at each depth processed
if depth_script:
callback = weakref_partial(
self.run_script, placeholder, depth_script)
self.builder.add_on_depth_processed_callback(
callback, order=placeholder.order)
# Run at build finish
if finished_script:
callback = weakref_partial(
self.run_script, placeholder, finished_script)
self.builder.add_on_finished_callback(
callback, order=placeholder.order)
# If placeholder should be deleted, delete it after finish so
# the scripts have access to it up to the last run
if not placeholder.data.get("keep_placeholder", True):
delete_callback = weakref_partial(
self.delete_placeholder, placeholder)
self.builder.add_on_finished_callback(
delete_callback, order=placeholder.order + 1)
def run_script(self, placeholder, script, event=None):
"""Run script
Even though `placeholder` is an unused argument, exposing it as
an input argument makes it available through globals()/locals()
in the `exec` call, giving the script access to the placeholder.
For example:
>>> node = placeholder.scene_identifier
In the case the script is running at a callback level (not during
populate) then it has access to the `event` as well, otherwise the
value is None if it runs during `populate_placeholder` directly.
For example adding this as the callback script:
>>> if event is not None:
>>> if event.topic == "on_depth_processed":
>>> print(f"Processed depth: {event.get('depth')}")
>>> elif event.topic == "on_finished":
>>> print("Build finished.")
"""
self.log.debug(f"Running script at event: {event}")
exec(script, locals())
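A minimal standalone sketch of the exec-with-locals mechanism described in the docstring above (names and the script are illustrative only):
# Illustration only: the locals() mapping passed to exec() exposes the surrounding names to the script.
def run(script, placeholder, event=None):
    exec(script, locals())

run("print(placeholder, event)", placeholder="|my_placeholder_LOC")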

View file

@ -43,7 +43,9 @@ from ayon_core.pipeline import (
from ayon_core.pipeline.context_tools import (
get_current_context_custom_workfile_template
)
from ayon_core.pipeline.colorspace import get_imageio_config
from ayon_core.pipeline.colorspace import (
get_current_context_imageio_config_preset
)
from ayon_core.pipeline.workfile import BuildWorkfile
from . import gizmo_menu
from .constants import ASSIST
@ -1552,10 +1554,7 @@ class WorkfileSettings(object):
imageio_host (dict): host colorspace configurations
'''
config_data = get_imageio_config(
project_name=get_current_project_name(),
host_name="nuke"
)
config_data = get_current_context_imageio_config_preset()
workfile_settings = imageio_host["workfile"]
color_management = workfile_settings["color_management"]

View file

@ -778,6 +778,7 @@ class ExporterReviewMov(ExporterReview):
# deal with now lut defined in viewer lut
self.viewer_lut_raw = klass.viewer_lut_raw
self.write_colorspace = instance.data["colorspace"]
self.color_channels = instance.data["color_channels"]
self.name = name or "baked"
self.ext = ext or "mov"
@ -834,7 +835,7 @@ class ExporterReviewMov(ExporterReview):
self.log.info("Nodes exported...")
return path
def generate_mov(self, farm=False, **kwargs):
def generate_mov(self, farm=False, delete=True, **kwargs):
# colorspace data
colorspace = None
# get colorspace settings
@ -947,6 +948,8 @@ class ExporterReviewMov(ExporterReview):
self.log.debug("Path: {}".format(self.path))
write_node["file"].setValue(str(self.path))
write_node["file_type"].setValue(str(self.ext))
write_node["channels"].setValue(str(self.color_channels))
# Knobs `meta_codec` and `mov64_codec` are not available on centos.
# TODO shouldn't this come from settings on outputs?
try:
@ -987,8 +990,13 @@ class ExporterReviewMov(ExporterReview):
self.render(write_node.name())
# ---------- generate representation data
tags = ["review", "need_thumbnail"]
if delete:
tags.append("delete")
self.get_representation_data(
tags=["review", "need_thumbnail", "delete"] + add_tags,
tags=tags + add_tags,
custom_tags=add_custom_tags,
range=True,
colorspace=colorspace

View file

@ -62,7 +62,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
}
# add attributes from the version to imprint to metadata knob
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# getting file path
@ -206,7 +206,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
"colorspaceInput": colorspace,
}
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# adding nodes to node graph

View file

@ -48,7 +48,7 @@ class AlembicCameraLoader(load.LoaderPlugin):
"frameEnd": last,
"version": version_entity["version"],
}
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# getting file path
@ -123,7 +123,7 @@ class AlembicCameraLoader(load.LoaderPlugin):
}
# add attributes from the version to imprint to metadata knob
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# getting file path

View file

@ -9,7 +9,8 @@ from ayon_core.pipeline import (
get_representation_path,
)
from ayon_core.pipeline.colorspace import (
get_imageio_file_rules_colorspace_from_filepath
get_imageio_file_rules_colorspace_from_filepath,
get_current_context_imageio_config_preset,
)
from ayon_core.hosts.nuke.api.lib import (
get_imageio_input_colorspace,
@ -197,7 +198,6 @@ class LoadClip(plugin.NukeLoader):
"frameStart",
"frameEnd",
"source",
"author",
"fps",
"handleStart",
"handleEnd",
@ -347,8 +347,7 @@ class LoadClip(plugin.NukeLoader):
"source": version_attributes.get("source"),
"handleStart": str(self.handle_start),
"handleEnd": str(self.handle_end),
"fps": str(version_attributes.get("fps")),
"author": version_attributes.get("author")
"fps": str(version_attributes.get("fps"))
}
last_version_entity = ayon_api.get_last_version_by_product_id(
@ -547,9 +546,10 @@ class LoadClip(plugin.NukeLoader):
f"Colorspace from representation colorspaceData: {colorspace}"
)
config_data = get_current_context_imageio_config_preset()
# check if any filerules are not applicable
new_parsed_colorspace = get_imageio_file_rules_colorspace_from_filepath( # noqa
filepath, "nuke", project_name
filepath, "nuke", project_name, config_data=config_data
)
self.log.debug(f"Colorspace new filerules: {new_parsed_colorspace}")

View file

@ -69,7 +69,6 @@ class LoadEffects(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@ -189,7 +188,6 @@ class LoadEffects(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps",
]:
data_imprint[k] = version_attributes[k]

View file

@ -69,7 +69,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@ -192,7 +191,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]

View file

@ -71,7 +71,6 @@ class LoadGizmo(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@ -139,7 +138,6 @@ class LoadGizmo(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]

View file

@ -73,7 +73,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@ -145,7 +144,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]

View file

@ -133,7 +133,7 @@ class LoadImage(load.LoaderPlugin):
"version": version_entity["version"],
"colorspace": colorspace,
}
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes.get(k, str(None))
r["tile_color"].setValue(int("0x4ecd25ff", 16))
@ -207,7 +207,6 @@ class LoadImage(load.LoaderPlugin):
"colorspace": version_attributes.get("colorSpace"),
"source": version_attributes.get("source"),
"fps": str(version_attributes.get("fps")),
"author": version_attributes.get("author")
}
# change color of node

View file

@ -47,7 +47,7 @@ class AlembicModelLoader(load.LoaderPlugin):
"version": version_entity["version"]
}
# add attributes from the version to imprint to metadata knob
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# getting file path
@ -130,7 +130,7 @@ class AlembicModelLoader(load.LoaderPlugin):
}
# add additional metadata from the version to imprint to Avalon knob
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]
# getting file path

View file

@ -55,7 +55,6 @@ class LinkAsGroup(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@ -131,7 +130,6 @@ class LinkAsGroup(load.LoaderPlugin):
"colorspace": version_attributes.get("colorSpace"),
"source": version_attributes.get("source"),
"fps": version_attributes.get("fps"),
"author": version_attributes.get("author")
}
# Update the imprinted representation

View file

@ -153,6 +153,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
# Determine defined file type
ext = write_node["file_type"].value()
# determine defined channel type
color_channels = write_node["channels"].value()
# get frame range data
handle_start = instance.context.data["handleStart"]
handle_end = instance.context.data["handleEnd"]
@ -172,7 +175,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
"path": write_file_path,
"outputDir": output_dir,
"ext": ext,
"colorspace": colorspace
"colorspace": colorspace,
"color_channels": color_channels
})
if product_type == "render":

View file

@ -136,11 +136,16 @@ class ExtractReviewIntermediates(publish.Extractor):
self, instance, o_name, o_data["extension"],
multiple_presets)
o_data["add_custom_tags"].append("intermediate")
delete = not o_data.get("publish", False)
if instance.data.get("farm"):
if "review" in instance.data["families"]:
instance.data["families"].remove("review")
data = exporter.generate_mov(farm=True, **o_data)
data = exporter.generate_mov(
farm=True, delete=delete, **o_data
)
self.log.debug(
"_ data: {}".format(data))
@ -154,7 +159,7 @@ class ExtractReviewIntermediates(publish.Extractor):
"bakeWriteNodeName": data.get("bakeWriteNodeName")
})
else:
data = exporter.generate_mov(**o_data)
data = exporter.generate_mov(delete=delete, **o_data)
# add representation generated by exporter
generated_repres.extend(data["representations"])

View file

@ -35,8 +35,12 @@ class ImageCreator(Creator):
create_empty_group = False
stub = api.stub() # only after PS is up
top_level_selected_items = stub.get_selected_layers()
if pre_create_data.get("use_selection"):
try:
top_level_selected_items = stub.get_selected_layers()
except ValueError:
raise CreatorError("Cannot group locked Background layer!")
only_single_item_selected = len(top_level_selected_items) == 1
if (
only_single_item_selected or
@ -50,11 +54,12 @@ class ImageCreator(Creator):
group = stub.group_selected_layers(product_name_from_ui)
groups_to_create.append(group)
else:
stub.select_layers(stub.get_layers())
try:
stub.select_layers(stub.get_layers())
group = stub.group_selected_layers(product_name_from_ui)
except:
except ValueError:
raise CreatorError("Cannot group locked Background layer!")
groups_to_create.append(group)
# create empty group if nothing selected

View file

@ -1,5 +1,3 @@
import os
import pyblish.api
import pyblish.util

View file

@ -156,14 +156,9 @@ This creator publishes color space look file (LUT).
]
def apply_settings(self, project_settings):
host = self.create_context.host
host_name = host.name
project_name = host.get_current_project_name()
config_data = colorspace.get_imageio_config(
project_name, host_name,
config_data = colorspace.get_current_context_imageio_config_preset(
project_settings=project_settings
)
if not config_data:
self.enabled = False
return

View file

@ -0,0 +1,96 @@
from pathlib import Path
from ayon_core.pipeline import (
CreatedInstance,
)
from ayon_core.lib.attribute_definitions import (
FileDef,
BoolDef,
TextDef,
)
from ayon_core.hosts.traypublisher.api.plugin import TrayPublishCreator
class EditorialPackageCreator(TrayPublishCreator):
"""Creates instance for OTIO file from published folder.
The folder contains an OTIO file and exported .mov files. The process should
publish the whole folder as a single `editorial_pckg` product type and
(possibly) convert the .mov files into a different format and copy them into
the `publish` `resources` subfolder.
"""
identifier = "editorial_pckg"
label = "Editorial package"
product_type = "editorial_pckg"
description = "Publish folder with OTIO file and resources"
# Position batch creator after simple creators
order = 120
conversion_enabled = False
def apply_settings(self, project_settings):
self.conversion_enabled = (
project_settings["traypublisher"]
["publish"]
["ExtractEditorialPckgConversion"]
["conversion_enabled"]
)
def get_icon(self):
return "fa.folder"
def create(self, product_name, instance_data, pre_create_data):
folder_path = pre_create_data.get("folder_path")
if not folder_path:
return
instance_data["creator_attributes"] = {
"folder_path": (Path(folder_path["directory"]) /
Path(folder_path["filenames"][0])).as_posix(),
"conversion_enabled": pre_create_data["conversion_enabled"]
}
# Create new instance
new_instance = CreatedInstance(self.product_type, product_name,
instance_data, self)
self._store_new_instance(new_instance)
def get_pre_create_attr_defs(self):
# Use same attributes as for instance attributes
return [
FileDef(
"folder_path",
folders=True,
single_item=True,
extensions=[],
allow_sequences=False,
label="Folder path"
),
BoolDef("conversion_enabled",
tooltip="Convert to output defined in Settings.",
default=self.conversion_enabled,
label="Convert resources"),
]
def get_instance_attr_defs(self):
return [
TextDef(
"folder_path",
label="Folder path",
disabled=True
),
BoolDef("conversion_enabled",
tooltip="Convert to output defined in Settings.",
label="Convert resources"),
]
def get_detail_description(self):
return """# Publish folder with OTIO file and video clips
The folder contains an OTIO file and exported .mov files. The process
should publish the whole folder as a single `editorial_pckg` product type
and (possibly) convert the .mov files into a different format and copy them
into the `publish` `resources` subfolder.
"""

View file

@ -0,0 +1,58 @@
"""Produces instance.data["editorial_pckg"] data used during integration.
Requires:
instance.data["creator_attributes"]["path"] - from creator
Provides:
instance -> editorial_pckg (dict):
folder_path (str)
otio_path (str) - from dragged folder
resource_paths (list)
"""
import os
import pyblish.api
from ayon_core.lib.transcoding import VIDEO_EXTENSIONS
class CollectEditorialPackage(pyblish.api.InstancePlugin):
"""Collects path to OTIO file and resources"""
label = "Collect Editorial Package"
order = pyblish.api.CollectorOrder - 0.1
hosts = ["traypublisher"]
families = ["editorial_pckg"]
def process(self, instance):
folder_path = instance.data["creator_attributes"]["folder_path"]
if not folder_path or not os.path.exists(folder_path):
self.log.info((
"Instance doesn't contain collected existing folder path."
))
return
instance.data["editorial_pckg"] = {}
instance.data["editorial_pckg"]["folder_path"] = folder_path
otio_path, resource_paths = (
self._get_otio_and_resource_paths(folder_path))
instance.data["editorial_pckg"]["otio_path"] = otio_path
instance.data["editorial_pckg"]["resource_paths"] = resource_paths
def _get_otio_and_resource_paths(self, folder_path):
otio_path = None
resource_paths = []
file_names = os.listdir(folder_path)
for filename in file_names:
_, ext = os.path.splitext(filename)
file_path = os.path.join(folder_path, filename)
if ext == ".otio":
otio_path = file_path
elif ext in VIDEO_EXTENSIONS:
resource_paths.append(file_path)
return otio_path, resource_paths
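A hypothetical example (illustrative paths only) of the data this collector leaves on the instance:
# Illustration only; mirrors the keys set in process() above.
instance_editorial_pckg = {
    "folder_path": "/projects/demo/in/editorial_delivery",
    "otio_path": "/projects/demo/in/editorial_delivery/cut_v001.otio",
    "resource_paths": [
        "/projects/demo/in/editorial_delivery/shot_010.mov",
        "/projects/demo/in/editorial_delivery/shot_020.mov",
    ],
}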

View file

@ -1,10 +1,7 @@
import pyblish.api
from ayon_core.pipeline import (
publish,
registered_host
)
from ayon_core.lib import EnumDef
from ayon_core.pipeline import colorspace
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import KnownPublishError
@ -19,9 +16,10 @@ class CollectColorspace(pyblish.api.InstancePlugin,
families = ["render", "plate", "reference", "image", "online"]
enabled = False
colorspace_items = [
default_colorspace_items = [
(None, "Don't override")
]
colorspace_items = list(default_colorspace_items)
colorspace_attr_show = False
config_items = None
@ -69,14 +67,13 @@ class CollectColorspace(pyblish.api.InstancePlugin,
@classmethod
def apply_settings(cls, project_settings):
host = registered_host()
host_name = host.name
project_name = host.get_current_project_name()
config_data = colorspace.get_imageio_config(
project_name, host_name,
config_data = colorspace.get_current_context_imageio_config_preset(
project_settings=project_settings
)
enabled = False
colorspace_items = list(cls.default_colorspace_items)
config_items = None
if config_data:
filepath = config_data["path"]
config_items = colorspace.get_ocio_config_colorspaces(filepath)
@ -85,9 +82,11 @@ class CollectColorspace(pyblish.api.InstancePlugin,
include_aliases=True,
include_roles=True
)
cls.config_items = config_items
cls.colorspace_items.extend(labeled_colorspaces)
cls.enabled = True
colorspace_items.extend(labeled_colorspaces)
cls.config_items = config_items
cls.colorspace_items = colorspace_items
cls.enabled = enabled
@classmethod
def get_attribute_defs(cls):

View file

@ -10,9 +10,13 @@ class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.491
label = "Collect Missing Frame Data From Folder"
families = ["plate", "pointcache",
"vdbcache", "online",
"render"]
families = [
"plate",
"pointcache",
"vdbcache",
"online",
"render",
]
hosts = ["traypublisher"]
def process(self, instance):
@ -22,16 +26,26 @@ class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin):
"frameStart",
"frameEnd",
"handleStart",
"handleEnd"
"handleEnd",
):
if key not in instance.data:
missing_keys.append(key)
# Skip the logic if all keys are already collected.
# NOTE: In editorial the 'folderEntity' is not filled, so it would crash
# even if we don't need it.
if not missing_keys:
return
keys_set = []
folder_attributes = instance.data["folderEntity"]["attrib"]
for key in missing_keys:
if key in folder_attributes:
instance.data[key] = folder_attributes[key]
keys_set.append(key)
if keys_set:
self.log.debug(f"Frame range data {keys_set} "
"has been collected from folder entity.")
self.log.debug(
f"Frame range data {keys_set} "
"has been collected from folder entity."
)

View file

@ -0,0 +1,232 @@
import copy
import os.path
import subprocess
import opentimelineio
import pyblish.api
from ayon_core.lib import get_ffmpeg_tool_args, run_subprocess
from ayon_core.pipeline import publish
class ExtractEditorialPckgConversion(publish.Extractor):
"""Replaces movie paths in otio file with publish rootless
Prepares movie resources for integration (adds them to `transfers`).
Converts .mov files according to output definition.
"""
label = "Extract Editorial Package"
order = pyblish.api.ExtractorOrder - 0.45
hosts = ["traypublisher"]
families = ["editorial_pckg"]
def process(self, instance):
editorial_pckg_data = instance.data.get("editorial_pckg")
otio_path = editorial_pckg_data["otio_path"]
otio_basename = os.path.basename(otio_path)
staging_dir = self.staging_dir(instance)
editorial_pckg_repre = {
'name': "editorial_pckg",
'ext': "otio",
'files': otio_basename,
"stagingDir": staging_dir,
}
otio_staging_path = os.path.join(staging_dir, otio_basename)
instance.data["representations"].append(editorial_pckg_repre)
publish_resource_folder = self._get_publish_resource_folder(instance)
resource_paths = editorial_pckg_data["resource_paths"]
transfers = self._get_transfers(resource_paths,
publish_resource_folder)
project_settings = instance.context.data["project_settings"]
output_def = (project_settings["traypublisher"]
["publish"]
["ExtractEditorialPckgConversion"]
["output"])
conversion_enabled = (instance.data["creator_attributes"]
["conversion_enabled"])
if conversion_enabled and output_def["ext"]:
transfers = self._convert_resources(output_def, transfers)
instance.data["transfers"] = transfers
source_to_rootless = self._get_resource_path_mapping(instance,
transfers)
otio_data = editorial_pckg_data["otio_data"]
otio_data = self._replace_target_urls(otio_data, source_to_rootless)
opentimelineio.adapters.write_to_file(otio_data, otio_staging_path)
self.log.info("Added Editorial Package representation: {}".format(
editorial_pckg_repre))
def _get_publish_resource_folder(self, instance):
"""Calculates publish folder and create it."""
publish_path = self._get_published_path(instance)
publish_folder = os.path.dirname(publish_path)
publish_resource_folder = os.path.join(publish_folder, "resources")
if not os.path.exists(publish_resource_folder):
os.makedirs(publish_resource_folder, exist_ok=True)
return publish_resource_folder
def _get_resource_path_mapping(self, instance, transfers):
"""Returns dict of {source_mov_path: rootless_published_path}."""
replace_paths = {}
anatomy = instance.context.data["anatomy"]
for source, destination in transfers:
rootless_path = self._get_rootless(anatomy, destination)
source_file_name = os.path.basename(source)
replace_paths[source_file_name] = rootless_path
return replace_paths
def _get_transfers(self, resource_paths, publish_resource_folder):
"""Returns list of tuples (source, destination) with movie paths."""
transfers = []
for res_path in resource_paths:
res_basename = os.path.basename(res_path)
pub_res_path = os.path.join(publish_resource_folder, res_basename)
transfers.append((res_path, pub_res_path))
return transfers
def _replace_target_urls(self, otio_data, replace_paths):
"""Replace original movie paths with published rootless ones."""
for track in otio_data.tracks:
for clip in track:
# Check if the clip has a media reference
if clip.media_reference is not None:
# Access the target_url from the media reference
target_url = clip.media_reference.target_url
if not target_url:
continue
file_name = os.path.basename(target_url)
replace_path = replace_paths.get(file_name)
if replace_path:
clip.media_reference.target_url = replace_path
if clip.name == file_name:
clip.name = os.path.basename(replace_path)
return otio_data
def _get_rootless(self, anatomy, path):
"""Try to find rootless {root[work]} path from `path`"""
success, rootless_path = anatomy.find_root_template_from_path(
path)
if not success:
# `rootless_path` is not set to `output_dir` if none of roots match
self.log.warning(
f"Could not find root path for remapping '{path}'."
)
rootless_path = path
return rootless_path
def _get_published_path(self, instance):
"""Calculates expected `publish` folder"""
# determine published path from Anatomy.
template_data = instance.data.get("anatomyData")
rep = instance.data["representations"][0]
template_data["representation"] = rep.get("name")
template_data["ext"] = rep.get("ext")
template_data["comment"] = None
anatomy = instance.context.data["anatomy"]
template_data["root"] = anatomy.roots
template = anatomy.get_template_item("publish", "default", "path")
template_filled = template.format_strict(template_data)
return os.path.normpath(template_filled)
def _convert_resources(self, output_def, transfers):
"""Converts all resource files to configured format."""
out_extension = output_def["ext"]
if not out_extension:
self.log.warning("No output extension configured in "
"ayon+settings://traypublisher/publish/ExtractEditorialPckgConversion") # noqa
return transfers
final_transfers = []
out_def_ffmpeg_args = output_def["ffmpeg_args"]
ffmpeg_input_args = [
value.strip()
for value in out_def_ffmpeg_args["input"]
if value.strip()
]
ffmpeg_video_filters = [
value.strip()
for value in out_def_ffmpeg_args["video_filters"]
if value.strip()
]
ffmpeg_audio_filters = [
value.strip()
for value in out_def_ffmpeg_args["audio_filters"]
if value.strip()
]
ffmpeg_output_args = [
value.strip()
for value in out_def_ffmpeg_args["output"]
if value.strip()
]
ffmpeg_input_args = self._split_ffmpeg_args(ffmpeg_input_args)
generic_args = [
subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg"))
]
generic_args.extend(ffmpeg_input_args)
if ffmpeg_video_filters:
generic_args.append("-filter:v")
generic_args.append(
"\"{}\"".format(",".join(ffmpeg_video_filters)))
if ffmpeg_audio_filters:
generic_args.append("-filter:a")
generic_args.append(
"\"{}\"".format(",".join(ffmpeg_audio_filters)))
for source, destination in transfers:
base_name = os.path.basename(destination)
file_name, ext = os.path.splitext(base_name)
dest_path = os.path.join(os.path.dirname(destination),
f"{file_name}.{out_extension}")
final_transfers.append((source, dest_path))
all_args = copy.deepcopy(generic_args)
all_args.append(f"-i \"{source}\"")
all_args.extend(ffmpeg_output_args) # order matters
all_args.append(f"\"{dest_path}\"")
subprcs_cmd = " ".join(all_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
run_subprocess(subprcs_cmd, shell=True, logger=self.log)
return final_transfers
def _split_ffmpeg_args(self, in_args):
"""Makes sure all entered arguments are separated in individual items.
Split each argument string with " -" to identify if string contains
one or more arguments.
"""
splitted_args = []
for arg in in_args:
sub_args = arg.split(" -")
if len(sub_args) == 1:
if arg and arg not in splitted_args:
splitted_args.append(arg)
continue
for idx, arg in enumerate(sub_args):
if idx != 0:
arg = "-" + arg
if arg and arg not in splitted_args:
splitted_args.append(arg)
return splitted_args
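As a quick sanity check of the splitting described above, a simplified standalone sketch (without the de-duplication step); the ffmpeg flags are only examples:
# Simplified sketch of the " -" splitting used by _split_ffmpeg_args;
# the arguments below are hypothetical settings values.
raw_args = ["-c:v libx264 -crf 18", "-pix_fmt yuv420p"]
split_args = []
for arg in raw_args:
    parts = arg.split(" -")
    split_args.append(parts[0])
    split_args.extend("-" + part for part in parts[1:])
print(split_args)  # ['-c:v libx264', '-crf 18', '-pix_fmt yuv420p']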

View file

@ -0,0 +1,68 @@
import os
import opentimelineio
import pyblish.api
from ayon_core.pipeline import PublishValidationError
class ValidateEditorialPackage(pyblish.api.InstancePlugin):
"""Checks that published folder contains all resources from otio
Currently checks only by file names and expects flat structure.
It ignores path to resources in otio file as folder might be dragged in and
published from different location than it was created.
"""
label = "Validate Editorial Package"
order = pyblish.api.ValidatorOrder - 0.49
hosts = ["traypublisher"]
families = ["editorial_pckg"]
def process(self, instance):
editorial_pckg_data = instance.data.get("editorial_pckg")
if not editorial_pckg_data:
raise PublishValidationError("Editorial package not collected")
folder_path = editorial_pckg_data["folder_path"]
otio_path = editorial_pckg_data["otio_path"]
if not otio_path:
raise PublishValidationError(
f"Folder {folder_path} missing otio file")
resource_paths = editorial_pckg_data["resource_paths"]
resource_file_names = {os.path.basename(path)
for path in resource_paths}
otio_data = opentimelineio.adapters.read_from_file(otio_path)
target_urls = self._get_all_target_urls(otio_data)
missing_files = set()
for target_url in target_urls:
target_basename = os.path.basename(target_url)
if target_basename not in resource_file_names:
missing_files.add(target_basename)
if missing_files:
raise PublishValidationError(
f"Otio file contains missing files `{missing_files}`.\n\n"
f"Please add them to `{folder_path}` and republish.")
instance.data["editorial_pckg"]["otio_data"] = otio_data
def _get_all_target_urls(self, otio_data):
target_urls = []
# Iterate through tracks, clips, or other elements
for track in otio_data.tracks:
for clip in track:
# Check if the clip has a media reference
if clip.media_reference is not None:
# Access the target_url from the media reference
target_url = clip.media_reference.target_url
if target_url:
target_urls.append(target_url)
return target_urls
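For readers unfamiliar with OpenTimelineIO, a small self-contained sketch of where `media_reference.target_url` lives; the clip and file names are hypothetical:
import opentimelineio as otio

# Build a tiny in-memory timeline (illustrative only) and walk it the same
# way as _get_all_target_urls above.
clip = otio.schema.Clip(
    name="sh010.mov",
    media_reference=otio.schema.ExternalReference(target_url="sh010.mov"),
)
track = otio.schema.Track(name="V1")
track.append(clip)
timeline = otio.schema.Timeline(name="editorial_pckg_example")
timeline.tracks.append(track)

for track in timeline.tracks:
    for clip in track:
        if clip.media_reference is not None:
            print(clip.media_reference.target_url)  # -> sh010.mov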

View file

@ -80,17 +80,21 @@ def get_engine_versions(env=None):
def get_editor_exe_path(engine_path: Path, engine_version: str) -> Path:
"""Get UE Editor executable path."""
ue_path = engine_path / "Engine/Binaries"
ue_name = "UnrealEditor"
# handle older versions of Unreal Engine
if engine_version.split(".")[0] == "4":
ue_name = "UE4Editor"
if platform.system().lower() == "windows":
if engine_version.split(".")[0] == "4":
ue_path /= "Win64/UE4Editor.exe"
elif engine_version.split(".")[0] == "5":
ue_path /= "Win64/UnrealEditor.exe"
ue_path /= f"Win64/{ue_name}.exe"
elif platform.system().lower() == "linux":
ue_path /= "Linux/UE4Editor"
ue_path /= f"Linux/{ue_name}"
elif platform.system().lower() == "darwin":
ue_path /= "Mac/UE4Editor"
ue_path /= f"Mac/{ue_name}"
return ue_path

View file

@ -260,11 +260,11 @@ class UEProjectGenerationWorker(UEWorker):
self.failed.emit(msg, return_code)
raise RuntimeError(msg)
# ensure we have PySide2 installed in engine
# ensure we have PySide2/6 installed in engine
self.progress.emit(0)
self.stage_begin.emit(
(f"Checking PySide2 installation... {stage_count} "
(f"Checking Qt bindings installation... {stage_count} "
f" out of {stage_count}"))
python_path = None
if platform.system().lower() == "windows":
@ -287,11 +287,30 @@ class UEProjectGenerationWorker(UEWorker):
msg = f"Unreal Python not found at {python_path}"
self.failed.emit(msg, 1)
raise RuntimeError(msg)
pyside_cmd = [python_path.as_posix(),
"-m",
"pip",
"install",
"pyside2"]
pyside_version = "PySide2"
ue_version = self.ue_version.split(".")
if int(ue_version[0]) == 5 and int(ue_version[1]) >= 4:
# Use PySide6 6.6.3 because 6.7.0 had a bug
# - 'QPushButton' can't be added to 'QBoxLayout'
pyside_version = "PySide6==6.6.3"
site_packages_prefix = python_path.parent.as_posix()
pyside_cmd = [
python_path.as_posix(),
"-m", "pip",
"install",
"--ignore-installed",
pyside_version,
]
if platform.system().lower() == "windows":
pyside_cmd += ["--target", site_packages_prefix]
print(f"--- Installing {pyside_version} ...")
print(" ".join(pyside_cmd))
pyside_install = subprocess.Popen(pyside_cmd,
stdout=subprocess.PIPE,
@ -306,8 +325,8 @@ class UEProjectGenerationWorker(UEWorker):
return_code = pyside_install.wait()
if return_code and return_code != 0:
msg = ("Failed to create the project! "
"The installation of PySide2 has failed!")
msg = (f"Failed to create the project! {return_code} "
f"The installation of {pyside_version} has failed!: {pyside_install}")
self.failed.emit(msg, return_code)
raise RuntimeError(msg)

View file

@ -139,6 +139,7 @@ from .path_tools import (
)
from .ayon_info import (
is_in_ayon_launcher_process,
is_running_from_build,
is_using_ayon_console,
is_staging_enabled,
@ -248,6 +249,7 @@ __all__ = [
"Logger",
"is_in_ayon_launcher_process",
"is_running_from_build",
"is_using_ayon_console",
"is_staging_enabled",

View file

@ -1,4 +1,5 @@
import os
import sys
import json
import datetime
import platform
@ -25,6 +26,18 @@ def get_ayon_launcher_version():
return content["__version__"]
def is_in_ayon_launcher_process():
"""Determine if current process is running from AYON launcher.
Returns:
bool: True if running from AYON launcher.
"""
ayon_executable_path = os.path.normpath(os.environ["AYON_EXECUTABLE"])
executable_path = os.path.normpath(sys.executable)
return ayon_executable_path == executable_path
def is_running_from_build():
"""Determine if current process is running from build or code.

View file

@ -1,6 +1,8 @@
from .deadline_module import DeadlineModule
from .version import __version__
__all__ = (
"DeadlineModule",
"__version__"
)

View file

@ -29,15 +29,11 @@ from ayon_core.pipeline.publish.lib import (
JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError)
# TODO both 'requests_post' and 'requests_get' should not set 'verify' based
# on environment variable. This should be done in a more controlled way,
# e.g. each deadline url could have a checkbox to enable/disable
# ssl verification.
def requests_post(*args, **kwargs):
"""Wrap request post method.
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline server is
Disabling SSL certificate validation if ``verify`` kwarg is set to False.
This is useful when Deadline server is
running with self-signed certificates and its certificate is not
added to trusted certificates on client machines.
@ -46,9 +42,9 @@ def requests_post(*args, **kwargs):
of defense SSL is providing, and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL",
True) else True # noqa
auth = kwargs.get("auth")
if auth:
kwargs["auth"] = tuple(auth) # explicit cast to tuple
# add 10sec timeout before bailing out
kwargs['timeout'] = 10
return requests.post(*args, **kwargs)
@ -57,8 +53,8 @@ def requests_post(*args, **kwargs):
def requests_get(*args, **kwargs):
"""Wrap request get method.
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline server is
Disabling SSL certificate validation if ``verify`` kwarg is set to False.
This is useful when Deadline server is
running with self-signed certificates and its certificate is not
added to trusted certificates on client machines.
@ -67,9 +63,9 @@ def requests_get(*args, **kwargs):
of defense SSL is providing, and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL",
True) else True # noqa
auth = kwargs.get("auth")
if auth:
kwargs["auth"] = tuple(auth)
# add 10sec timeout before bailing out
kwargs['timeout'] = 10
return requests.get(*args, **kwargs)
@ -434,9 +430,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
"""Plugin entry point."""
self._instance = instance
context = instance.context
self._deadline_url = context.data.get("defaultDeadline")
self._deadline_url = instance.data.get(
"deadlineUrl", self._deadline_url)
self._deadline_url = instance.data["deadline"]["url"]
assert self._deadline_url, "Requires Deadline Webservice URL"
@ -460,7 +454,9 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
self.plugin_info = self.get_plugin_info()
self.aux_files = self.get_aux_files()
job_id = self.process_submission()
auth = instance.data["deadline"]["auth"]
verify = instance.data["deadline"]["verify"]
job_id = self.process_submission(auth, verify)
self.log.info("Submitted job to Deadline: {}.".format(job_id))
# TODO: Find a way that's more generic and not render type specific
@ -473,10 +469,10 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
job_info=render_job_info,
plugin_info=render_plugin_info
)
render_job_id = self.submit(payload)
render_job_id = self.submit(payload, auth, verify)
self.log.info("Render job id: %s", render_job_id)
def process_submission(self):
def process_submission(self, auth=None, verify=True):
"""Process data for submission.
This takes Deadline JobInfo, PluginInfo, AuxFile, creates payload
@ -487,7 +483,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
"""
payload = self.assemble_payload()
return self.submit(payload)
return self.submit(payload, auth, verify)
@abstractmethod
def get_job_info(self):
@ -577,7 +573,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
"AuxFiles": aux_files or self.aux_files
}
def submit(self, payload):
def submit(self, payload, auth, verify):
"""Submit payload to Deadline API end-point.
This takes payload in the form of JSON file and POST it to
@ -585,6 +581,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
Args:
payload (dict): dict to become json in deadline submission.
auth (tuple): (username, password)
verify (bool): verify SSL certificate if present
Returns:
str: resulting Deadline job id.
@ -594,7 +592,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
"""
url = "{}/api/jobs".format(self._deadline_url)
response = requests_post(url, json=payload)
response = requests_post(
url, json=payload, auth=auth, verify=verify)
if not response.ok:
self.log.error("Submission failed!")
self.log.error(response.status_code)
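To show the new call pattern end to end, a hedged sketch of submitting through the wrapper above; the webservice URL, payload and credentials are made up:
# Illustrative only -- hypothetical values, using the requests_post wrapper
# defined above instead of calling requests.post directly.
deadline = {
    "url": "http://deadline.local:8082",
    "auth": ("artist", "secret"),  # None when authentication is not required
    "verify": False,               # self-signed certificate on the webservice
}
response = requests_post(
    "{}/api/jobs".format(deadline["url"]),
    json={"JobInfo": {}, "PluginInfo": {}, "AuxFiles": []},
    auth=deadline["auth"],
    verify=deadline["verify"],
)
if not response.ok:
    print(response.text)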

View file

@ -19,23 +19,23 @@ class DeadlineModule(AYONAddon, IPluginPaths):
def initialize(self, studio_settings):
# This module is always enabled
deadline_urls = {}
deadline_servers_info = {}
enabled = self.name in studio_settings
if enabled:
deadline_settings = studio_settings[self.name]
deadline_urls = {
url_item["name"]: url_item["value"]
deadline_servers_info = {
url_item["name"]: url_item
for url_item in deadline_settings["deadline_urls"]
}
if enabled and not deadline_urls:
if enabled and not deadline_servers_info:
enabled = False
self.log.warning((
"Deadline Webservice URLs are not specified. Disabling addon."
))
self.enabled = enabled
self.deadline_urls = deadline_urls
self.deadline_servers_info = deadline_servers_info
def get_plugin_paths(self):
"""Deadline plugin paths."""
@ -45,13 +45,15 @@ class DeadlineModule(AYONAddon, IPluginPaths):
}
@staticmethod
def get_deadline_pools(webservice, log=None):
def get_deadline_pools(webservice, auth=None, log=None):
"""Get pools from Deadline.
Args:
webservice (str): Server url.
log (Logger)
auth (Optional[Tuple[str, str]]): Tuple containing username,
password
log (Optional[Logger]): Logger to log errors to, if provided.
Returns:
list: Pools.
List[str]: Pools.
Throws:
RuntimeError: If deadline webservice is unreachable.
@ -63,7 +65,10 @@ class DeadlineModule(AYONAddon, IPluginPaths):
argument = "{}/api/pools?NamesOnly=true".format(webservice)
try:
response = requests_get(argument)
kwargs = {}
if auth:
kwargs["auth"] = auth
response = requests_get(argument, **kwargs)
except requests.exceptions.ConnectionError as exc:
msg = 'Cannot connect to DL web service {}'.format(webservice)
log.error(msg)
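A hedged usage sketch of the updated `get_deadline_pools` signature; the webservice address and credentials are hypothetical:
# Illustrative only -- hypothetical webservice URL and credentials.
pools = DeadlineModule.get_deadline_pools(
    "http://deadline.local:8082",
    auth=("artist", "secret"),
)
print(pools)  # e.g. ["none", "comp", "render"]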

View file

@ -13,17 +13,45 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
"""Collect Deadline Webservice URL from instance."""
# Run before collect_render.
order = pyblish.api.CollectorOrder + 0.005
order = pyblish.api.CollectorOrder + 0.225
label = "Deadline Webservice from the Instance"
families = ["rendering", "renderlayer"]
hosts = ["maya"]
targets = ["local"]
families = ["render",
"rendering",
"render.farm",
"renderFarm",
"renderlayer",
"maxrender",
"usdrender",
"redshift_rop",
"arnold_rop",
"mantra_rop",
"karma_rop",
"vray_rop",
"publish.hou",
"image"] # for Fusion
def process(self, instance):
instance.data["deadlineUrl"] = self._collect_deadline_url(instance)
instance.data["deadlineUrl"] = \
instance.data["deadlineUrl"].strip().rstrip("/")
if not instance.data.get("farm"):
self.log.debug("Should not be processed on farm, skipping.")
return
if not instance.data.get("deadline"):
instance.data["deadline"] = {}
# TODO: separate logic should be removed, all hosts should use the same logic
host_name = instance.context.data["hostName"]
if host_name == "maya":
deadline_url = self._collect_deadline_url(instance)
else:
deadline_url = (instance.data.get("deadlineUrl") or # backwards
instance.data.get("deadline", {}).get("url"))
if deadline_url:
instance.data["deadline"]["url"] = deadline_url.strip().rstrip("/")
else:
instance.data["deadline"]["url"] = instance.context.data["deadline"]["defaultUrl"] # noqa
self.log.debug(
"Using {} for submission.".format(instance.data["deadlineUrl"]))
"Using {} for submission".format(instance.data["deadline"]["url"]))
def _collect_deadline_url(self, render_instance):
# type: (pyblish.api.Instance) -> str
@ -49,13 +77,13 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
["project_settings"]
["deadline"]
)
default_server = render_instance.context.data["defaultDeadline"]
default_server_url = (render_instance.context.data["deadline"]
["defaultUrl"])
# QUESTION How and where is this set? Should it be removed?
instance_server = render_instance.data.get("deadlineServers")
if not instance_server:
self.log.debug("Using default server.")
return default_server
return default_server_url
# Get instance server as string.
if isinstance(instance_server, int):
@ -66,7 +94,7 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
default_servers = {
url_item["name"]: url_item["value"]
for url_item in deadline_settings["deadline_urls"]
for url_item in deadline_settings["deadline_servers_info"]
}
project_servers = (
render_instance.context.data

View file

@ -18,10 +18,9 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
"""
# Run before collect_deadline_server_instance.
order = pyblish.api.CollectorOrder + 0.0025
order = pyblish.api.CollectorOrder + 0.200
label = "Default Deadline Webservice"
pass_mongo_url = False
targets = ["local"]
def process(self, context):
try:
@ -33,15 +32,17 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
deadline_settings = context.data["project_settings"]["deadline"]
deadline_server_name = deadline_settings["deadline_server"]
deadline_webservice = None
dl_server_info = None
if deadline_server_name:
deadline_webservice = deadline_module.deadline_urls.get(
dl_server_info = deadline_module.deadline_servers_info.get(
deadline_server_name)
default_deadline_webservice = deadline_module.deadline_urls["default"]
deadline_webservice = (
deadline_webservice
or default_deadline_webservice
)
if dl_server_info:
deadline_url = dl_server_info["value"]
else:
default_dl_server_info = deadline_module.deadline_servers_info[0]
deadline_url = default_dl_server_info["value"]
context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/") # noqa
context.data["deadline"] = {}
context.data["deadline"]["defaultUrl"] = (
deadline_url.strip().rstrip("/"))

View file

@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-
"""Collect user credentials
Requires:
context -> project_settings
instance.data["deadline"]["url"]
Provides:
instance.data["deadline"] -> require_authentication (bool)
instance.data["deadline"] -> auth (tuple (str, str)) -
(username, password) or None
"""
import pyblish.api
from ayon_api import get_server_api_connection
from ayon_core.modules.deadline.deadline_module import DeadlineModule
from ayon_core.modules.deadline import __version__
class CollectDeadlineUserCredentials(pyblish.api.InstancePlugin):
"""Collects user name and password for artist if DL requires authentication
"""
order = pyblish.api.CollectorOrder + 0.250
label = "Collect Deadline User Credentials"
targets = ["local"]
hosts = ["aftereffects",
"blender",
"fusion",
"harmony",
"nuke",
"maya",
"max",
"houdini"]
families = ["render",
"rendering",
"render.farm",
"renderFarm",
"renderlayer",
"maxrender",
"usdrender",
"redshift_rop",
"arnold_rop",
"mantra_rop",
"karma_rop",
"vray_rop",
"publish.hou"]
def process(self, instance):
if not instance.data.get("farm"):
self.log.debug("Should not be processed on farm, skipping.")
return
collected_deadline_url = instance.data["deadline"]["url"]
if not collected_deadline_url:
raise ValueError("Instance doesn't have '[deadline][url]'.")
context_data = instance.context.data
deadline_settings = context_data["project_settings"]["deadline"]
deadline_server_name = None
# deadline url might be set directly from instance, need to find
# metadata for it
for deadline_info in deadline_settings["deadline_urls"]:
dl_settings_url = deadline_info["value"].strip().rstrip("/")
if dl_settings_url == collected_deadline_url:
deadline_server_name = deadline_info["name"]
break
if not deadline_server_name:
raise ValueError(f"Collected {collected_deadline_url} doesn't "
"match any site configured in Studio Settings")
instance.data["deadline"]["require_authentication"] = (
deadline_info["require_authentication"]
)
instance.data["deadline"]["auth"] = None
instance.data["deadline"]["verify"] = (
not deadline_info["not_verify_ssl"])
if not deadline_info["require_authentication"]:
return
# TODO import 'get_addon_site_settings' when available
# in public 'ayon_api'
local_settings = get_server_api_connection().get_addon_site_settings(
DeadlineModule.name, __version__)
local_settings = local_settings["local_settings"]
for server_info in local_settings:
if deadline_server_name == server_info["server_name"]:
instance.data["deadline"]["auth"] = (server_info["username"],
server_info["password"])

View file

@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Deadline Authentication</title>
<description>
## Deadline authentication is required
This project's Settings specify that Deadline requires authentication.
### How to repair?
Please go to the AYON Server > Site Settings and provide your Deadline username and password.
In some cases the password may be empty if Deadline is configured to allow that. Ask your administrator.
</description>
</error>
</root>

View file

@ -174,7 +174,9 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
instance.data["toBeRenderedOn"] = "deadline"
payload = self.assemble_payload()
return self.submit(payload)
auth = instance.data["deadline"]["auth"]
verify = instance.data["deadline"]["verify"]
return self.submit(payload, auth=auth, verify=verify)
def from_published_scene(self):
"""

View file

@ -2,9 +2,10 @@ import os
import re
import json
import getpass
import requests
import pyblish.api
from openpype_modules.deadline.abstract_submit_deadline import requests_post
class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
"""Submit CelAction2D scene to Deadline
@ -30,11 +31,7 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
context = instance.context
# get default deadline webservice url from deadline module
deadline_url = instance.context.data["defaultDeadline"]
# if custom one is set in instance, use that
if instance.data.get("deadlineUrl"):
deadline_url = instance.data.get("deadlineUrl")
deadline_url = instance.data["deadline"]["url"]
assert deadline_url, "Requires Deadline Webservice URL"
self.deadline_url = "{}/api/jobs".format(deadline_url)
@ -196,8 +193,11 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
self.expected_files(instance, render_path)
self.log.debug("__ expectedFiles: `{}`".format(
instance.data["expectedFiles"]))
response = requests.post(self.deadline_url, json=payload)
auth = instance.data["deadline"]["auth"]
verify = instance.data["deadline"]["verify"]
response = requests_post(self.deadline_url, json=payload,
auth=auth,
verify=verify)
if not response.ok:
self.log.error(

View file

@ -2,17 +2,13 @@ import os
import json
import getpass
import requests
import pyblish.api
from openpype_modules.deadline.abstract_submit_deadline import requests_post
from ayon_core.pipeline.publish import (
AYONPyblishPluginMixin
)
from ayon_core.lib import (
BoolDef,
NumberDef,
)
from ayon_core.lib import NumberDef
class FusionSubmitDeadline(
@ -64,11 +60,6 @@ class FusionSubmitDeadline(
decimals=0,
minimum=1,
maximum=10
),
BoolDef(
"suspend_publish",
default=False,
label="Suspend publish"
)
]
@ -80,10 +71,6 @@ class FusionSubmitDeadline(
attribute_values = self.get_attr_values_from_data(
instance.data)
# add suspend_publish attributeValue to instance data
instance.data["suspend_publish"] = attribute_values[
"suspend_publish"]
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
@ -94,11 +81,7 @@ class FusionSubmitDeadline(
from ayon_core.hosts.fusion.api.lib import get_frame_path
# get default deadline webservice url from deadline module
deadline_url = instance.context.data["defaultDeadline"]
# if custom one is set in instance, use that
if instance.data.get("deadlineUrl"):
deadline_url = instance.data.get("deadlineUrl")
deadline_url = instance.data["deadline"]["url"]
assert deadline_url, "Requires Deadline Webservice URL"
# Collect all saver instances in context that are to be rendered
@ -258,7 +241,9 @@ class FusionSubmitDeadline(
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(deadline_url)
response = requests.post(url, json=payload)
auth = instance.data["deadline"]["auth"]
verify = instance.data["deadline"]["verify"]
response = requests_post(url, json=payload, auth=auth, verify=verify)
if not response.ok:
raise Exception(response.text)

View file

@ -10,7 +10,6 @@ from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from ayon_core.lib import (
is_in_tests,
BoolDef,
TextDef,
NumberDef
)
@ -86,15 +85,10 @@ class HoudiniSubmitDeadline(
priority = 50
chunk_size = 1
group = ""
@classmethod
def get_attribute_defs(cls):
return [
BoolDef(
"suspend_publish",
default=False,
label="Suspend publish"
),
NumberDef(
"priority",
label="Priority",
@ -194,7 +188,7 @@ class HoudiniSubmitDeadline(
job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")
if split_render_job and is_export_job:
job_info.Priority = attribute_values.get(
"export_priority", self.export_priority
@ -315,6 +309,11 @@ class HoudiniSubmitDeadline(
return attr.asdict(plugin_info)
def process(self, instance):
if not instance.data["farm"]:
self.log.debug("Render on farm is disabled. "
"Skipping deadline submission.")
return
super(HoudiniSubmitDeadline, self).process(instance)
# TODO: Avoid the need for this logic here, needed for submit publish

View file

@ -181,17 +181,27 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
self.log.debug("Submitting 3dsMax render..")
project_settings = instance.context.data["project_settings"]
auth = instance.data["deadline"]["auth"]
verify = instance.data["deadline"]["verify"]
if instance.data.get("multiCamera"):
self.log.debug("Submitting jobs for multiple cameras..")
payload = self._use_published_name_for_multiples(
payload_data, project_settings)
job_infos, plugin_infos = payload
for job_info, plugin_info in zip(job_infos, plugin_infos):
self.submit(self.assemble_payload(job_info, plugin_info))
self.submit(
self.assemble_payload(job_info, plugin_info),
auth=auth,
verify=verify
)
else:
payload = self._use_published_name(payload_data, project_settings)
job_info, plugin_info = payload
self.submit(self.assemble_payload(job_info, plugin_info))
self.submit(
self.assemble_payload(job_info, plugin_info),
auth=auth,
verify=verify
)
def _use_published_name(self, data, project_settings):
# Not all hosts can import these modules.

Some files were not shown because too many files have changed in this diff.