Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Merge branch 'develop' into enhancement/AY-974_Nuke-simplified-deadline-submission-on-write-node

This change is contained in commit 818aaf96ad.
90 changed files with 2950 additions and 1270 deletions

@@ -1,7 +1,7 @@
 from ayon_applications import PreLaunchHook
-from ayon_core.pipeline.colorspace import get_imageio_config
-from ayon_core.pipeline.template_data import get_template_data_with_names
+from ayon_core.pipeline.colorspace import get_imageio_config_preset
+from ayon_core.pipeline.template_data import get_template_data


 class OCIOEnvHook(PreLaunchHook):
@@ -26,32 +26,38 @@ class OCIOEnvHook(PreLaunchHook):
     def execute(self):
         """Hook entry method."""

-        template_data = get_template_data_with_names(
-            project_name=self.data["project_name"],
-            folder_path=self.data["folder_path"],
-            task_name=self.data["task_name"],
+        folder_entity = self.data["folder_entity"]
+
+        template_data = get_template_data(
+            self.data["project_entity"],
+            folder_entity=folder_entity,
+            task_entity=self.data["task_entity"],
             host_name=self.host_name,
-            settings=self.data["project_settings"]
+            settings=self.data["project_settings"],
         )

-        config_data = get_imageio_config(
-            project_name=self.data["project_name"],
-            host_name=self.host_name,
-            project_settings=self.data["project_settings"],
-            anatomy_data=template_data,
+        config_data = get_imageio_config_preset(
+            self.data["project_name"],
+            self.data["folder_path"],
+            self.data["task_name"],
+            self.host_name,
+            anatomy=self.data["anatomy"],
+            project_settings=self.data["project_settings"],
+            template_data=template_data,
+            env=self.launch_context.env,
+            folder_id=folder_entity["id"],
         )

-        if config_data:
-            ocio_path = config_data["path"]
-
-            if self.host_name in ["nuke", "hiero"]:
-                ocio_path = ocio_path.replace("\\", "/")
-
-            self.log.info(
-                f"Setting OCIO environment to config path: {ocio_path}")
-
-            self.launch_context.env["OCIO"] = ocio_path
-        else:
+        if not config_data:
             self.log.debug("OCIO not set or enabled")
+            return
+
+        ocio_path = config_data["path"]
+
+        if self.host_name in ["nuke", "hiero"]:
+            ocio_path = ocio_path.replace("\\", "/")
+
+        self.log.info(
+            f"Setting OCIO environment to config path: {ocio_path}")
+
+        self.launch_context.env["OCIO"] = ocio_path

@@ -60,7 +60,7 @@ def main(*subprocess_args):
         )
     )

-    elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True):
+    elif os.environ.get("AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH", True):
         save = False
         if os.getenv("WORKFILES_SAVE_AS"):
             save = True

@@ -33,7 +33,7 @@ def load_scripts(paths):
             if register:
                 try:
                     register()
-                except:
+                except:  # noqa E722
                     traceback.print_exc()
             else:
                 print("\nWarning! '%s' has no register function, "
@@ -45,7 +45,7 @@ def load_scripts(paths):
             if unregister:
                 try:
                     unregister()
-                except:
+                except:  # noqa E722
                     traceback.print_exc()

     def test_reload(mod):
@@ -57,7 +57,7 @@ def load_scripts(paths):

         try:
             return importlib.reload(mod)
-        except:
+        except:  # noqa E722
            traceback.print_exc()

    def test_register(mod):

@@ -43,7 +43,10 @@ class AbcCameraLoader(plugin.AssetLoader):
     def _process(self, libpath, asset_group, group_name):
         plugin.deselect_all()

-        bpy.ops.wm.alembic_import(filepath=libpath)
+        # Force the creation of the transform cache even if the camera
+        # doesn't have an animation. We use the cache to update the camera.
+        bpy.ops.wm.alembic_import(
+            filepath=libpath, always_add_cache_reader=True)

         objects = lib.get_selection()

@@ -178,12 +181,33 @@ class AbcCameraLoader(plugin.AssetLoader):
             self.log.info("Library already loaded, not updating...")
             return

-        self._remove(asset_group)
-        self._process(str(libpath), asset_group, object_name)
+        mat = asset_group.matrix_basis.copy()
+        for obj in asset_group.children:
+            found = False
+            for constraint in obj.constraints:
+                if constraint.type == "TRANSFORM_CACHE":
+                    constraint.cache_file.filepath = libpath.as_posix()
+                    found = True
+                    break
+            if not found:
+                # This is to keep compatibility with cameras loaded with
+                # the old loader
+                # Create a new constraint for the cache file
+                constraint = obj.constraints.new("TRANSFORM_CACHE")
+                bpy.ops.cachefile.open(filepath=libpath.as_posix())
+                constraint.cache_file = bpy.data.cache_files[-1]
+                constraint.cache_file.scale = 1.0
+
+                # This is a workaround to set the object path. Blender doesn't
+                # load the list of object paths until the object is evaluated.
+                # This is a hack to force the object to be evaluated.
+                # The modifier doesn't need to be removed because camera
+                # objects don't have modifiers.
+                obj.modifiers.new(
+                    name='MeshSequenceCache', type='MESH_SEQUENCE_CACHE')
+                bpy.context.evaluated_depsgraph_get()
+
+                constraint.object_path = (
+                    constraint.cache_file.object_paths[0].path)
+
+        asset_group.matrix_basis = mat

         metadata["libpath"] = str(libpath)
         metadata["representation"] = repre_entity["id"]

@@ -1110,10 +1110,7 @@ def apply_colorspace_project():
     '''
     # backward compatibility layer
     # TODO: remove this after some time
-    config_data = get_imageio_config(
-        project_name=get_current_project_name(),
-        host_name="hiero"
-    )
+    config_data = get_current_context_imageio_config_preset()

    if config_data:
        presets.update({

@@ -13,11 +13,17 @@ class CreateArnoldRop(plugin.HoudiniCreator):
     # Default extension
     ext = "exr"

-    # Default to split export and render jobs
-    export_job = True
+    # Default render target
+    render_target = "farm_split"

     def create(self, product_name, instance_data, pre_create_data):
         import hou
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]
+
+        # Remove the active, we are checking the bypass flag of the nodes
         instance_data.pop("active", None)
@@ -25,8 +31,6 @@ class CreateArnoldRop(plugin.HoudiniCreator):

         # Add chunk size attribute
         instance_data["chunkSize"] = 1
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")

         instance = super(CreateArnoldRop, self).create(
             product_name,
@@ -51,7 +55,7 @@ class CreateArnoldRop(plugin.HoudiniCreator):
             "ar_exr_half_precision": 1   # half precision
         }

-        if pre_create_data.get("export_job"):
+        if pre_create_data.get("render_target") == "farm_split":
             ass_filepath = \
                 "{export_dir}{product_name}/{product_name}.$F4.ass".format(
                     export_dir=hou.text.expandString("$HIP/pyblish/ass/"),
@@ -66,23 +70,41 @@ class CreateArnoldRop(plugin.HoudiniCreator):
         to_lock = ["productType", "id"]
         self.lock_parameters(instance_node, to_lock)

-    def get_pre_create_attr_defs(self):
-        attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs()
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target),
+        ]
+
+    def get_pre_create_attr_defs(self):
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]

-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("export_job",
-                    label="Split export and render jobs",
-                    default=self.export_job),
+        attrs = [
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,
-                    label="Image Format Options")
+                    label="Image Format Options"),
         ]
+        return attrs + self.get_instance_attr_defs()

@@ -11,15 +11,23 @@ class CreateKarmaROP(plugin.HoudiniCreator):
     product_type = "karma_rop"
     icon = "magic"

+    # Default render target
+    render_target = "farm"
+
     def create(self, product_name, instance_data, pre_create_data):
         import hou  # noqa
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]

+        instance_data.pop("active", None)
         instance_data.update({"node_type": "karma"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")

         instance = super(CreateKarmaROP, self).create(
             product_name,
@@ -86,18 +94,40 @@ class CreateKarmaROP(plugin.HoudiniCreator):
         to_lock = ["productType", "id"]
         self.lock_parameters(instance_node, to_lock)

-    def get_pre_create_attr_defs(self):
-        attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
+
+    def get_pre_create_attr_defs(self):
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]

-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
+        attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
+
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default="exr",
@@ -112,5 +142,6 @@ class CreateKarmaROP(plugin.HoudiniCreator):
                     decimals=0),
             BoolDef("cam_res",
                     label="Camera Resolution",
-                    default=False)
+                    default=False),
         ]
+        return attrs + self.get_instance_attr_defs()

@@ -11,18 +11,22 @@ class CreateMantraROP(plugin.HoudiniCreator):
     product_type = "mantra_rop"
     icon = "magic"

-    # Default to split export and render jobs
-    export_job = True
+    # Default render target
+    render_target = "farm_split"

     def create(self, product_name, instance_data, pre_create_data):
         import hou  # noqa
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]

+        instance_data.pop("active", None)
         instance_data.update({"node_type": "ifd"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")

         instance = super(CreateMantraROP, self).create(
             product_name,
@@ -46,7 +50,7 @@ class CreateMantraROP(plugin.HoudiniCreator):
             "vm_picture": filepath,
         }

-        if pre_create_data.get("export_job"):
+        if pre_create_data.get("render_target") == "farm_split":
             ifd_filepath = \
                 "{export_dir}{product_name}/{product_name}.$F4.ifd".format(
                     export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),
@@ -77,21 +81,40 @@ class CreateMantraROP(plugin.HoudiniCreator):
         to_lock = ["productType", "id"]
         self.lock_parameters(instance_node, to_lock)

-    def get_pre_create_attr_defs(self):
-        attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
+    def get_pre_create_attr_defs(self):
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]

-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("export_job",
-                    label="Split export and render jobs",
-                    default=self.export_job),
+        attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
+
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default="exr",
@@ -100,5 +123,6 @@ class CreateMantraROP(plugin.HoudiniCreator):
                     label="Override Camera Resolution",
                     tooltip="Override the current camera "
                             "resolution, recommended for IPR.",
-                    default=False)
+                    default=False),
         ]
+        return attrs + self.get_instance_attr_defs()

@@ -17,17 +17,21 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
     ext = "exr"
     multi_layered_mode = "No Multi-Layered EXR File"

-    # Default to split export and render jobs
-    split_render = True
+    # Default render target
+    render_target = "farm_split"

     def create(self, product_name, instance_data, pre_create_data):
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]

+        instance_data.pop("active", None)
         instance_data.update({"node_type": "Redshift_ROP"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")

         instance = super(CreateRedshiftROP, self).create(
             product_name,
@@ -99,7 +103,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
         rs_filepath = f"{export_dir}{product_name}/{product_name}.$F4.rs"
         parms["RS_archive_file"] = rs_filepath

-        if pre_create_data.get("split_render", self.split_render):
+        if pre_create_data.get("render_target") == "farm_split":
             parms["RS_archive_enable"] = 1

         instance_node.setParms(parms)
@@ -118,24 +122,44 @@ class CreateRedshiftROP(plugin.HoudiniCreator):

         return super(CreateRedshiftROP, self).remove_instances(instances)

+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
     def get_pre_create_attr_defs(self):
-        attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
-
         image_format_enum = [
             "exr", "tif", "jpg", "png",
         ]

         multi_layered_mode = [
             "No Multi-Layered EXR File",
             "Full Multi-Layered EXR File"
         ]

-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("split_render",
-                    label="Split export and render jobs",
-                    default=self.split_render),
+        attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,
@@ -143,5 +167,6 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
             EnumDef("multi_layered_mode",
                     multi_layered_mode,
                     default=self.multi_layered_mode,
-                    label="Multi-Layered EXR")
+                    label="Multi-Layered EXR"),
         ]
+        return attrs + self.get_instance_attr_defs()

@@ -16,17 +16,21 @@ class CreateVrayROP(plugin.HoudiniCreator):
     icon = "magic"
     ext = "exr"

-    # Default to split export and render jobs
-    export_job = True
+    # Default render target
+    render_target = "farm_split"

     def create(self, product_name, instance_data, pre_create_data):
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]

+        instance_data.pop("active", None)
         instance_data.update({"node_type": "vray_renderer"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")

         instance = super(CreateVrayROP, self).create(
             product_name,
@@ -55,7 +59,7 @@ class CreateVrayROP(plugin.HoudiniCreator):
             "SettingsEXR_bits_per_channel": "16"   # half precision
         }

-        if pre_create_data.get("export_job"):
+        if pre_create_data.get("render_target") == "farm_split":
             scene_filepath = \
                 "{export_dir}{product_name}/{product_name}.$F4.vrscene".format(
                     export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
@@ -143,20 +147,41 @@ class CreateVrayROP(plugin.HoudiniCreator):

         return super(CreateVrayROP, self).remove_instances(instances)

+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
     def get_pre_create_attr_defs(self):
-        attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]

-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("export_job",
-                    label="Split export and render jobs",
-                    default=self.export_job),
+        attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
+
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,
@@ -172,3 +197,4 @@ class CreateVrayROP(plugin.HoudiniCreator):
                     "if enabled",
                     default=False)
         ]
+        return attrs + self.get_instance_attr_defs()

@@ -95,7 +95,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
         # write workfile information to context container.
         op_ctx = hou.node(CONTEXT_CONTAINER)
         if not op_ctx:
-            op_ctx = self.create_context_node()
+            op_ctx = self.host.create_context_node()

         workfile_data = {"workfile": current_instance.data_to_store()}
         imprint(op_ctx, workfile_data)

@@ -40,12 +40,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
         default_prefix = evalParmNoFrame(rop, "ar_picture")
         render_products = []

-        # Store whether we are splitting the render job (export + render)
-        split_render = bool(rop.parm("ar_ass_export_enable").eval())
-        instance.data["splitRender"] = split_render
         export_prefix = None
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "ar_ass_file", pad_character="0"
             )
@@ -68,7 +65,12 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
             "": self.generate_expected_files(instance, beauty_product)
         }

+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
         num_aovs = rop.evalParm("ar_aovs")
+        # TODO: Check the following logic.
+        # as it always assumes that all AOV are not merged.
         for index in range(1, num_aovs + 1):
             # Skip disabled AOVs
             if not rop.evalParm("ar_enable_aov{}".format(index)):
@@ -85,6 +87,14 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
             files_by_aov[label] = self.generate_expected_files(instance,
                                                                aov_product)

+            # Set to False as soon as we have a separated aov.
+            multipartExr = False
+
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
+
         for product in render_products:
             self.log.debug("Found render product: {}".format(product))
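
The multipartExr flag introduced above follows one rule: assume a multipart EXR until any separated AOV product is found. A minimal sketch of that rule in isolation (hypothetical helper, not part of the commit):

    def is_multipart_exr(separated_aov_products):
        # Mirrors the collectors: multipartExr stays True only while no
        # AOV has been written out as a separate product.
        return len(separated_aov_products) == 0

    assert is_multipart_exr([]) is True            # beauty only
    assert is_multipart_exr(["diffuse"]) is False  # separated AOV present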

@@ -0,0 +1,35 @@
+import pyblish.api
+
+
+class CollectFarmInstances(pyblish.api.InstancePlugin):
+    """Collect instances for farm render."""
+
+    order = pyblish.api.CollectorOrder
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    hosts = ["houdini"]
+    targets = ["local", "remote"]
+    label = "Collect farm instances"
+
+    def process(self, instance):
+
+        creator_attribute = instance.data["creator_attributes"]
+
+        # Collect Render Target
+        if creator_attribute.get("render_target") not in {
+            "farm_split", "farm"
+        }:
+            instance.data["farm"] = False
+            instance.data["splitRender"] = False
+            self.log.debug("Render on farm is disabled. "
+                           "Skipping farm collecting.")
+            return
+
+        instance.data["farm"] = True
+        instance.data["splitRender"] = (
+            creator_attribute.get("render_target") == "farm_split"
+        )
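
In effect, this collector maps the single render_target enum onto the two flags the rest of the pipeline consumes. A condensed sketch of that mapping (standalone, with a plain dict standing in for instance.data):

    def route_render_target(render_target):
        # Only the two farm targets enable farm submission;
        # "farm_split" additionally splits export and render jobs.
        farm = render_target in {"farm", "farm_split"}
        return {"farm": farm, "splitRender": render_target == "farm_split"}

    assert route_render_target("local") == {"farm": False, "splitRender": False}
    assert route_render_target("farm_split") == {"farm": True, "splitRender": True}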

@@ -55,6 +55,12 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
                                                  beauty_product)
         }

+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        # By default karma render is a multipart Exr.
+        instance.data["multipartExr"] = True
+
         filenames = list(render_products)
         instance.data["files"] = filenames
         instance.data["renderProducts"] = colorspace.ARenderProduct()

@@ -0,0 +1,137 @@
+import os
+import pyblish.api
+from ayon_core.pipeline.create import get_product_name
+from ayon_core.pipeline.farm.patterning import match_aov_pattern
+from ayon_core.pipeline.publish import (
+    get_plugin_settings,
+    apply_plugin_settings_automatically
+)
+
+
+class CollectLocalRenderInstances(pyblish.api.InstancePlugin):
+    """Collect instances for local render.
+
+    Agnostic Local Render Collector.
+    """
+
+    # this plugin runs after Collect Render Products
+    order = pyblish.api.CollectorOrder + 0.12
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    hosts = ["houdini"]
+    label = "Collect local render instances"
+
+    use_deadline_aov_filter = False
+    aov_filter = {"host_name": "houdini",
+                  "value": [".*([Bb]eauty).*"]}
+
+    @classmethod
+    def apply_settings(cls, project_settings):
+        # Preserve automatic settings applying logic
+        settings = get_plugin_settings(plugin=cls,
+                                       project_settings=project_settings,
+                                       log=cls.log,
+                                       category="houdini")
+        apply_plugin_settings_automatically(cls, settings, logger=cls.log)
+
+        if not cls.use_deadline_aov_filter:
+            # get aov_filter from collector settings
+            # and restructure it as match_aov_pattern requires.
+            cls.aov_filter = {
+                cls.aov_filter["host_name"]: cls.aov_filter["value"]
+            }
+        else:
+            # get aov_filter from deadline settings
+            cls.aov_filter = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"]["aov_filter"]
+            cls.aov_filter = {
+                item["name"]: item["value"]
+                for item in cls.aov_filter
+            }
+
+    def process(self, instance):
+
+        if instance.data["farm"]:
+            self.log.debug("Render on farm is enabled. "
+                           "Skipping local render collecting.")
+            return
+
+        # Create Instance for each AOV.
+        context = instance.context
+        expectedFiles = next(iter(instance.data["expectedFiles"]), {})
+
+        product_type = "render"  # is always render
+        product_group = get_product_name(
+            context.data["projectName"],
+            context.data["taskEntity"]["name"],
+            context.data["taskEntity"]["taskType"],
+            context.data["hostName"],
+            product_type,
+            instance.data["productName"]
+        )
+
+        for aov_name, aov_filepaths in expectedFiles.items():
+            product_name = product_group
+
+            if aov_name:
+                product_name = "{}_{}".format(product_name, aov_name)
+
+            # Create instance for each AOV
+            aov_instance = context.create_instance(product_name)
+
+            # Prepare Representation for each AOV
+            aov_filenames = [os.path.basename(path) for path in aov_filepaths]
+            staging_dir = os.path.dirname(aov_filepaths[0])
+            ext = aov_filepaths[0].split(".")[-1]
+
+            # Decide if instance is reviewable
+            preview = False
+            if instance.data.get("multipartExr", False):
+                # Add preview tag because its multipartExr.
+                preview = True
+            else:
+                # Add Preview tag if the AOV matches the filter.
+                preview = match_aov_pattern(
+                    "houdini", self.aov_filter, aov_filenames[0]
+                )
+
+            preview = preview and instance.data.get("review", False)
+
+            # Support Single frame.
+            # The integrator wants single files to be a single
+            # filename instead of a list.
+            # More info: https://github.com/ynput/ayon-core/issues/238
+            if len(aov_filenames) == 1:
+                aov_filenames = aov_filenames[0]
+
+            aov_instance.data.update({
+                # 'label': label,
+                "task": instance.data["task"],
+                "folderPath": instance.data["folderPath"],
+                "frameStart": instance.data["frameStartHandle"],
+                "frameEnd": instance.data["frameEndHandle"],
+                "productType": product_type,
+                "family": product_type,
+                "productName": product_name,
+                "productGroup": product_group,
+                "families": ["render.local.hou", "review"],
+                "instance_node": instance.data["instance_node"],
+                "representations": [
+                    {
+                        "stagingDir": staging_dir,
+                        "ext": ext,
+                        "name": ext,
+                        "tags": ["review"] if preview else [],
+                        "files": aov_filenames,
+                        "frameStart": instance.data["frameStartHandle"],
+                        "frameEnd": instance.data["frameEndHandle"]
+                    }
+                ]
+            })
+
+        # Remove original render instance
+        # I can't remove it here as I still need it to trigger the render.
+        # context.remove(instance)
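
This collector consumes instance.data["expectedFiles"], a single-entry list whose dict maps AOV names to file lists, and derives one product per AOV. A hypothetical example of the shape it expects and the names it produces:

    # One dict inside a list; "" is the beauty pass. Paths are made up.
    expected_files = [{
        "": ["/tmp/render/shot010.1001.exr", "/tmp/render/shot010.1002.exr"],
        "diffuse": ["/tmp/render/shot010_diffuse.1001.exr"],
    }]

    product_group = "renderMain"  # hypothetical result of get_product_name()
    for aov_name in expected_files[0]:
        # Beauty keeps the group name; AOVs get a suffixed product name.
        name = "{}_{}".format(product_group, aov_name) if aov_name else product_group
        print(name)  # -> renderMain, renderMain_diffuse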

@@ -44,12 +44,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
         default_prefix = evalParmNoFrame(rop, "vm_picture")
         render_products = []

-        # Store whether we are splitting the render job (export + render)
-        split_render = bool(rop.parm("soho_outputmode").eval())
-        instance.data["splitRender"] = split_render
         export_prefix = None
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "soho_diskfile", pad_character="0"
             )
@@ -74,6 +71,11 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
                                                  beauty_product)
         }

+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
+        # TODO: This logic doesn't take into considerations
+        # cryptomatte defined in 'Images > Cryptomatte'
         aov_numbers = rop.evalParm("vm_numaux")
         if aov_numbers > 0:
             # get the filenames of the AOVs
@@ -93,6 +95,14 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):

                 files_by_aov[var] = self.generate_expected_files(instance, aov_product)  # noqa

+                # Set to False as soon as we have a separated aov.
+                multipartExr = False
+
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
+
         for product in render_products:
             self.log.debug("Found render product: %s" % product)

@@ -42,11 +42,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):

         default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix")
         beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix")
-        # Store whether we are splitting the render job (export + render)
-        split_render = bool(rop.parm("RS_archive_enable").eval())
-        instance.data["splitRender"] = split_render

         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "RS_archive_file", pad_character="0"
             )
@@ -63,9 +61,12 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
         full_exr_mode = (rop.evalParm("RS_outputMultilayerMode") == "2")
         if full_exr_mode:
             # Ignore beauty suffix if full mode is enabled
             # As this is what the rop does.
             beauty_suffix = ""

+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
         # Default beauty/main layer AOV
         beauty_product = self.get_render_product_name(
             prefix=default_prefix, suffix=beauty_suffix
@@ -75,7 +76,7 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
             beauty_suffix: self.generate_expected_files(instance,
                                                         beauty_product)
         }

         aovs_rop = rop.parm("RS_aovGetFromNode").evalAsNode()
         if aovs_rop:
             rop = aovs_rop
@@ -98,13 +99,21 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):

         if rop.parm(f"RS_aovID_{i}").evalAsString() == "CRYPTOMATTE" or \
                 not full_exr_mode:

             aov_product = self.get_render_product_name(aov_prefix, aov_suffix)
             render_products.append(aov_product)

             files_by_aov[aov_suffix] = self.generate_expected_files(instance,
                                                                     aov_product)  # noqa

+            # Set to False as soon as we have a separated aov.
+            multipartExr = False
+
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
+
         for product in render_products:
             self.log.debug("Found render product: %s" % product)

@@ -8,7 +8,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
     label = "Collect Review Data"
     # This specific order value is used so that
     # this plugin runs after CollectRopFrameRange
-    order = pyblish.api.CollectorOrder + 0.1
+    # Also after CollectLocalRenderInstances
+    order = pyblish.api.CollectorOrder + 0.13
     hosts = ["houdini"]
     families = ["review"]
@@ -28,7 +29,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
         ropnode_path = instance.data["instance_node"]
         ropnode = hou.node(ropnode_path)

-        camera_path = ropnode.parm("camera").eval()
+        # Get camera based on the instance_node type.
+        camera_path = self._get_camera_path(ropnode)
         camera_node = hou.node(camera_path)
         if not camera_node:
             self.log.warning("No valid camera node found on review node: "
@@ -55,3 +57,29 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
         # Store focal length in `burninDataMembers`
         burnin_members = instance.data.setdefault("burninDataMembers", {})
         burnin_members["focalLength"] = focal_length
+
+    def _get_camera_path(self, ropnode):
+        """Get the camera path associated with the given rop node.
+
+        This function evaluates the camera parameter according to the
+        type of the given rop node.
+
+        Returns:
+            Union[str, None]: Camera path or None.
+
+            This function can return empty string if the camera
+            path is empty i.e. no camera path.
+        """
+
+        if ropnode.type().name() in {
+            "opengl", "karma", "ifd", "arnold"
+        }:
+            return ropnode.parm("camera").eval()
+
+        elif ropnode.type().name() == "Redshift_ROP":
+            return ropnode.parm("RS_renderCamera").eval()
+
+        elif ropnode.type().name() == "vray_renderer":
+            return ropnode.parm("render_camera").eval()
+
+        return None
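
The per-renderer camera lookup added above could equally be written as a lookup table; a sketch of the same dispatch, assuming only the parameter names shown in the diff:

    CAMERA_PARM_BY_NODE_TYPE = {
        "opengl": "camera",
        "karma": "camera",
        "ifd": "camera",
        "arnold": "camera",
        "Redshift_ROP": "RS_renderCamera",
        "vray_renderer": "render_camera",
    }

    def get_camera_path(ropnode):
        # Returns None for unknown node types, like the method above.
        parm = CAMERA_PARM_BY_NODE_TYPE.get(ropnode.type().name())
        return ropnode.parm(parm).eval() if parm else None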

@@ -0,0 +1,22 @@
+import pyblish.api
+
+
+class CollectReviewableInstances(pyblish.api.InstancePlugin):
+    """Collect Reviewable Instances.
+
+    Basically, all instances of the specified families
+    with creator_attribute["review"]
+    """
+
+    order = pyblish.api.CollectorOrder
+    label = "Collect Reviewable Instances"
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    def process(self, instance):
+        creator_attribute = instance.data["creator_attributes"]
+
+        instance.data["review"] = creator_attribute.get("review", False)

@@ -45,12 +45,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
         render_products = []
         # TODO: add render elements if render element

-        # Store whether we are splitting the render job in an export + render
-        split_render = rop.parm("render_export_mode").eval() == "2"
-        instance.data["splitRender"] = split_render
         export_prefix = None
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "render_export_filepath", pad_character="0"
             )
@@ -70,6 +67,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
             "": self.generate_expected_files(instance,
                                              beauty_product)}

+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
         if instance.data.get("RenderElement", True):
             render_element = self.get_render_element_name(rop, default_prefix)
             if render_element:
@@ -77,7 +77,13 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
                 render_products.append(renderpass)
                 files_by_aov[aov] = self.generate_expected_files(
                     instance, renderpass)
+                # Set to False as soon as we have a separated aov.
+                multipartExr = False

+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
+
         for product in render_products:
             self.log.debug("Found render product: %s" % product)

@@ -19,6 +19,16 @@ class ExtractOpenGL(publish.Extractor,
     def process(self, instance):
         ropnode = hou.node(instance.data.get("instance_node"))

+        # This plugin is triggered when marking render as reviewable.
+        # Therefore, this plugin will run on over wrong instances.
+        # TODO: Don't run this plugin on wrong instances.
+        # This plugin should run only on review product type
+        # with instance node of opengl type.
+        if ropnode.type().name() != "opengl":
+            self.log.debug("Skipping OpenGl extraction. Rop node {} "
+                           "is not an OpenGl node.".format(ropnode.path()))
+            return
+
         output = ropnode.evalParm("picture")
         staging_dir = os.path.normpath(os.path.dirname(output))
         instance.data["stagingDir"] = staging_dir

@@ -0,0 +1,74 @@
+import pyblish.api
+
+from ayon_core.pipeline import publish
+from ayon_core.hosts.houdini.api.lib import render_rop
+import hou
+import os
+
+
+class ExtractRender(publish.Extractor):
+
+    order = pyblish.api.ExtractorOrder
+    label = "Extract Render"
+    hosts = ["houdini"]
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    def process(self, instance):
+        creator_attribute = instance.data["creator_attributes"]
+        product_type = instance.data["productType"]
+        rop_node = hou.node(instance.data.get("instance_node"))
+
+        # Align split parameter value on rop node to the render target.
+        if instance.data["splitRender"]:
+            if product_type == "arnold_rop":
+                rop_node.setParms({"ar_ass_export_enable": 1})
+            elif product_type == "mantra_rop":
+                rop_node.setParms({"soho_outputmode": 1})
+            elif product_type == "redshift_rop":
+                rop_node.setParms({"RS_archive_enable": 1})
+            elif product_type == "vray_rop":
+                rop_node.setParms({"render_export_mode": "2"})
+        else:
+            if product_type == "arnold_rop":
+                rop_node.setParms({"ar_ass_export_enable": 0})
+            elif product_type == "mantra_rop":
+                rop_node.setParms({"soho_outputmode": 0})
+            elif product_type == "redshift_rop":
+                rop_node.setParms({"RS_archive_enable": 0})
+            elif product_type == "vray_rop":
+                rop_node.setParms({"render_export_mode": "1"})
+
+        if instance.data.get("farm"):
+            self.log.debug("Render should be processed on farm, skipping local render.")
+            return
+
+        if creator_attribute.get("render_target") == "local":
+            ropnode = hou.node(instance.data.get("instance_node"))
+            render_rop(ropnode)
+
+        # `ExpectedFiles` is a list that includes one dict.
+        expected_files = instance.data["expectedFiles"][0]
+        # Each key in that dict is a list of files.
+        # Combine lists of files into one big list.
+        all_frames = []
+        for value in expected_files.values():
+            if isinstance(value, str):
+                all_frames.append(value)
+            elif isinstance(value, list):
+                all_frames.extend(value)
+        # Check missing frames.
+        # Frames won't exist if user cancels the render.
+        missing_frames = [
+            frame
+            for frame in all_frames
+            if not os.path.exists(frame)
+        ]
+        if missing_frames:
+            # TODO: Use user friendly error reporting.
+            raise RuntimeError("Failed to complete render extraction. "
+                               "Missing output files: {}".format(
+                                   missing_frames))
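
The extractor flattens the expected-files mapping before checking the filesystem; the same flattening in isolation (hypothetical paths):

    import os

    # Values may be a bare string (single frame) or a list (sequence).
    expected_files = [{"": ["/tmp/out.1001.exr", "/tmp/out.1002.exr"],
                       "aov": "/tmp/out_aov.1001.exr"}]

    all_frames = []
    for value in expected_files[0].values():
        all_frames.extend([value] if isinstance(value, str) else value)

    missing = [f for f in all_frames if not os.path.exists(f)]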

@@ -17,11 +17,13 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
     order = pyblish.api.IntegratorOrder + 9.0
     hosts = ["houdini"]
     families = ["workfile",
-                "redshift_rop",
-                "arnold_rop",
-                "usdrender",
                 "mantra_rop",
                 "karma_rop",
+                "usdrender",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop",
+                "render.local.hou",
                 "publish.hou"]
     optional = True

@@ -56,6 +56,18 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,

     def process(self, instance):

+        rop_node = hou.node(instance.data["instance_node"])
+
+        # This plugin is triggered when marking render as reviewable.
+        # Therefore, this plugin will run on over wrong instances.
+        # TODO: Don't run this plugin on wrong instances.
+        # This plugin should run only on review product type
+        # with instance node of opengl type.
+        if rop_node.type().name() != "opengl":
+            self.log.debug("Skipping Validation. Rop node {} "
+                           "is not an OpenGl node.".format(rop_node.path()))
+            return
+
         if not self.is_active(instance.data):
             return
@@ -66,7 +78,6 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
             )
             return

-        rop_node = hou.node(instance.data["instance_node"])
         if rop_node.evalParm("colorcorrect") != 2:
             # any colorspace settings other than default requires
             # 'Color Correct' parm to be set to 'OpenColorIO'

@@ -20,6 +20,16 @@ class ValidateSceneReview(pyblish.api.InstancePlugin):
         report = []
         instance_node = hou.node(instance.data.get("instance_node"))

+        # This plugin is triggered when marking render as reviewable.
+        # Therefore, this plugin will run on over wrong instances.
+        # TODO: Don't run this plugin on wrong instances.
+        # This plugin should run only on review product type
+        # with instance node of opengl type.
+        if instance_node.type().name() != "opengl":
+            self.log.debug("Skipping Validation. Rop node {} "
+                           "is not an OpenGl node.".format(instance_node.path()))
+            return
+
         invalid = self.get_invalid_scene_path(instance_node)
         if invalid:
             report.append(invalid)

client/ayon_core/hosts/houdini/startup/OPmenu.xml (new file, 29 lines)
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- OPMenu Stencil.
+    It's used to extend the OPMenu.
+-->
+
+<menuDocument>
+    <menu>
+        <!-- Operator type and asset options. -->
+        <subMenu id="opmenu.vhda_options_create">
+            <insertBefore>opmenu.unsynchronize</insertBefore>
+            <scriptItem id="opmenu.vhda_create_ayon">
+                <insertAfter>opmenu.vhda_create</insertAfter>
+                <label>Create New (AYON)...</label>
+                <context>
+                </context>
+                <scriptCode>
+                    <![CDATA[
+from ayon_core.hosts.houdini.api.creator_node_shelves import create_interactive
+
+node = kwargs["node"]
+if node not in hou.selectedNodes():
+    node.setSelected(True)
+create_interactive("io.openpype.creators.houdini.hda", **kwargs)
+]]>
+                </scriptCode>
+            </scriptItem>
+        </subMenu>
+    </menu>
+</menuDocument>

@@ -6,12 +6,9 @@ import json
 from typing import Any, Dict, Union

 import six
 import ayon_api

 from ayon_core.pipeline import (
-    get_current_project_name,
-    get_current_folder_path,
-    get_current_task_name,
     colorspace
 )
 from ayon_core.settings import get_project_settings
@@ -372,12 +369,8 @@ def reset_colorspace():
     """
     if int(get_max_version()) < 2024:
         return
-    project_name = get_current_project_name()
-    colorspace_mgr = rt.ColorPipelineMgr
-    project_settings = get_project_settings(project_name)

-    max_config_data = colorspace.get_imageio_config(
-        project_name, "max", project_settings)
+    max_config_data = colorspace.get_current_context_imageio_config_preset()
     if max_config_data:
         ocio_config_path = max_config_data["path"]
         colorspace_mgr = rt.ColorPipelineMgr
@@ -392,10 +385,7 @@ def check_colorspace():
                     "because Max main window can't be found.")
     if int(get_max_version()) >= 2024:
         color_mgr = rt.ColorPipelineMgr
-        project_name = get_current_project_name()
-        project_settings = get_project_settings(project_name)
-        max_config_data = colorspace.get_imageio_config(
-            project_name, "max", project_settings)
+        max_config_data = colorspace.get_current_context_imageio_config_preset()
         if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"):
             if not is_headless():
                 from ayon_core.tools.utils import SimplePopup

@@ -52,11 +52,7 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):

         self._has_been_setup = True

-        def context_setting():
-            return lib.set_context_setting()
-
-        rt.callbacks.addScript(rt.Name('systemPostNew'),
-                               context_setting)
+        rt.callbacks.addScript(rt.Name('systemPostNew'), on_new)

         rt.callbacks.addScript(rt.Name('filePostOpen'),
                                lib.check_colorspace)
@@ -163,6 +159,14 @@ def ls() -> list:
         yield lib.read(container)


+def on_new():
+    lib.set_context_setting()
+    if rt.checkForSave():
+        rt.resetMaxFile(rt.Name("noPrompt"))
+    rt.clearUndoBuffer()
+    rt.redrawViews()
+
+
 def containerise(name: str, nodes: list, context,
                  namespace=None, loader=None, suffix="_CON"):
     data = {

@@ -22,7 +22,6 @@ ALEMBIC_ARGS = {
     "melPostJobCallback": str,
     "noNormals": bool,
     "preRoll": bool,
-    "preRollStartFrame": int,
     "pythonPerFrameCallback": str,
     "pythonPostJobCallback": str,
     "renderableOnly": bool,
@@ -54,15 +53,22 @@ def extract_alembic(
     endFrame=None,
     eulerFilter=True,
     frameRange="",
+    melPerFrameCallback=None,
+    melPostJobCallback=None,
     noNormals=False,
     preRoll=False,
     preRollStartFrame=0,
+    pythonPerFrameCallback=None,
+    pythonPostJobCallback=None,
     renderableOnly=False,
     root=None,
     selection=True,
     startFrame=None,
     step=1.0,
     stripNamespaces=True,
+    userAttr=None,
+    userAttrPrefix=None,
+    uvsOnly=False,
     uvWrite=True,
     verbose=False,
     wholeFrameGeo=False,
@@ -102,6 +108,11 @@ def extract_alembic(
             string formatted as: "startFrame endFrame". This argument
             overrides `startFrame` and `endFrame` arguments.

+        melPerFrameCallback (Optional[str]): MEL callback run per frame.
+
+        melPostJobCallback (Optional[str]): MEL callback after last frame is
+            written.
+
         noNormals (bool): When on, normal data from the original polygon
             objects is not included in the exported Alembic cache file.
@@ -113,6 +124,11 @@ def extract_alembic(
             dependent translations and can be used to evaluate run-up that
             isn't actually translated. Defaults to 0.

+        pythonPerFrameCallback (Optional[str]): Python callback run per frame.
+
+        pythonPostJobCallback (Optional[str]): Python callback after last frame
+            is written.
+
         renderableOnly (bool): When on, any non-renderable nodes or hierarchy,
             such as hidden objects, are not included in the Alembic file.
             Defaults to False.
@@ -137,6 +153,15 @@ def extract_alembic(
             object with the namespace taco:foo:bar appears as bar in the
             Alembic file.

+        userAttr (list of str, optional): A specific user defined attribute to
+            write out. Defaults to [].
+
+        userAttrPrefix (list of str, optional): Prefix filter for determining
+            which user defined attributes to write out. Defaults to [].
+
+        uvsOnly (bool): When on, only uv data for PolyMesh and SubD shapes
+            will be written to the Alembic file.
+
         uvWrite (bool): When on, UV data from polygon meshes and subdivision
             objects are written to the Alembic file. Only the current UV map is
             included.
@@ -183,6 +208,8 @@ def extract_alembic(
     # Ensure list arguments are valid.
     attr = attr or []
     attrPrefix = attrPrefix or []
+    userAttr = userAttr or []
+    userAttrPrefix = userAttrPrefix or []
     root = root or []

     # Pass the start and end frame on as `frameRange` so that it
@@ -213,8 +240,10 @@ def extract_alembic(
         "eulerFilter": eulerFilter,
         "noNormals": noNormals,
         "preRoll": preRoll,
+        "root": root,
         "renderableOnly": renderableOnly,
         "uvWrite": uvWrite,
+        "uvsOnly": uvsOnly,
         "writeColorSets": writeColorSets,
         "writeFaceSets": writeFaceSets,
         "wholeFrameGeo": wholeFrameGeo,
@@ -226,9 +255,10 @@ def extract_alembic(
         "step": step,
         "attr": attr,
         "attrPrefix": attrPrefix,
+        "userAttr": userAttr,
+        "userAttrPrefix": userAttrPrefix,
         "stripNamespaces": stripNamespaces,
-        "verbose": verbose,
-        "preRollStartFrame": preRollStartFrame
+        "verbose": verbose
     }

     # Validate options
@@ -264,6 +294,17 @@ def extract_alembic(
     if maya_version >= 2018:
         options['autoSubd'] = options.pop('writeCreases', False)

+    # Only add callbacks if they are set so that we're not passing `None`
+    callbacks = {
+        "melPerFrameCallback": melPerFrameCallback,
+        "melPostJobCallback": melPostJobCallback,
+        "pythonPerFrameCallback": pythonPerFrameCallback,
+        "pythonPostJobCallback": pythonPostJobCallback,
+    }
+    for key, callback in callbacks.items():
+        if callback:
+            options[key] = str(callback)
+
     # Format the job string from options
     job_args = list()
     for key, value in options.items():
@@ -297,7 +338,11 @@ def extract_alembic(
     # exports are made. (PLN-31)
     # TODO: Make sure this actually fixes the issues
     with evaluation("off"):
-        cmds.AbcExport(j=job_str, verbose=verbose)
+        cmds.AbcExport(
+            j=job_str,
+            verbose=verbose,
+            preRollStartFrame=preRollStartFrame
+        )

     if verbose:
         log.debug("Extracted Alembic to: %s", file)
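
With the new arguments, a call that registers callbacks and user attributes might look like the following — a hedged example, not from the commit; the file path, DAG path, attribute names, and callback string are all hypothetical:

    from ayon_core.hosts.maya.api.alembic import extract_alembic

    extract_alembic(
        file="/tmp/cache.abc",           # hypothetical output path
        root=["|char_GRP"],              # hypothetical DAG root
        startFrame=1001.0,
        endFrame=1050.0,
        userAttr=["myAttr"],             # new: explicit user attributes
        userAttrPrefix=["rig_"],         # new: prefix filter
        pythonPostJobCallback="print('alembic done')",  # new callback arg
    )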

@@ -47,7 +47,7 @@ class FBXExtractor:
         "smoothMesh": bool,
         "instances": bool,
         # "referencedContainersContent": bool, # deprecated in Maya 2016+
-        "bakeComplexAnimation": int,
+        "bakeComplexAnimation": bool,
         "bakeComplexStart": int,
         "bakeComplexEnd": int,
         "bakeComplexStep": int,
@@ -59,6 +59,7 @@ class FBXExtractor:
         "constraints": bool,
         "lights": bool,
         "embeddedTextures": bool,
+        "includeChildren": bool,
         "inputConnections": bool,
         "upAxis": str,  # x, y or z,
         "triangulate": bool,
@@ -102,6 +103,7 @@ class FBXExtractor:
         "constraints": False,
         "lights": True,
         "embeddedTextures": False,
+        "includeChildren": True,
         "inputConnections": True,
         "upAxis": "y",
         "triangulate": False,

@@ -6,7 +6,6 @@ from ayon_core.lib import (
     BoolDef,
     NumberDef,
 )
-from ayon_core.pipeline import CreatedInstance


 def _get_animation_attr_defs(cls):

@@ -8,7 +8,7 @@ from ayon_core.pipeline import (
 from ayon_core.pipeline.load.utils import get_representation_path_from_context
 from ayon_core.pipeline.colorspace import (
     get_imageio_file_rules_colorspace_from_filepath,
-    get_imageio_config,
+    get_current_context_imageio_config_preset,
     get_imageio_file_rules
 )
 from ayon_core.settings import get_project_settings
@@ -270,8 +270,7 @@ class FileNodeLoader(load.LoaderPlugin):
         host_name = get_current_host_name()
         project_settings = get_project_settings(project_name)

-        config_data = get_imageio_config(
-            project_name, host_name,
+        config_data = get_current_context_imageio_config_preset(
             project_settings=project_settings
         )

@@ -35,7 +35,8 @@ class ExtractFBXAnimation(publish.Extractor):
         fbx_exporter = fbx.FBXExtractor(log=self.log)
         out_members = instance.data.get("animated_skeleton", [])
         # Export
+        instance.data["constraints"] = True
         # TODO: need to set up the options for users to set up
         # the flags they intended to export
         instance.data["skeletonDefinitions"] = True
         instance.data["referencedAssetsContent"] = True
         fbx_exporter.set_options_from_instance(instance)

@@ -6,6 +6,7 @@ from maya import cmds
 from ayon_core.pipeline import publish
 from ayon_core.hosts.maya.api.alembic import extract_alembic
 from ayon_core.hosts.maya.api.lib import (
+    get_all_children,
     suspended_refresh,
     maintained_selection,
     iter_visible_nodes_in_range
@@ -40,7 +41,6 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
     # From settings
     attr = []
     attrPrefix = []
-    autoSubd = False
     bake_attributes = []
     bake_attribute_prefixes = []
     dataFormat = "ogawa"
@@ -63,6 +63,7 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
     wholeFrameGeo = False
     worldSpace = True
     writeColorSets = False
+    writeCreases = False
     writeFaceSets = False
     writeNormals = True
     writeUVSets = False
@@ -173,15 +174,9 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
             "writeVisibility": attribute_values.get(
                 "writeVisibility", self.writeVisibility
             ),
-            "autoSubd": attribute_values.get(
-                "autoSubd", self.autoSubd
-            ),
             "uvsOnly": attribute_values.get(
                 "uvsOnly", self.uvsOnly
             ),
-            "writeNormals": attribute_values.get(
-                "writeNormals", self.writeNormals
-            ),
             "melPerFrameCallback": attribute_values.get(
                 "melPerFrameCallback", self.melPerFrameCallback
             ),
@@ -193,7 +188,12 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
             ),
             "pythonPostJobCallback": attribute_values.get(
                 "pythonPostJobCallback", self.pythonPostJobCallback
-            )
+            ),
+            # Note that this converts `writeNormals` to `noNormals` for the
+            # `AbcExport` equivalent in `extract_alembic`
+            "noNormals": not attribute_values.get(
+                "writeNormals", self.writeNormals
+            ),
         }

         if instance.data.get("visibleOnly", False):
@@ -249,7 +249,6 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
         with maintained_selection():
             cmds.select(instance.data["proxy"])
             extract_alembic(**kwargs)
-
         representation = {
             "name": "proxy",
             "ext": "abc",
@@ -268,20 +267,6 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
         return []

     override_defs = OrderedDict({
-        "autoSubd": BoolDef(
-            "autoSubd",
-            label="Auto Subd",
-            default=cls.autoSubd,
-            tooltip=(
-                "If this flag is present and the mesh has crease edges, "
-                "crease vertices or holes, the mesh (OPolyMesh) would now "
-                "be written out as an OSubD and crease info will be stored"
-                " in the Alembic file. Otherwise, creases info won't be "
-                "preserved in Alembic file unless a custom Boolean "
-                "attribute SubDivisionMesh has been added to mesh node and"
-                " its value is true."
-            )
-        ),
         "eulerFilter": BoolDef(
             "eulerFilter",
             label="Euler Filter",
@@ -354,6 +339,13 @@ class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
             default=cls.writeColorSets,
             tooltip="Write vertex colors with the geometry."
         ),
+        "writeCreases": BoolDef(
+            "writeCreases",
+            label="Write Creases",
+            default=cls.writeCreases,
+            tooltip="Write the geometry's edge and vertex crease "
+                    "information."
+        ),
         "writeFaceSets": BoolDef(
             "writeFaceSets",
             label="Write Face Sets",
@@ -527,9 +519,7 @@ class ExtractAnimation(ExtractAlembic):
         roots = cmds.sets(out_set, query=True) or []

         # Include all descendants
-        nodes = roots
-        nodes += cmds.listRelatives(
-            roots, allDescendents=True, fullPath=True
-        ) or []
+        nodes = roots.copy()
+        nodes.extend(get_all_children(roots, ignore_intermediate_objects=True))

         return nodes, roots
|
|
|||
|
|
@@ -1,3 +1,4 @@
import inspect
import pyblish.api

from ayon_core.pipeline import OptionalPyblishPluginMixin

@@ -29,29 +30,28 @@ class ValidateAlembicDefaultsPointcache(

@classmethod
def _get_publish_attributes(cls, instance):
attributes = instance.data["publish_attributes"][
cls.plugin_name(
instance.data["publish_attributes"]
)
]

return attributes
return instance.data["publish_attributes"][cls.plugin_name]

def process(self, instance):
if not self.is_active(instance.data):
return

settings = self._get_settings(instance.context)

attributes = self._get_publish_attributes(instance)

msg = (
"Alembic Extract setting \"{}\" is not the default value:"
"\nCurrent: {}"
"\nDefault Value: {}\n"
)
errors = []
invalid = {}
for key, value in attributes.items():
if key not in settings:
# This may occur if attributes have changed over time and an
# existing instance has older legacy attributes that do not
# match the current settings definition.
self.log.warning(
"Publish attribute %s not found in Alembic Export "
"default settings. Ignoring validation for attribute.",
key
)
continue

default_value = settings[key]

# Lists are best compared sorted since we can't rely on the order

@@ -61,10 +61,35 @@ class ValidateAlembicDefaultsPointcache(
default_value = sorted(default_value)

if value != default_value:
errors.append(msg.format(key, value, default_value))
invalid[key] = value, default_value

if errors:
raise PublishValidationError("\n".join(errors))
if invalid:
non_defaults = "\n".join(
f"- {key}: {value} \t(default: {default_value})"
for key, (value, default_value) in invalid.items()
)

raise PublishValidationError(
"Alembic extract options differ from default values:\n"
f"{non_defaults}",
description=self.get_description()
)

@staticmethod
def get_description():
return inspect.cleandoc(
"""### Alembic Extract settings differ from defaults

The alembic export options differ from the project default values.

If this is intentional you can disable this validation by
disabling **Validate Alembic Options Default**.

If not you may use the "Repair" action to revert all the options to
their default values.

"""
)

@classmethod
def repair(cls, instance):

@@ -75,13 +100,20 @@ class ValidateAlembicDefaultsPointcache(
)

# Set the settings values on the create context then save to workfile.
publish_attributes = instance.data["publish_attributes"]
plugin_name = cls.plugin_name(publish_attributes)
attributes = cls._get_publish_attributes(instance)
settings = cls._get_settings(instance.context)
create_publish_attributes = create_instance.data["publish_attributes"]
attributes = cls._get_publish_attributes(create_instance)
for key in attributes:
create_publish_attributes[plugin_name][key] = settings[key]
if key not in settings:
# This may occur if attributes have changed over time and an
# existing instance has older legacy attributes that do not
# match the current settings definition.
cls.log.warning(
"Publish attribute %s not found in Alembic Export "
"default settings. Ignoring repair for attribute.",
key
)
continue
attributes[key] = settings[key]

create_context.save_changes()

@@ -93,6 +125,6 @@ class ValidateAlembicDefaultsAnimation(

The defaults are defined in the project settings.
"""
label = "Validate Alembic Options Defaults"
label = "Validate Alembic Options Defaults"
families = ["animation"]
plugin_name = "ExtractAnimation"
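The validator above sorts list values before comparing them against the project defaults, because attribute order is not guaranteed. A minimal sketch of that comparison (values are made up for illustration):

def differs_from_default(value, default_value):
    # Lists are compared sorted since their order is not reliable.
    if isinstance(value, list) and isinstance(default_value, list):
        return sorted(value) != sorted(default_value)
    return value != default_value

print(differs_from_default(["b", "a"], ["a", "b"]))  # False
print(differs_from_default(True, False))             # True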
@@ -0,0 +1,201 @@
from maya import cmds

from ayon_core.hosts.maya.api.workfile_template_builder import (
MayaPlaceholderPlugin
)
from ayon_core.lib import NumberDef, TextDef, EnumDef
from ayon_core.lib.events import weakref_partial


EXAMPLE_SCRIPT = """
# Access maya commands
from maya import cmds

# Access the placeholder node
placeholder_node = placeholder.scene_identifier

# Access the event callback
if event is None:
print(f"Populating {placeholder}")
else:
if event.topic == "template.depth_processed":
print(f"Processed depth: {event.get('depth')}")
elif event.topic == "template.finished":
print("Build finished.")
""".strip()


class MayaPlaceholderScriptPlugin(MayaPlaceholderPlugin):
"""Execute a script at the given `order` during workfile build.

This is a very low-level placeholder to run Python scripts at a given
point in time during the workfile template build.

It can create either a locator or an objectSet as placeholder node.
It defaults to an objectSet, since allowing to run on e.g. other
placeholder node members can be useful, e.g. using:

>>> members = cmds.sets(placeholder.scene_identifier, query=True)

"""

identifier = "maya.runscript"
label = "Run Python Script"

use_selection_as_parent = False

def get_placeholder_options(self, options=None):
options = options or {}
return [
NumberDef(
"order",
label="Order",
default=options.get("order") or 0,
decimals=0,
minimum=0,
maximum=999,
tooltip=(
"Order"
"\nOrder defines asset loading priority (0 to 999)"
"\nPriority rule is : \"lowest is first to load\"."
)
),
TextDef(
"prepare_script",
label="Run at\nprepare",
tooltip="Run before populate at prepare order",
multiline=True,
default=options.get("prepare_script", "")
),
TextDef(
"populate_script",
label="Run at\npopulate",
tooltip="Run script at populate node order<br>"
"This is the <b>default</b> behavior",
multiline=True,
default=options.get("populate_script", EXAMPLE_SCRIPT)
),
TextDef(
"depth_processed_script",
label="Run after\ndepth\niteration",
tooltip="Run script after every build depth iteration",
multiline=True,
default=options.get("depth_processed_script", "")
),
TextDef(
"finished_script",
label="Run after\nbuild",
tooltip=(
"Run script at build finished.<br>"
"<b>Note</b>: this even runs if other placeholders had "
"errors during the build"
),
multiline=True,
default=options.get("finished_script", "")
),
EnumDef(
"create_nodetype",
label="Nodetype",
items={
"spaceLocator": "Locator",
"objectSet": "ObjectSet"
},
tooltip=(
"The placeholder's node type to be created.<br>"
"<b>Note</b> this only works on create, not on update"
),
default=options.get("create_nodetype", "objectSet")
),
]

def create_placeholder(self, placeholder_data):
nodetype = placeholder_data.get("create_nodetype", "objectSet")

if nodetype == "spaceLocator":
super(MayaPlaceholderScriptPlugin, self).create_placeholder(
placeholder_data
)
elif nodetype == "objectSet":
placeholder_data["plugin_identifier"] = self.identifier

# Create maya objectSet on selection
selection = cmds.ls(selection=True, long=True)
name = self._create_placeholder_name(placeholder_data)
node = cmds.sets(selection, name=name)

self.imprint(node, placeholder_data)

def prepare_placeholders(self, placeholders):
super(MayaPlaceholderScriptPlugin, self).prepare_placeholders(
placeholders
)
for placeholder in placeholders:
prepare_script = placeholder.data.get("prepare_script")
if not prepare_script:
continue

self.run_script(placeholder, prepare_script)

def populate_placeholder(self, placeholder):

populate_script = placeholder.data.get("populate_script")
depth_script = placeholder.data.get("depth_processed_script")
finished_script = placeholder.data.get("finished_script")

# Run now
if populate_script:
self.run_script(placeholder, populate_script)

if not any([depth_script, finished_script]):
# No callback scripts to run
if not placeholder.data.get("keep_placeholder", True):
self.delete_placeholder(placeholder)
return

# Run at each depth processed
if depth_script:
callback = weakref_partial(
self.run_script, placeholder, depth_script)
self.builder.add_on_depth_processed_callback(
callback, order=placeholder.order)

# Run at build finish
if finished_script:
callback = weakref_partial(
self.run_script, placeholder, finished_script)
self.builder.add_on_finished_callback(
callback, order=placeholder.order)

# If placeholder should be deleted, delete it after finish so
# the scripts have access to it up to the last run
if not placeholder.data.get("keep_placeholder", True):
delete_callback = weakref_partial(
self.delete_placeholder, placeholder)
self.builder.add_on_finished_callback(
delete_callback, order=placeholder.order + 1)

def run_script(self, placeholder, script, event=None):
"""Run script

Even though `placeholder` is an unused argument, exposing it as
an input argument makes it available through
globals()/locals() in the `exec` call, giving the script access
to the placeholder.

For example:
>>> node = placeholder.scene_identifier

In the case the script is running at a callback level (not during
populate) then it has access to the `event` as well, otherwise the
value is None if it runs during `populate_placeholder` directly.

For example adding this as the callback script:
>>> if event is not None:
>>>     if event.topic == "on_depth_processed":
>>>         print(f"Processed depth: {event.get('depth')}")
>>>     elif event.topic == "on_finished":
>>>         print("Build finished.")

"""
self.log.debug(f"Running script at event: {event}")
exec(script, locals())
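The docstring above explains that `placeholder` and `event` become visible to the user script because they are passed through `locals()` into `exec`. A minimal standalone sketch of that mechanism (the names here are illustrative, not part of ayon-core):

def run_user_script(script, placeholder, event=None):
    # `script`, `placeholder` and `event` are local names here, so
    # passing locals() as the globals mapping exposes them to the
    # executed code.
    exec(script, locals())

class FakePlaceholder:
    scene_identifier = "placeholder_SET"

run_user_script(
    "print(placeholder.scene_identifier, event)",
    FakePlaceholder(),
)
# prints: placeholder_SET None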
@@ -43,7 +43,9 @@ from ayon_core.pipeline import (
from ayon_core.pipeline.context_tools import (
get_current_context_custom_workfile_template
)
from ayon_core.pipeline.colorspace import get_imageio_config
from ayon_core.pipeline.colorspace import (
get_current_context_imageio_config_preset
)
from ayon_core.pipeline.workfile import BuildWorkfile
from . import gizmo_menu
from .constants import ASSIST

@@ -1577,10 +1579,7 @@ class WorkfileSettings(object):
imageio_host (dict): host colorspace configurations

'''
config_data = get_imageio_config(
project_name=get_current_project_name(),
host_name="nuke"
)
config_data = get_current_context_imageio_config_preset()

workfile_settings = imageio_host["workfile"]
color_management = workfile_settings["color_management"]
@@ -778,6 +778,7 @@ class ExporterReviewMov(ExporterReview):
# deal with lut defined in the viewer lut
self.viewer_lut_raw = klass.viewer_lut_raw
self.write_colorspace = instance.data["colorspace"]
self.color_channels = instance.data["color_channels"]

self.name = name or "baked"
self.ext = ext or "mov"

@@ -834,7 +835,7 @@ class ExporterReviewMov(ExporterReview):
self.log.info("Nodes exported...")
return path

def generate_mov(self, farm=False, **kwargs):
def generate_mov(self, farm=False, delete=True, **kwargs):
# colorspace data
colorspace = None
# get colorspace settings

@@ -947,6 +948,8 @@ class ExporterReviewMov(ExporterReview):
self.log.debug("Path: {}".format(self.path))
write_node["file"].setValue(str(self.path))
write_node["file_type"].setValue(str(self.ext))
write_node["channels"].setValue(str(self.color_channels))

# Knobs `meta_codec` and `mov64_codec` are not available on centos.
# TODO shouldn't this come from settings on outputs?
try:

@@ -987,8 +990,13 @@ class ExporterReviewMov(ExporterReview):
self.render(write_node.name())

# ---------- generate representation data
tags = ["review", "need_thumbnail"]

if delete:
tags.append("delete")

self.get_representation_data(
tags=["review", "need_thumbnail", "delete"] + add_tags,
tags=tags + add_tags,
custom_tags=add_custom_tags,
range=True,
colorspace=colorspace
@@ -62,7 +62,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
}

# add attributes from the version to imprint to metadata knob
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]

# getting file path

@@ -206,7 +206,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
"colorspaceInput": colorspace,
}

for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]

# adding nodes to node graph
@@ -48,7 +48,7 @@ class AlembicCameraLoader(load.LoaderPlugin):
"frameEnd": last,
"version": version_entity["version"],
}
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]

# getting file path

@@ -123,7 +123,7 @@ class AlembicCameraLoader(load.LoaderPlugin):
}

# add attributes from the version to imprint to metadata knob
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]

# getting file path
@@ -9,7 +9,8 @@ from ayon_core.pipeline import (
get_representation_path,
)
from ayon_core.pipeline.colorspace import (
get_imageio_file_rules_colorspace_from_filepath
get_imageio_file_rules_colorspace_from_filepath,
get_current_context_imageio_config_preset,
)
from ayon_core.hosts.nuke.api.lib import (
get_imageio_input_colorspace,

@@ -197,7 +198,6 @@ class LoadClip(plugin.NukeLoader):
"frameStart",
"frameEnd",
"source",
"author",
"fps",
"handleStart",
"handleEnd",

@@ -347,8 +347,7 @@ class LoadClip(plugin.NukeLoader):
"source": version_attributes.get("source"),
"handleStart": str(self.handle_start),
"handleEnd": str(self.handle_end),
"fps": str(version_attributes.get("fps")),
"author": version_attributes.get("author")
"fps": str(version_attributes.get("fps"))
}

last_version_entity = ayon_api.get_last_version_by_product_id(

@@ -547,9 +546,10 @@ class LoadClip(plugin.NukeLoader):
f"Colorspace from representation colorspaceData: {colorspace}"
)

config_data = get_current_context_imageio_config_preset()
# check if any filerules are not applicable
new_parsed_colorspace = get_imageio_file_rules_colorspace_from_filepath(  # noqa
filepath, "nuke", project_name
filepath, "nuke", project_name, config_data=config_data
)
self.log.debug(f"Colorspace new filerules: {new_parsed_colorspace}")
@@ -69,7 +69,6 @@ class LoadEffects(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]

@@ -189,7 +188,6 @@ class LoadEffects(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps",
]:
data_imprint[k] = version_attributes[k]
@@ -69,7 +69,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]

@@ -192,7 +191,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@@ -71,7 +71,6 @@ class LoadGizmo(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]

@@ -139,7 +138,6 @@ class LoadGizmo(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@@ -73,7 +73,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]

@@ -145,7 +144,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]
@@ -133,7 +133,7 @@ class LoadImage(load.LoaderPlugin):
"version": version_entity["version"],
"colorspace": colorspace,
}
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes.get(k, str(None))

r["tile_color"].setValue(int("0x4ecd25ff", 16))

@@ -207,7 +207,6 @@ class LoadImage(load.LoaderPlugin):
"colorspace": version_attributes.get("colorSpace"),
"source": version_attributes.get("source"),
"fps": str(version_attributes.get("fps")),
"author": version_attributes.get("author")
}

# change color of node
@@ -47,7 +47,7 @@ class AlembicModelLoader(load.LoaderPlugin):
"version": version_entity["version"]
}
# add attributes from the version to imprint to metadata knob
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]

# getting file path

@@ -130,7 +130,7 @@ class AlembicModelLoader(load.LoaderPlugin):
}

# add additional metadata from the version to imprint to Avalon knob
for k in ["source", "author", "fps"]:
for k in ["source", "fps"]:
data_imprint[k] = version_attributes[k]

# getting file path
@@ -55,7 +55,6 @@ class LinkAsGroup(load.LoaderPlugin):
"handleStart",
"handleEnd",
"source",
"author",
"fps"
]:
data_imprint[k] = version_attributes[k]

@@ -131,7 +130,6 @@ class LinkAsGroup(load.LoaderPlugin):
"colorspace": version_attributes.get("colorSpace"),
"source": version_attributes.get("source"),
"fps": version_attributes.get("fps"),
"author": version_attributes.get("author")
}

# Update the imprinted representation
@@ -153,6 +153,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
# Determine defined file type
ext = write_node["file_type"].value()

# determine defined channel type
color_channels = write_node["channels"].value()

# get frame range data
handle_start = instance.context.data["handleStart"]
handle_end = instance.context.data["handleEnd"]

@@ -172,7 +175,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
"path": write_file_path,
"outputDir": output_dir,
"ext": ext,
"colorspace": colorspace
"colorspace": colorspace,
"color_channels": color_channels
})

if product_type == "render":
@@ -136,11 +136,16 @@ class ExtractReviewIntermediates(publish.Extractor):
self, instance, o_name, o_data["extension"],
multiple_presets)

o_data["add_custom_tags"].append("intermediate")
delete = not o_data.get("publish", False)

if instance.data.get("farm"):
if "review" in instance.data["families"]:
instance.data["families"].remove("review")

data = exporter.generate_mov(farm=True, **o_data)
data = exporter.generate_mov(
farm=True, delete=delete, **o_data
)

self.log.debug(
"_ data: {}".format(data))

@@ -154,7 +159,7 @@ class ExtractReviewIntermediates(publish.Extractor):
"bakeWriteNodeName": data.get("bakeWriteNodeName")
})
else:
data = exporter.generate_mov(**o_data)
data = exporter.generate_mov(delete=delete, **o_data)

# add representation generated by exporter
generated_repres.extend(data["representations"])
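In the hunk above, the intermediate review is only tagged for deletion when the output itself is not marked for publishing. A small sketch of that tag assembly, assuming the same `o_data` shape used by the extractor:

def build_tags(o_data, add_tags):
    # Keep the baked intermediate on disk when it is published itself;
    # otherwise tag it "delete" so integration cleans it up.
    delete = not o_data.get("publish", False)
    tags = ["review", "need_thumbnail"]
    if delete:
        tags.append("delete")
    return tags + add_tags

print(build_tags({"publish": False}, ["intermediate"]))
# ['review', 'need_thumbnail', 'delete', 'intermediate']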
@@ -35,8 +35,12 @@ class ImageCreator(Creator):
create_empty_group = False

stub = api.stub()  # only after PS is up
top_level_selected_items = stub.get_selected_layers()
if pre_create_data.get("use_selection"):
try:
top_level_selected_items = stub.get_selected_layers()
except ValueError:
raise CreatorError("Cannot group locked Background layer!")

only_single_item_selected = len(top_level_selected_items) == 1
if (
only_single_item_selected or

@@ -50,11 +54,12 @@ class ImageCreator(Creator):
group = stub.group_selected_layers(product_name_from_ui)
groups_to_create.append(group)
else:
stub.select_layers(stub.get_layers())
try:
stub.select_layers(stub.get_layers())
group = stub.group_selected_layers(product_name_from_ui)
except:
except ValueError:
raise CreatorError("Cannot group locked Background layer!")

groups_to_create.append(group)

# create empty group if nothing selected
@@ -1,5 +1,3 @@
import os

import pyblish.api
import pyblish.util
@@ -156,14 +156,9 @@ This creator publishes color space look file (LUT).
]

def apply_settings(self, project_settings):
host = self.create_context.host
host_name = host.name
project_name = host.get_current_project_name()
config_data = colorspace.get_imageio_config(
project_name, host_name,
config_data = colorspace.get_current_context_imageio_config_preset(
project_settings=project_settings
)

if not config_data:
self.enabled = False
return
@@ -0,0 +1,96 @@
from pathlib import Path

from ayon_core.pipeline import (
CreatedInstance,
)

from ayon_core.lib.attribute_definitions import (
FileDef,
BoolDef,
TextDef,
)
from ayon_core.hosts.traypublisher.api.plugin import TrayPublishCreator


class EditorialPackageCreator(TrayPublishCreator):
"""Creates instance for OTIO file from published folder.

Folder contains OTIO file and exported .mov files. Process should publish
the whole folder as a single `editorial_pckg` product type and (possibly)
convert .mov files into a different format and copy them into the
`publish` `resources` subfolder.
"""
identifier = "editorial_pckg"
label = "Editorial package"
product_type = "editorial_pckg"
description = "Publish folder with OTIO file and resources"

# Position batch creator after simple creators
order = 120

conversion_enabled = False

def apply_settings(self, project_settings):
self.conversion_enabled = (
project_settings["traypublisher"]
["publish"]
["ExtractEditorialPckgConversion"]
["conversion_enabled"]
)

def get_icon(self):
return "fa.folder"

def create(self, product_name, instance_data, pre_create_data):
folder_path = pre_create_data.get("folder_path")
if not folder_path:
return

instance_data["creator_attributes"] = {
"folder_path": (Path(folder_path["directory"]) /
Path(folder_path["filenames"][0])).as_posix(),
"conversion_enabled": pre_create_data["conversion_enabled"]
}

# Create new instance
new_instance = CreatedInstance(self.product_type, product_name,
instance_data, self)
self._store_new_instance(new_instance)

def get_pre_create_attr_defs(self):
# Use same attributes as for instance attributes
return [
FileDef(
"folder_path",
folders=True,
single_item=True,
extensions=[],
allow_sequences=False,
label="Folder path"
),
BoolDef("conversion_enabled",
tooltip="Convert to output defined in Settings.",
default=self.conversion_enabled,
label="Convert resources"),
]

def get_instance_attr_defs(self):
return [
TextDef(
"folder_path",
label="Folder path",
disabled=True
),
BoolDef("conversion_enabled",
tooltip="Convert to output defined in Settings.",
label="Convert resources"),
]

def get_detail_description(self):
return """# Publish folder with OTIO file and video clips

Folder contains OTIO file and exported .mov files. Process should
publish the whole folder as a single `editorial_pckg` product type and
(possibly) convert .mov files into a different format and copy them
into the `publish` `resources` subfolder.
"""
@@ -0,0 +1,58 @@
"""Produces instance.data["editorial_pckg"] data used during integration.

Requires:
instance.data["creator_attributes"]["path"] - from creator

Provides:
instance -> editorial_pckg (dict):
folder_path (str)
otio_path (str) - from dragged folder
resource_paths (list)

"""
import os

import pyblish.api

from ayon_core.lib.transcoding import VIDEO_EXTENSIONS


class CollectEditorialPackage(pyblish.api.InstancePlugin):
"""Collects path to OTIO file and resources"""

label = "Collect Editorial Package"
order = pyblish.api.CollectorOrder - 0.1

hosts = ["traypublisher"]
families = ["editorial_pckg"]

def process(self, instance):
folder_path = instance.data["creator_attributes"]["folder_path"]
if not folder_path or not os.path.exists(folder_path):
self.log.info((
"Instance doesn't contain collected existing folder path."
))
return

instance.data["editorial_pckg"] = {}
instance.data["editorial_pckg"]["folder_path"] = folder_path

otio_path, resource_paths = (
self._get_otio_and_resource_paths(folder_path))

instance.data["editorial_pckg"]["otio_path"] = otio_path
instance.data["editorial_pckg"]["resource_paths"] = resource_paths

def _get_otio_and_resource_paths(self, folder_path):
otio_path = None
resource_paths = []

file_names = os.listdir(folder_path)
for filename in file_names:
_, ext = os.path.splitext(filename)
file_path = os.path.join(folder_path, filename)
if ext == ".otio":
otio_path = file_path
elif ext in VIDEO_EXTENSIONS:
resource_paths.append(file_path)
return otio_path, resource_paths
@@ -1,10 +1,7 @@
import pyblish.api
from ayon_core.pipeline import (
publish,
registered_host
)
from ayon_core.lib import EnumDef
from ayon_core.pipeline import colorspace
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import KnownPublishError
@@ -19,9 +16,10 @@ class CollectColorspace(pyblish.api.InstancePlugin,
families = ["render", "plate", "reference", "image", "online"]
enabled = False

colorspace_items = [
default_colorspace_items = [
(None, "Don't override")
]
colorspace_items = list(default_colorspace_items)
colorspace_attr_show = False
config_items = None

@@ -69,14 +67,13 @@ class CollectColorspace(pyblish.api.InstancePlugin,

@classmethod
def apply_settings(cls, project_settings):
host = registered_host()
host_name = host.name
project_name = host.get_current_project_name()
config_data = colorspace.get_imageio_config(
project_name, host_name,
config_data = colorspace.get_current_context_imageio_config_preset(
project_settings=project_settings
)

enabled = False
colorspace_items = list(cls.default_colorspace_items)
config_items = None
if config_data:
filepath = config_data["path"]
config_items = colorspace.get_ocio_config_colorspaces(filepath)

@@ -85,9 +82,11 @@ class CollectColorspace(pyblish.api.InstancePlugin,
include_aliases=True,
include_roles=True
)
cls.config_items = config_items
cls.colorspace_items.extend(labeled_colorspaces)
cls.enabled = True
colorspace_items.extend(labeled_colorspaces)

cls.config_items = config_items
cls.colorspace_items = colorspace_items
cls.enabled = enabled

@classmethod
def get_attribute_defs(cls):
@@ -10,9 +10,13 @@ class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin):

order = pyblish.api.CollectorOrder + 0.491
label = "Collect Missing Frame Data From Folder"
families = ["plate", "pointcache",
"vdbcache", "online",
"render"]
families = [
"plate",
"pointcache",
"vdbcache",
"online",
"render",
]
hosts = ["traypublisher"]

def process(self, instance):

@@ -22,16 +26,26 @@ class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin):
"frameStart",
"frameEnd",
"handleStart",
"handleEnd"
"handleEnd",
):
if key not in instance.data:
missing_keys.append(key)

# Skip the logic if all keys are already collected.
# NOTE: In editorial 'folderEntity' is not filled, so it would crash
# even if we don't need it.
if not missing_keys:
return

keys_set = []
folder_attributes = instance.data["folderEntity"]["attrib"]
for key in missing_keys:
if key in folder_attributes:
instance.data[key] = folder_attributes[key]
keys_set.append(key)

if keys_set:
self.log.debug(f"Frame range data {keys_set} "
"has been collected from folder entity.")
self.log.debug(
f"Frame range data {keys_set} "
"has been collected from folder entity."
)
@@ -0,0 +1,232 @@
import copy
import os.path
import subprocess

import opentimelineio

import pyblish.api

from ayon_core.lib import get_ffmpeg_tool_args, run_subprocess
from ayon_core.pipeline import publish


class ExtractEditorialPckgConversion(publish.Extractor):
"""Replaces movie paths in the otio file with rootless publish paths.

Prepares movie resources for integration (adds them to `transfers`).
Converts .mov files according to output definition.
"""

label = "Extract Editorial Package"
order = pyblish.api.ExtractorOrder - 0.45
hosts = ["traypublisher"]
families = ["editorial_pckg"]

def process(self, instance):
editorial_pckg_data = instance.data.get("editorial_pckg")

otio_path = editorial_pckg_data["otio_path"]
otio_basename = os.path.basename(otio_path)
staging_dir = self.staging_dir(instance)

editorial_pckg_repre = {
'name': "editorial_pckg",
'ext': "otio",
'files': otio_basename,
"stagingDir": staging_dir,
}
otio_staging_path = os.path.join(staging_dir, otio_basename)

instance.data["representations"].append(editorial_pckg_repre)

publish_resource_folder = self._get_publish_resource_folder(instance)
resource_paths = editorial_pckg_data["resource_paths"]
transfers = self._get_transfers(resource_paths,
publish_resource_folder)

project_settings = instance.context.data["project_settings"]
output_def = (project_settings["traypublisher"]
["publish"]
["ExtractEditorialPckgConversion"]
["output"])

conversion_enabled = (instance.data["creator_attributes"]
["conversion_enabled"])

if conversion_enabled and output_def["ext"]:
transfers = self._convert_resources(output_def, transfers)

instance.data["transfers"] = transfers

source_to_rootless = self._get_resource_path_mapping(instance,
transfers)

otio_data = editorial_pckg_data["otio_data"]
otio_data = self._replace_target_urls(otio_data, source_to_rootless)

opentimelineio.adapters.write_to_file(otio_data, otio_staging_path)

self.log.info("Added Editorial Package representation: {}".format(
editorial_pckg_repre))

def _get_publish_resource_folder(self, instance):
"""Calculates the publish folder and creates it."""
publish_path = self._get_published_path(instance)
publish_folder = os.path.dirname(publish_path)
publish_resource_folder = os.path.join(publish_folder, "resources")

if not os.path.exists(publish_resource_folder):
os.makedirs(publish_resource_folder, exist_ok=True)
return publish_resource_folder

def _get_resource_path_mapping(self, instance, transfers):
"""Returns dict of {source_mov_path: rootless_published_path}."""
replace_paths = {}
anatomy = instance.context.data["anatomy"]
for source, destination in transfers:
rootless_path = self._get_rootless(anatomy, destination)
source_file_name = os.path.basename(source)
replace_paths[source_file_name] = rootless_path
return replace_paths

def _get_transfers(self, resource_paths, publish_resource_folder):
"""Returns list of tuples (source, destination) with movie paths."""
transfers = []
for res_path in resource_paths:
res_basename = os.path.basename(res_path)
pub_res_path = os.path.join(publish_resource_folder, res_basename)
transfers.append((res_path, pub_res_path))
return transfers

def _replace_target_urls(self, otio_data, replace_paths):
"""Replace original movie paths with published rootless ones."""
for track in otio_data.tracks:
for clip in track:
# Check if the clip has a media reference
if clip.media_reference is not None:
# Access the target_url from the media reference
target_url = clip.media_reference.target_url
if not target_url:
continue
file_name = os.path.basename(target_url)
replace_path = replace_paths.get(file_name)
if replace_path:
clip.media_reference.target_url = replace_path
if clip.name == file_name:
clip.name = os.path.basename(replace_path)

return otio_data

def _get_rootless(self, anatomy, path):
"""Try to find rootless {root[work]} path from `path`"""
success, rootless_path = anatomy.find_root_template_from_path(
path)
if not success:
# `rootless_path` is not set to `output_dir` if none of the roots match
self.log.warning(
f"Could not find root path for remapping '{path}'."
)
rootless_path = path

return rootless_path

def _get_published_path(self, instance):
"""Calculates the expected `publish` folder."""
# determine published path from Anatomy.
template_data = instance.data.get("anatomyData")
rep = instance.data["representations"][0]
template_data["representation"] = rep.get("name")
template_data["ext"] = rep.get("ext")
template_data["comment"] = None

anatomy = instance.context.data["anatomy"]
template_data["root"] = anatomy.roots
template = anatomy.get_template_item("publish", "default", "path")
template_filled = template.format_strict(template_data)
return os.path.normpath(template_filled)

def _convert_resources(self, output_def, transfers):
"""Converts all resource files to the configured format."""
out_extension = output_def["ext"]
if not out_extension:
self.log.warning("No output extension configured in "
"ayon+settings://traypublisher/publish/ExtractEditorialPckgConversion")  # noqa
return transfers

final_transfers = []
out_def_ffmpeg_args = output_def["ffmpeg_args"]
ffmpeg_input_args = [
value.strip()
for value in out_def_ffmpeg_args["input"]
if value.strip()
]
ffmpeg_video_filters = [
value.strip()
for value in out_def_ffmpeg_args["video_filters"]
if value.strip()
]
ffmpeg_audio_filters = [
value.strip()
for value in out_def_ffmpeg_args["audio_filters"]
if value.strip()
]
ffmpeg_output_args = [
value.strip()
for value in out_def_ffmpeg_args["output"]
if value.strip()
]
ffmpeg_input_args = self._split_ffmpeg_args(ffmpeg_input_args)

generic_args = [
subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg"))
]
generic_args.extend(ffmpeg_input_args)
if ffmpeg_video_filters:
generic_args.append("-filter:v")
generic_args.append(
"\"{}\"".format(",".join(ffmpeg_video_filters)))

if ffmpeg_audio_filters:
generic_args.append("-filter:a")
generic_args.append(
"\"{}\"".format(",".join(ffmpeg_audio_filters)))

for source, destination in transfers:
base_name = os.path.basename(destination)
file_name, ext = os.path.splitext(base_name)
dest_path = os.path.join(os.path.dirname(destination),
f"{file_name}.{out_extension}")
final_transfers.append((source, dest_path))

all_args = copy.deepcopy(generic_args)
all_args.append(f"-i \"{source}\"")
all_args.extend(ffmpeg_output_args)  # order matters
all_args.append(f"\"{dest_path}\"")
subprcs_cmd = " ".join(all_args)

# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
run_subprocess(subprcs_cmd, shell=True, logger=self.log)
return final_transfers

def _split_ffmpeg_args(self, in_args):
"""Makes sure all entered arguments are separated into individual items.

Split each argument string with " -" to identify if the string
contains one or more arguments.
"""
splitted_args = []
for arg in in_args:
sub_args = arg.split(" -")
if len(sub_args) == 1:
if arg and arg not in splitted_args:
splitted_args.append(arg)
continue

for idx, arg in enumerate(sub_args):
if idx != 0:
arg = "-" + arg

if arg and arg not in splitted_args:
splitted_args.append(arg)
return splitted_args
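The `_split_ffmpeg_args` helper above splits setting strings that pack several ffmpeg flags into one entry. A quick standalone illustration of the expected behaviour (the input values are hypothetical):

def split_ffmpeg_args(in_args):
    # Split each entry on " -" so "-g 1 -r 25" becomes ["-g 1", "-r 25"],
    # while single-flag entries pass through unchanged.
    splitted_args = []
    for arg in in_args:
        sub_args = arg.split(" -")
        if len(sub_args) == 1:
            if arg and arg not in splitted_args:
                splitted_args.append(arg)
            continue
        for idx, sub_arg in enumerate(sub_args):
            if idx != 0:
                sub_arg = "-" + sub_arg
            if sub_arg and sub_arg not in splitted_args:
                splitted_args.append(sub_arg)
    return splitted_args

print(split_ffmpeg_args(["-g 1 -r 25", "-vcodec libx264"]))
# ['-g 1', '-r 25', '-vcodec libx264']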
@@ -0,0 +1,68 @@
import os
import opentimelineio

import pyblish.api
from ayon_core.pipeline import PublishValidationError


class ValidateEditorialPackage(pyblish.api.InstancePlugin):
"""Checks that the published folder contains all resources from the otio file.

Currently checks only by file names and expects a flat structure.
It ignores paths to resources in the otio file, as the folder might be
dragged in and published from a different location than where it was
created.
"""

label = "Validate Editorial Package"
order = pyblish.api.ValidatorOrder - 0.49

hosts = ["traypublisher"]
families = ["editorial_pckg"]

def process(self, instance):
editorial_pckg_data = instance.data.get("editorial_pckg")
if not editorial_pckg_data:
raise PublishValidationError("Editorial package not collected")

folder_path = editorial_pckg_data["folder_path"]

otio_path = editorial_pckg_data["otio_path"]
if not otio_path:
raise PublishValidationError(
f"Folder {folder_path} missing otio file")

resource_paths = editorial_pckg_data["resource_paths"]

resource_file_names = {os.path.basename(path)
for path in resource_paths}

otio_data = opentimelineio.adapters.read_from_file(otio_path)

target_urls = self._get_all_target_urls(otio_data)
missing_files = set()
for target_url in target_urls:
target_basename = os.path.basename(target_url)
if target_basename not in resource_file_names:
missing_files.add(target_basename)

if missing_files:
raise PublishValidationError(
f"Otio file contains missing files `{missing_files}`.\n\n"
f"Please add them to `{folder_path}` and republish.")

instance.data["editorial_pckg"]["otio_data"] = otio_data

def _get_all_target_urls(self, otio_data):
target_urls = []

# Iterate through tracks, clips, or other elements
for track in otio_data.tracks:
for clip in track:
# Check if the clip has a media reference
if clip.media_reference is not None:
# Access the target_url from the media reference
target_url = clip.media_reference.target_url
if target_url:
target_urls.append(target_url)

return target_urls
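The validator above only compares file basenames between the OTIO media references and the dropped folder. A minimal standalone check in the same spirit (the paths are illustrative; `getattr` is used defensively since gaps and missing references may lack these attributes):

import os
import opentimelineio

otio_data = opentimelineio.adapters.read_from_file("/tmp/edit.otio")

referenced = set()
for track in otio_data.tracks:
    for clip in track:
        # Gaps/transitions have no media reference; skip them safely.
        media_ref = getattr(clip, "media_reference", None)
        target_url = getattr(media_ref, "target_url", None)
        if target_url:
            referenced.add(os.path.basename(target_url))

on_disk = set(os.listdir("/tmp/edit_folder"))
print("missing:", referenced - on_disk)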
@@ -139,6 +139,7 @@ from .path_tools import (
)

from .ayon_info import (
is_in_ayon_launcher_process,
is_running_from_build,
is_using_ayon_console,
is_staging_enabled,

@@ -248,6 +249,7 @@ __all__ = [

"Logger",

"is_in_ayon_launcher_process",
"is_running_from_build",
"is_using_ayon_console",
"is_staging_enabled",
@@ -1,4 +1,5 @@
import os
import sys
import json
import datetime
import platform

@@ -25,6 +26,18 @@ def get_ayon_launcher_version():
return content["__version__"]


def is_in_ayon_launcher_process():
"""Determine if current process is running from AYON launcher.

Returns:
bool: True if running from AYON launcher.

"""
ayon_executable_path = os.path.normpath(os.environ["AYON_EXECUTABLE"])
executable_path = os.path.normpath(sys.executable)
return ayon_executable_path == executable_path


def is_running_from_build():
"""Determine if current process is running from build or code.
@@ -85,7 +85,7 @@ class HoudiniSubmitDeadline(
priority = 50
chunk_size = 1
group = ""

@classmethod
def get_attribute_defs(cls):
return [

@@ -188,7 +188,7 @@ class HoudiniSubmitDeadline(

job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")

if split_render_job and is_export_job:
job_info.Priority = attribute_values.get(
"export_priority", self.export_priority

@@ -309,6 +309,11 @@ class HoudiniSubmitDeadline(
return attr.asdict(plugin_info)

def process(self, instance):
if not instance.data["farm"]:
self.log.debug("Render on farm is disabled. "
"Skipping deadline submission.")
return

super(HoudiniSubmitDeadline, self).process(instance)

# TODO: Avoid the need for this logic here, needed for submit publish
@@ -467,8 +467,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,

# Inject deadline url to instances to query DL for job id for overrides
for inst in instances:
if not "deadline" in inst:
inst["deadline"] = {}
inst["deadline"] = instance.data["deadline"]

# publish job file
@@ -72,7 +72,7 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
auth=auth,
log=self.log)
# some DL return "none" as a pool name
if not "none" in pools:
if "none" not in pools:
pools.append("none")
self.log.info("Available pools: {}".format(pools))
self.pools_per_url[deadline_url] = pools
@@ -7,7 +7,7 @@ from ayon_core.lib import Logger, run_subprocess, AYONSettingsRegistry
from ayon_core.lib.vendor_bin_utils import find_tool_in_custom_paths

from .rr_job import SubmitFile
from .rr_job import RRjob, SubmitterParameter  # noqa F401
from .rr_job import RRJob, SubmitterParameter  # noqa F401


class Api:
File diff suppressed because it is too large
@@ -459,36 +459,6 @@ def is_representation_from_latest(representation):
)


def get_template_data_from_session(session=None, settings=None):
"""Template data for template fill from session keys.

Args:
session (Union[Dict[str, str], None]): The Session to use. If not
provided use the currently active global Session.
settings (Optional[Dict[str, Any]]): Prepared studio or project
settings.

Returns:
Dict[str, Any]: All available data from session.
"""

if session is not None:
project_name = session["AYON_PROJECT_NAME"]
folder_path = session["AYON_FOLDER_PATH"]
task_name = session["AYON_TASK_NAME"]
host_name = session["AYON_HOST_NAME"]
else:
context = get_current_context()
project_name = context["project_name"]
folder_path = context["folder_path"]
task_name = context["task_name"]
host_name = get_current_host_name()

return get_template_data_with_names(
project_name, folder_path, task_name, host_name, settings
)


def get_current_context_template_data(settings=None):
"""Prepare template data for current context.
@@ -2053,7 +2053,7 @@ class CreateContext:
exc_info = sys.exc_info()
self.log.warning(error_message.format(identifier, exc_info[1]))

except:
except:  # noqa: E722
add_traceback = True
exc_info = sys.exc_info()
self.log.warning(

@@ -2163,7 +2163,7 @@ class CreateContext:
exc_info = sys.exc_info()
self.log.warning(error_message.format(identifier, exc_info[1]))

except:
except:  # noqa: E722
failed = True
add_traceback = True
exc_info = sys.exc_info()

@@ -2197,7 +2197,7 @@ class CreateContext:
try:
convertor.find_instances()

except:
except:  # noqa: E722
failed_info.append(
prepare_failed_convertor_operation_info(
convertor.identifier, sys.exc_info()

@@ -2373,7 +2373,7 @@ class CreateContext:
exc_info = sys.exc_info()
self.log.warning(error_message.format(identifier, exc_info[1]))

except:
except:  # noqa: E722
failed = True
add_traceback = True
exc_info = sys.exc_info()

@@ -2440,7 +2440,7 @@ class CreateContext:
error_message.format(identifier, exc_info[1])
)

except:
except:  # noqa: E722
failed = True
add_traceback = True
exc_info = sys.exc_info()

@@ -2546,7 +2546,7 @@ class CreateContext:
try:
self.run_convertor(convertor_identifier)

except:
except:  # noqa: E722
failed_info.append(
prepare_failed_convertor_operation_info(
convertor_identifier, sys.exc_info()
@@ -73,8 +73,8 @@ def get_folder_template_data(folder_entity, project_name):
- 'parent' - direct parent name, project name used if is under
project

Required document fields:
Folder: 'path' -> Plan to require: 'folderType'
Required entity fields:
Folder: 'path', 'folderType'

Args:
folder_entity (Dict[str, Any]): Folder entity.

@@ -101,6 +101,8 @@ def get_folder_template_data(folder_entity, project_name):
return {
"folder": {
"name": folder_name,
"type": folder_entity["folderType"],
"path": path,
},
"asset": folder_name,
"hierarchy": hierarchy,
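The docstring above states that 'parent' falls back to the project name when the folder sits directly under the project. A small sketch of how 'hierarchy' and 'parent' derive from a folder path (example values only, not the ayon-core implementation):

def hierarchy_and_parent(folder_path, project_name):
    # "/assets/characters/hero" -> parts ["assets", "characters"]
    parts = folder_path.strip("/").split("/")
    parts.pop(-1)  # drop the folder name itself
    parent = parts[-1] if parts else project_name
    return "/".join(parts), parent

print(hierarchy_and_parent("/assets/characters/hero", "demo"))
# ('assets/characters', 'characters')
print(hierarchy_and_parent("/hero", "demo"))
# ('', 'demo')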
@@ -33,6 +33,7 @@ import collections
import pyblish.api
import ayon_api

from ayon_core.pipeline.template_data import get_folder_template_data
from ayon_core.pipeline.version_start import get_versioning_start

@@ -383,24 +384,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
# - 'folder', 'hierarchy', 'parent', 'folder'
folder_entity = instance.data.get("folderEntity")
if folder_entity:
folder_name = folder_entity["name"]
folder_path = folder_entity["path"]
hierarchy_parts = folder_path.split("/")
hierarchy_parts.pop(0)
hierarchy_parts.pop(-1)
parent_name = project_entity["name"]
if hierarchy_parts:
parent_name = hierarchy_parts[-1]

hierarchy = "/".join(hierarchy_parts)
anatomy_data.update({
"asset": folder_name,
"hierarchy": hierarchy,
"parent": parent_name,
"folder": {
"name": folder_name,
},
})
folder_data = get_folder_template_data(
folder_entity,
project_entity["name"]
)
anatomy_data.update(folder_data)
return

if instance.data.get("newAssetPublishing"):

@@ -418,6 +406,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
"parent": parent_name,
"folder": {
"name": folder_name,
"path": instance.data["folderPath"],
# TODO get folder type from hierarchy
# Using 'Shot' is current default behavior of editorial
# (or 'newAssetPublishing') publishing.
"type": "Shot",
},
})
|||
|
|
@ -108,69 +108,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
|
|||
|
||||
label = "Integrate Asset"
|
||||
order = pyblish.api.IntegratorOrder
|
||||
families = ["workfile",
|
||||
"pointcache",
|
||||
"pointcloud",
|
||||
"proxyAbc",
|
||||
"camera",
|
||||
"animation",
|
||||
"model",
|
||||
"maxScene",
|
||||
"mayaAscii",
|
||||
"mayaScene",
|
||||
"setdress",
|
||||
"layout",
|
||||
"ass",
|
||||
"assProxy",
|
||||
"vdbcache",
|
||||
"scene",
|
||||
"vrayproxy",
|
||||
"vrayscene_layer",
|
||||
"render",
|
||||
"prerender",
|
||||
"imagesequence",
|
||||
"review",
|
||||
"rendersetup",
|
||||
"rig",
|
||||
"plate",
|
||||
"look",
|
||||
"ociolook",
|
||||
"audio",
|
||||
"yetiRig",
|
||||
"yeticache",
|
||||
"nukenodes",
|
||||
"gizmo",
|
||||
"source",
|
||||
"matchmove",
|
||||
"image",
|
||||
"assembly",
|
||||
"fbx",
|
||||
"gltf",
|
||||
"textures",
|
||||
"action",
|
||||
"harmony.template",
|
||||
"harmony.palette",
|
||||
"editorial",
|
||||
"background",
|
||||
"camerarig",
|
||||
"redshiftproxy",
|
||||
"effect",
|
||||
"xgen",
|
||||
"hda",
|
||||
"usd",
|
||||
"staticMesh",
|
||||
"skeletalMesh",
|
||||
"mvLook",
|
||||
"mvUsd",
|
||||
"mvUsdComposition",
|
||||
"mvUsdOverride",
|
||||
"online",
|
||||
"uasset",
|
||||
"blendScene",
|
||||
"yeticacheUE",
|
||||
"tycache",
|
||||
"csv_ingest_file",
|
||||
]
|
||||
|
||||
default_template_name = "publish"
|
||||
|
||||
|
|
@ -360,7 +297,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
|
|||
|
||||
# Compute the resource file infos once (files belonging to the
|
||||
# version instance instead of an individual representation) so
|
||||
# we can re-use those file infos per representation
|
||||
# we can reuse those file infos per representation
|
||||
resource_file_infos = self.get_files_info(
|
||||
resource_destinations, anatomy
|
||||
)
|
||||
|
|
|
|||
|
|
@@ -1,28 +1,31 @@
"""OpenColorIO Wrapper.

Only to be interpreted by Python 3. It is run in a subprocess in case
Python 2 hosts need to use it. Or it is used as a module for Python 3
processing.

Providing functionality:
- get_colorspace - console command - python 2
- returning all available color spaces
found in input config path.
- _get_colorspace_data - python 3 - module function
- returning all available colorspaces
found in input config path.
- get_views - console command - python 2
- returning all available viewers
found in input config path.
- _get_views_data - python 3 - module function
- returning all available viewers
found in input config path.
Receive OpenColorIO information and store it in JSON format for processes
that don't have access to OpenColorIO or whose version of OpenColorIO is
not compatible.
"""

import click
import json
from pathlib import Path
import PyOpenColorIO as ocio

import click

from ayon_core.pipeline.colorspace import (
has_compatible_ocio_package,
get_display_view_colorspace_name,
get_config_file_rules_colorspace_from_filepath,
get_config_version_data,
get_ocio_config_views,
get_ocio_config_colorspaces,
)


def _save_output_to_json_file(output, output_path):
json_path = Path(output_path)
with open(json_path, "w") as stream:
json.dump(output, stream)

print(f"Data are saved to '{json_path}'")


@click.group()
@ -30,404 +33,185 @@ def main():
|
|||
pass # noqa: WPS100
|
||||
|
||||
|
||||
@main.group()
|
||||
def config():
|
||||
"""Config related commands group
|
||||
|
||||
Example of use:
|
||||
> pyton.exe ./ocio_wrapper.py config <command> *args
|
||||
"""
|
||||
pass # noqa: WPS100
|
||||
|
||||
|
||||
@main.group()
|
||||
def colorspace():
|
||||
"""Colorspace related commands group
|
||||
|
||||
Example of use:
|
||||
> pyton.exe ./ocio_wrapper.py config <command> *args
|
||||
"""
|
||||
pass # noqa: WPS100
|
||||
|
||||
|
||||
@config.command(
|
||||
name="get_colorspace",
|
||||
help=(
|
||||
"return all colorspaces from config file "
|
||||
"--path input arg is required"
|
||||
)
|
||||
)
|
||||
@click.option("--in_path", required=True,
|
||||
help="path where to read ocio config file",
|
||||
type=click.Path(exists=True))
|
||||
@click.option("--out_path", required=True,
|
||||
help="path where to write output json file",
|
||||
type=click.Path())
|
||||
def get_colorspace(in_path, out_path):
|
||||
@main.command(
|
||||
name="get_ocio_config_colorspaces",
|
||||
help="return all colorspaces from config file")
|
||||
@click.option(
|
||||
"--config_path",
|
||||
required=True,
|
||||
help="OCIO config path to read ocio config file.",
|
||||
type=click.Path(exists=True))
|
||||
@click.option(
|
||||
"--output_path",
|
||||
required=True,
|
||||
help="path where to write output json file",
|
||||
type=click.Path())
|
||||
def _get_ocio_config_colorspaces(config_path, output_path):
|
||||
"""Aggregate all colorspace to file.
|
||||
|
||||
Python 2 wrapped console command
|
||||
|
||||
Args:
|
||||
in_path (str): config file path string
|
||||
out_path (str): temp json file path string
|
||||
config_path (str): config file path string
|
||||
output_path (str): temp json file path string
|
||||
|
||||
Example of use:
|
||||
> pyton.exe ./ocio_wrapper.py config get_colorspace
|
||||
--in_path=<path> --out_path=<path>
|
||||
--config_path <path> --output_path <path>
|
||||
"""
|
||||
json_path = Path(out_path)
|
||||
|
||||
out_data = _get_colorspace_data(in_path)
|
||||
|
||||
with open(json_path, "w") as f_:
|
||||
json.dump(out_data, f_)
|
||||
|
||||
print(f"Colorspace data are saved to '{json_path}'")
|
||||
|
||||
|
||||
def _get_colorspace_data(config_path):
|
||||
"""Return all found colorspace data.
|
||||
|
||||
Args:
|
||||
config_path (str): path string leading to config.ocio
|
||||
|
||||
Raises:
|
||||
IOError: Input config does not exist.
|
||||
|
||||
Returns:
|
||||
dict: aggregated available colorspaces
|
||||
"""
|
||||
config_path = Path(config_path)
|
||||
|
||||
if not config_path.is_file():
|
||||
raise IOError(
|
||||
f"Input path `{config_path}` should be `config.ocio` file")
|
||||
|
||||
config = ocio.Config().CreateFromFile(str(config_path))
|
||||
|
||||
colorspace_data = {
|
||||
"roles": {},
|
||||
"colorspaces": {
|
||||
color.getName(): {
|
||||
"family": color.getFamily(),
|
||||
"categories": list(color.getCategories()),
|
||||
"aliases": list(color.getAliases()),
|
||||
"equalitygroup": color.getEqualityGroup(),
|
||||
}
|
||||
for color in config.getColorSpaces()
|
||||
},
|
||||
"displays_views": {
|
||||
f"{view} ({display})": {
|
||||
"display": display,
|
||||
"view": view
|
||||
|
||||
}
|
||||
for display in config.getDisplays()
|
||||
for view in config.getViews(display)
|
||||
},
|
||||
"looks": {}
|
||||
}
|
||||
|
||||
# add looks
|
||||
looks = config.getLooks()
|
||||
if looks:
|
||||
colorspace_data["looks"] = {
|
||||
look.getName(): {"process_space": look.getProcessSpace()}
|
||||
for look in looks
|
||||
}
|
||||
|
||||
# add roles
|
||||
roles = config.getRoles()
|
||||
if roles:
|
||||
colorspace_data["roles"] = {
|
||||
role: {"colorspace": colorspace}
|
||||
for (role, colorspace) in roles
|
||||
}
|
||||
|
||||
return colorspace_data
|
||||
|
||||
|
||||
@config.command(
|
||||
name="get_views",
|
||||
help=(
|
||||
"return all viewers from config file "
|
||||
"--path input arg is required"
|
||||
_save_output_to_json_file(
|
||||
get_ocio_config_colorspaces(config_path),
|
||||
output_path
|
||||
)
|
||||
)
|
||||
@click.option("--in_path", required=True,
|
||||
help="path where to read ocio config file",
|
||||
type=click.Path(exists=True))
|
||||
@click.option("--out_path", required=True,
|
||||
help="path where to write output json file",
|
||||
type=click.Path())
|
||||
def get_views(in_path, out_path):
|
||||
|
||||
|
||||
@main.command(
|
||||
name="get_ocio_config_views",
|
||||
help="All viewers from config file")
|
||||
@click.option(
|
||||
"--config_path",
|
||||
required=True,
|
||||
help="OCIO config path to read ocio config file.",
|
||||
type=click.Path(exists=True))
|
||||
@click.option(
|
||||
"--output_path",
|
||||
required=True,
|
||||
help="path where to write output json file",
|
||||
type=click.Path())
|
||||
def _get_ocio_config_views(config_path, output_path):
|
||||
"""Aggregate all viewers to file.
|
||||
|
||||
Python 2 wrapped console command
|
||||
|
||||
Args:
|
||||
in_path (str): config file path string
|
||||
out_path (str): temp json file path string
|
||||
config_path (str): config file path string
|
||||
output_path (str): temp json file path string
|
||||
|
||||
Example of use:
|
||||
> pyton.exe ./ocio_wrapper.py config get_views \
|
||||
--in_path=<path> --out_path=<path>
|
||||
--config_path <path> --output <path>
|
||||
"""
|
||||
json_path = Path(out_path)
|
||||
|
||||
out_data = _get_views_data(in_path)
|
||||
|
||||
with open(json_path, "w") as f_:
|
||||
json.dump(out_data, f_)
|
||||
|
||||
print(f"Viewer data are saved to '{json_path}'")
|
||||
|
||||
|
||||
def _get_views_data(config_path):
|
||||
"""Return all found viewer data.
|
||||
|
||||
Args:
|
||||
config_path (str): path string leading to config.ocio
|
||||
|
||||
Raises:
|
||||
IOError: Input config does not exist.
|
||||
|
||||
Returns:
|
||||
dict: aggregated available viewers
|
||||
"""
|
||||
config_path = Path(config_path)
|
||||
|
||||
if not config_path.is_file():
|
||||
raise IOError("Input path should be `config.ocio` file")
|
||||
|
||||
config = ocio.Config().CreateFromFile(str(config_path))
|
||||
|
||||
data_ = {}
|
||||
for display in config.getDisplays():
|
||||
for view in config.getViews(display):
|
||||
colorspace = config.getDisplayViewColorSpaceName(display, view)
|
||||
# Special token. See https://opencolorio.readthedocs.io/en/latest/guides/authoring/authoring.html#shared-views # noqa
|
||||
if colorspace == "<USE_DISPLAY_NAME>":
|
||||
colorspace = display
|
||||
|
||||
data_[f"{display}/{view}"] = {
|
||||
"display": display,
|
||||
"view": view,
|
||||
"colorspace": colorspace
|
||||
}
|
||||
|
||||
return data_
|
||||
|
||||
|
||||
@config.command(
|
||||
name="get_version",
|
||||
help=(
|
||||
"return major and minor version from config file "
|
||||
"--config_path input arg is required"
|
||||
"--out_path input arg is required"
|
||||
_save_output_to_json_file(
|
||||
get_ocio_config_views(config_path),
|
||||
output_path
|
||||
)
|
||||
)
|
||||
@click.option("--config_path", required=True,
|
||||
help="path where to read ocio config file",
|
||||
type=click.Path(exists=True))
|
||||
@click.option("--out_path", required=True,
|
||||
help="path where to write output json file",
|
||||
type=click.Path())
|
||||
def get_version(config_path, out_path):
|
||||
"""Get version of config.
|
||||
|
||||
Python 2 wrapped console command
|
||||
|
||||
@main.command(
|
||||
name="get_config_version_data",
|
||||
help="Get major and minor version from config file")
|
||||
@click.option(
|
||||
"--config_path",
|
||||
required=True,
|
||||
help="OCIO config path to read ocio config file.",
|
||||
type=click.Path(exists=True))
|
||||
@click.option(
|
||||
"--output_path",
|
||||
required=True,
|
||||
help="path where to write output json file",
|
||||
type=click.Path())
|
||||
def _get_config_version_data(config_path, output_path):
|
||||
"""Get version of config.
|
||||
|
||||
Args:
|
||||
config_path (str): ocio config file path string
|
||||
out_path (str): temp json file path string
|
||||
output_path (str): temp json file path string
|
||||
|
||||
Example of use:
|
||||
> pyton.exe ./ocio_wrapper.py config get_version \
|
||||
--config_path=<path> --out_path=<path>
|
||||
--config_path <path> --output_path <path>
|
||||
"""
|
||||
json_path = Path(out_path)
|
||||
|
||||
out_data = _get_version_data(config_path)
|
||||
|
||||
with open(json_path, "w") as f_:
|
||||
json.dump(out_data, f_)
|
||||
|
||||
print(f"Config version data are saved to '{json_path}'")
|
||||
|
||||
|
||||
def _get_version_data(config_path):
|
||||
"""Return major and minor version info.
|
||||
|
||||
Args:
|
||||
config_path (str): path string leading to config.ocio
|
||||
|
||||
Raises:
|
||||
IOError: Input config does not exist.
|
||||
|
||||
Returns:
|
||||
dict: minor and major keys with values
|
||||
"""
|
||||
config_path = Path(config_path)
|
||||
|
||||
if not config_path.is_file():
|
||||
raise IOError("Input path should be `config.ocio` file")
|
||||
|
||||
config = ocio.Config().CreateFromFile(str(config_path))
|
||||
|
||||
return {
|
||||
"major": config.getMajorVersion(),
|
||||
"minor": config.getMinorVersion()
|
||||
}
|
||||
|
||||
|
||||
@colorspace.command(
|
||||
name="get_config_file_rules_colorspace_from_filepath",
|
||||
help=(
|
||||
"return colorspace from filepath "
|
||||
"--config_path - ocio config file path (input arg is required) "
|
||||
"--filepath - any file path (input arg is required) "
|
||||
"--out_path - temp json file path (input arg is required)"
|
||||
_save_output_to_json_file(
|
||||
get_config_version_data(config_path),
|
||||
output_path
|
||||
)
|
||||
)
|
||||
@click.option("--config_path", required=True,
|
||||
help="path where to read ocio config file",
|
||||
type=click.Path(exists=True))
|
||||
@click.option("--filepath", required=True,
|
||||
help="path to file to get colorspace from",
|
||||
type=click.Path())
|
||||
@click.option("--out_path", required=True,
|
||||
help="path where to write output json file",
|
||||
type=click.Path())
|
||||
def get_config_file_rules_colorspace_from_filepath(
|
||||
config_path, filepath, out_path
|
||||
|
||||
|
||||
@main.command(
|
||||
name="get_config_file_rules_colorspace_from_filepath",
|
||||
help="Colorspace file rules from filepath")
|
||||
@click.option(
|
||||
"--config_path",
|
||||
required=True,
|
||||
help="OCIO config path to read ocio config file.",
|
||||
type=click.Path(exists=True))
|
||||
@click.option(
|
||||
"--filepath",
|
||||
required=True,
|
||||
help="Path to file to get colorspace from.",
|
||||
type=click.Path())
|
||||
@click.option(
|
||||
"--output_path",
|
||||
required=True,
|
||||
help="Path where to write output json file.",
|
||||
type=click.Path())
|
||||
def _get_config_file_rules_colorspace_from_filepath(
|
||||
config_path, filepath, output_path
|
||||
):
|
||||
"""Get colorspace from file path wrapper.
|
||||
|
||||
Python 2 wrapped console command
|
||||
|
||||
Args:
|
||||
config_path (str): config file path string
|
||||
filepath (str): path string leading to file
|
||||
out_path (str): temp json file path string
|
||||
output_path (str): temp json file path string
|
||||
|
||||
Example of use:
|
||||
> pyton.exe ./ocio_wrapper.py \
|
||||
> python.exe ./ocio_wrapper.py \
|
||||
colorspace get_config_file_rules_colorspace_from_filepath \
|
||||
--config_path=<path> --filepath=<path> --out_path=<path>
|
||||
--config_path <path> --filepath <path> --output_path <path>
|
||||
"""
|
||||
json_path = Path(out_path)
|
||||
|
||||
colorspace = _get_config_file_rules_colorspace_from_filepath(
|
||||
config_path, filepath)
|
||||
|
||||
with open(json_path, "w") as f_:
|
||||
json.dump(colorspace, f_)
|
||||
|
||||
print(f"Colorspace name is saved to '{json_path}'")
|
||||
_save_output_to_json_file(
|
||||
get_config_file_rules_colorspace_from_filepath(config_path, filepath),
|
||||
output_path
|
||||
)
|
||||
|
||||
|
||||
def _get_config_file_rules_colorspace_from_filepath(config_path, filepath):
|
||||
"""Return found colorspace data found in v2 file rules.
|
||||
|
||||
Args:
|
||||
config_path (str): path string leading to config.ocio
|
||||
filepath (str): path string leading to v2 file rules
|
||||
|
||||
Raises:
|
||||
IOError: Input config does not exist.
|
||||
|
||||
Returns:
|
||||
dict: aggregated available colorspaces
|
||||
"""
|
||||
config_path = Path(config_path)
|
||||
|
||||
if not config_path.is_file():
|
||||
raise IOError(
|
||||
f"Input path `{config_path}` should be `config.ocio` file")
|
||||
|
||||
config = ocio.Config().CreateFromFile(str(config_path))
|
||||
|
||||
# TODO: use `parseColorSpaceFromString` instead if ocio v1
|
||||
colorspace = config.getColorSpaceFromFilepath(str(filepath))
|
||||
|
||||
return colorspace
|
||||
|
||||
|
||||
def _get_display_view_colorspace_name(config_path, display, view):
|
||||
"""Returns the colorspace attribute of the (display, view) pair.
|
||||
|
||||
Args:
|
||||
config_path (str): path string leading to config.ocio
|
||||
display (str): display name e.g. "ACES"
|
||||
view (str): view name e.g. "sRGB"
|
||||
|
||||
|
||||
Raises:
|
||||
IOError: Input config does not exist.
|
||||
|
||||
Returns:
|
||||
view color space name (str) e.g. "Output - sRGB"
|
||||
"""
|
||||
|
||||
config_path = Path(config_path)
|
||||
|
||||
if not config_path.is_file():
|
||||
raise IOError("Input path should be `config.ocio` file")
|
||||
|
||||
config = ocio.Config.CreateFromFile(str(config_path))
|
||||
colorspace = config.getDisplayViewColorSpaceName(display, view)
|
||||
|
||||
return colorspace
|
||||
|
||||
|
||||
@config.command(
|
||||
@main.command(
|
||||
name="get_display_view_colorspace_name",
|
||||
help=(
|
||||
"return default view colorspace name "
|
||||
"for the given display and view "
|
||||
"--path input arg is required"
|
||||
)
|
||||
)
|
||||
@click.option("--in_path", required=True,
|
||||
help="path where to read ocio config file",
|
||||
type=click.Path(exists=True))
|
||||
@click.option("--out_path", required=True,
|
||||
help="path where to write output json file",
|
||||
type=click.Path())
|
||||
@click.option("--display", required=True,
|
||||
help="display name",
|
||||
type=click.STRING)
|
||||
@click.option("--view", required=True,
|
||||
help="view name",
|
||||
type=click.STRING)
|
||||
def get_display_view_colorspace_name(in_path, out_path,
|
||||
display, view):
|
||||
"Default view colorspace name for the given display and view"
|
||||
))
|
||||
@click.option(
|
||||
"--config_path",
|
||||
required=True,
|
||||
help="path where to read ocio config file",
|
||||
type=click.Path(exists=True))
|
||||
@click.option(
|
||||
"--display",
|
||||
required=True,
|
||||
help="Display name",
|
||||
type=click.STRING)
|
||||
@click.option(
|
||||
"--view",
|
||||
required=True,
|
||||
help="view name",
|
||||
type=click.STRING)
|
||||
@click.option(
|
||||
"--output_path",
|
||||
required=True,
|
||||
help="path where to write output json file",
|
||||
type=click.Path())
|
||||
def _get_display_view_colorspace_name(
|
||||
config_path, display, view, output_path
|
||||
):
|
||||
"""Aggregate view colorspace name to file.
|
||||
|
||||
Wrapper command for processes without access to OpenColorIO
|
||||
|
||||
Args:
|
||||
in_path (str): config file path string
|
||||
out_path (str): temp json file path string
|
||||
config_path (str): config file path string
|
||||
output_path (str): temp json file path string
|
||||
display (str): display name e.g. "ACES"
|
||||
view (str): view name e.g. "sRGB"
|
||||
|
||||
Example of use:
|
||||
> pyton.exe ./ocio_wrapper.py config \
|
||||
get_display_view_colorspace_name --in_path=<path> \
|
||||
--out_path=<path> --display=<display> --view=<view>
|
||||
get_display_view_colorspace_name --config_path <path> \
|
||||
--output_path <path> --display <display> --view <view>
|
||||
"""
|
||||
_save_output_to_json_file(
|
||||
get_display_view_colorspace_name(config_path, display, view),
|
||||
output_path
|
||||
)
|
||||
|
||||
out_data = _get_display_view_colorspace_name(in_path,
|
||||
display,
|
||||
view)
|
||||
|
||||
with open(out_path, "w") as f:
|
||||
json.dump(out_data, f)
|
||||
|
||||
print(f"Display view colorspace saved to '{out_path}'")
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
if not has_compatible_ocio_package():
|
||||
raise RuntimeError("OpenColorIO is not available.")
|
||||
main()
|
||||
|
|
|
|||
|
|
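The rewritten wrapper is meant to be launched in a subprocess by hosts whose OpenColorIO is missing or incompatible, and it writes its result as JSON. A minimal sketch of invoking one of the new flat commands and reading the output back (the config path and interpreter name are hypothetical):

# Sketch only: run the wrapper in a subprocess and read the JSON it
# writes. Paths and the "python" interpreter name are hypothetical.
import json
import subprocess
import tempfile

config_path = "/path/to/config.ocio"
with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as tmp:
    output_path = tmp.name

subprocess.run(
    [
        "python", "ocio_wrapper.py",
        "get_ocio_config_colorspaces",
        "--config_path", config_path,
        "--output_path", output_path,
    ],
    check=True,
)

with open(output_path) as stream:
    colorspaces = json.load(stream)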
@ -104,14 +104,11 @@ class WebServerTool:
        again. In that case, use existing running webserver.
        Check here is easier than capturing exception from thread.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = True
        try:
            sock.bind((host_name, port))
            result = False
        except:
            print("Port is in use")
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
            result = con.connect_ex((host_name, port)) == 0

        if result:
            print(f"Port {port} is already in use")
        return result

    def call(self, func):

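The new check probes the port with connect_ex instead of binding to it, so no exception handling or manual socket cleanup is needed. A standalone sketch of the same idea:

# Sketch only: connect_ex returns 0 when something is already
# listening on the address, so the check is a simple comparison.
import socket

def port_in_use(host_name, port):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
        return con.connect_ex((host_name, port)) == 0

if port_in_use("localhost", 8080):
    print("Port 8080 is already in use")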
@ -335,9 +335,7 @@ class LoaderWindow(QtWidgets.QWidget):

    def closeEvent(self, event):
        super(LoaderWindow, self).closeEvent(event)
        # Deselect project so current context will be selected
        # on next 'showEvent'
        self._controller.set_selected_project(None)

        self._reset_on_show = True

    def keyPressEvent(self, event):

@ -52,6 +52,7 @@ class SelectionTypes:
class BaseGroupWidget(QtWidgets.QWidget):
    selected = QtCore.Signal(str, str, str)
    removed_selected = QtCore.Signal()
    double_clicked = QtCore.Signal()

    def __init__(self, group_name, parent):
        super(BaseGroupWidget, self).__init__(parent)

@ -192,6 +193,7 @@ class ConvertorItemsGroupWidget(BaseGroupWidget):
            else:
                widget = ConvertorItemCardWidget(item, self)
                widget.selected.connect(self._on_widget_selection)
                widget.double_clicked.connect(self.double_clicked)
                self._widgets_by_id[item.id] = widget
                self._content_layout.insertWidget(widget_idx, widget)
            widget_idx += 1

@ -254,6 +256,7 @@ class InstanceGroupWidget(BaseGroupWidget):
                )
                widget.selected.connect(self._on_widget_selection)
                widget.active_changed.connect(self._on_active_changed)
                widget.double_clicked.connect(self.double_clicked)
                self._widgets_by_id[instance.id] = widget
                self._content_layout.insertWidget(widget_idx, widget)
            widget_idx += 1

@ -271,6 +274,7 @@ class CardWidget(BaseClickableFrame):
    # Group identifier of card
    # - this must be set because it is sent when mouse is released with card id
    _group_identifier = None
    double_clicked = QtCore.Signal()

    def __init__(self, parent):
        super(CardWidget, self).__init__(parent)

@ -279,6 +283,11 @@ class CardWidget(BaseClickableFrame):
        self._selected = False
        self._id = None

    def mouseDoubleClickEvent(self, event):
        super(CardWidget, self).mouseDoubleClickEvent(event)
        if self._is_valid_double_click(event):
            self.double_clicked.emit()

    @property
    def id(self):
        """Id of card."""

@ -312,6 +321,9 @@ class CardWidget(BaseClickableFrame):

        self.selected.emit(self._id, self._group_identifier, selection_type)

    def _is_valid_double_click(self, event):
        return True


class ContextCardWidget(CardWidget):
    """Card for global context.

@ -527,6 +539,15 @@ class InstanceCardWidget(CardWidget):
    def _on_expend_clicked(self):
        self._set_expanded()

    def _is_valid_double_click(self, event):
        widget = self.childAt(event.pos())
        if (
            widget is self._active_checkbox
            or widget is self._expand_btn
        ):
            return False
        return True


class InstanceCardView(AbstractInstanceView):
    """Publish access to card view.

@ -534,6 +555,8 @@ class InstanceCardView(AbstractInstanceView):
    Wrapper of all widgets in card view.
    """

    double_clicked = QtCore.Signal()

    def __init__(self, controller, parent):
        super(InstanceCardView, self).__init__(parent)

@ -715,6 +738,7 @@ class InstanceCardView(AbstractInstanceView):
            )
            group_widget.active_changed.connect(self._on_active_changed)
            group_widget.selected.connect(self._on_widget_selection)
            group_widget.double_clicked.connect(self.double_clicked)
            self._content_layout.insertWidget(widget_idx, group_widget)
            self._widgets_by_group[group_name] = group_widget

@ -755,6 +779,7 @@ class InstanceCardView(AbstractInstanceView):

        widget = ContextCardWidget(self._content_widget)
        widget.selected.connect(self._on_widget_selection)
        widget.double_clicked.connect(self.double_clicked)

        self._context_widget = widget

@ -778,6 +803,7 @@ class InstanceCardView(AbstractInstanceView):
            CONVERTOR_ITEM_GROUP, self._content_widget
        )
        group_widget.selected.connect(self._on_widget_selection)
        group_widget.double_clicked.connect(self.double_clicked)
        self._content_layout.insertWidget(1, group_widget)
        self._convertor_items_group = group_widget

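The pattern repeated through these hunks is signal chaining: each child widget's double_clicked is connected to the identically named signal on its parent, so one double-click bubbles up to the top-level view. A standalone sketch of that pattern (class names are hypothetical, not from this commit):

# Sketch only: re-emitting a child's signal through the parent, the
# same chaining used by the card and list views above.
from qtpy import QtCore, QtWidgets

class Card(QtWidgets.QFrame):
    double_clicked = QtCore.Signal()

    def mouseDoubleClickEvent(self, event):
        super(Card, self).mouseDoubleClickEvent(event)
        self.double_clicked.emit()

class View(QtWidgets.QWidget):
    double_clicked = QtCore.Signal()

    def __init__(self, parent=None):
        super(View, self).__init__(parent)
        card = Card(self)
        # Signal-to-signal connection: the card's double click is
        # re-emitted as the view's own signal.
        card.double_clicked.connect(self.double_clicked)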
@ -110,6 +110,7 @@ class InstanceListItemWidget(QtWidgets.QWidget):
    This is required to be able to use a custom checkbox in a custom place.
    """
    active_changed = QtCore.Signal(str, bool)
    double_clicked = QtCore.Signal()

    def __init__(self, instance, parent):
        super(InstanceListItemWidget, self).__init__(parent)

@ -149,6 +150,12 @@ class InstanceListItemWidget(QtWidgets.QWidget):

        self._set_valid_property(instance.has_valid_context)

    def mouseDoubleClickEvent(self, event):
        widget = self.childAt(event.pos())
        super(InstanceListItemWidget, self).mouseDoubleClickEvent(event)
        if widget is not self._active_checkbox:
            self.double_clicked.emit()

    def _set_valid_property(self, valid):
        if self._has_valid_context == valid:
            return

@ -209,6 +216,8 @@ class InstanceListItemWidget(QtWidgets.QWidget):

class ListContextWidget(QtWidgets.QFrame):
    """Context (or global attributes) widget."""
    double_clicked = QtCore.Signal()

    def __init__(self, parent):
        super(ListContextWidget, self).__init__(parent)

@ -225,6 +234,10 @@ class ListContextWidget(QtWidgets.QFrame):

        self.label_widget = label_widget

    def mouseDoubleClickEvent(self, event):
        super(ListContextWidget, self).mouseDoubleClickEvent(event)
        self.double_clicked.emit()


class InstanceListGroupWidget(QtWidgets.QFrame):
    """Widget representing group of instances.

@ -317,6 +330,7 @@ class InstanceListGroupWidget(QtWidgets.QFrame):
class InstanceTreeView(QtWidgets.QTreeView):
    """View showing instances and their groups."""
    toggle_requested = QtCore.Signal(int)
    double_clicked = QtCore.Signal()

    def __init__(self, *args, **kwargs):
        super(InstanceTreeView, self).__init__(*args, **kwargs)

@ -425,6 +439,9 @@ class InstanceListView(AbstractInstanceView):

    This is public access to and from list view.
    """

    double_clicked = QtCore.Signal()

    def __init__(self, controller, parent):
        super(InstanceListView, self).__init__(parent)

@ -454,6 +471,7 @@ class InstanceListView(AbstractInstanceView):
        instance_view.collapsed.connect(self._on_collapse)
        instance_view.expanded.connect(self._on_expand)
        instance_view.toggle_requested.connect(self._on_toggle_request)
        instance_view.double_clicked.connect(self.double_clicked)

        self._group_items = {}
        self._group_widgets = {}

@ -687,6 +705,7 @@ class InstanceListView(AbstractInstanceView):
                self._active_toggle_enabled
            )
            widget.active_changed.connect(self._on_active_changed)
            widget.double_clicked.connect(self.double_clicked)
            self._instance_view.setIndexWidget(proxy_index, widget)
            self._widgets_by_id[instance.id] = widget

@ -717,6 +736,7 @@ class InstanceListView(AbstractInstanceView):
        )
        proxy_index = self._proxy_model.mapFromSource(index)
        widget = ListContextWidget(self._instance_view)
        widget.double_clicked.connect(self.double_clicked)
        self._instance_view.setIndexWidget(proxy_index, widget)

        self._context_widget = widget

@ -18,6 +18,7 @@ class OverviewWidget(QtWidgets.QFrame):
    instance_context_changed = QtCore.Signal()
    create_requested = QtCore.Signal()
    convert_requested = QtCore.Signal()
    publish_tab_requested = QtCore.Signal()

    anim_end_value = 200
    anim_duration = 200

@ -113,9 +114,15 @@ class OverviewWidget(QtWidgets.QFrame):
        product_list_view.selection_changed.connect(
            self._on_product_change
        )
        product_list_view.double_clicked.connect(
            self.publish_tab_requested
        )
        product_view_cards.selection_changed.connect(
            self._on_product_change
        )
        product_view_cards.double_clicked.connect(
            self.publish_tab_requested
        )
        # Active instances changed
        product_list_view.active_changed.connect(
            self._on_active_changed

@ -258,6 +258,9 @@ class PublisherWindow(QtWidgets.QDialog):
        overview_widget.convert_requested.connect(
            self._on_convert_requested
        )
        overview_widget.publish_tab_requested.connect(
            self._go_to_publish_tab
        )

        save_btn.clicked.connect(self._on_save_clicked)
        reset_btn.clicked.connect(self._on_reset_clicked)

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON core addon version."""
__version__ = "0.3.1-dev.1"
__version__ = "0.3.2-dev.1"

@ -16,7 +16,7 @@ aiohttp_json_rpc = "*" # TVPaint server
aiohttp-middlewares = "^2.0.0"
wsrpc_aiohttp = "^3.1.1" # websocket server
Click = "^8"
OpenTimelineIO = "0.14.1"
OpenTimelineIO = "0.16.0"
opencolorio = "2.2.1"
Pillow = "9.5.0"
pynput = "^1.7.2" # Timers manager - TODO remove

@ -1,11 +1,12 @@
name = "core"
title = "Core"
version = "0.3.1-dev.1"
version = "0.3.2-dev.1"

client_dir = "ayon_core"

plugin_for = ["ayon_server"]
requires = [
    "~ayon_server-1.0.3+<2.0.0",
]

ayon_server_version = ">=1.0.3,<2.0.0"
ayon_launcher_version = ">=1.0.2"
ayon_required_addons = {}
ayon_compatible_addons = {}

@ -1,3 +1,5 @@
from typing import Any

from ayon_server.addons import BaseServerAddon

from .settings import CoreSettings, DEFAULT_VALUES

@ -9,3 +11,53 @@ class CoreAddon(BaseServerAddon):
    async def get_default_settings(self):
        settings_model_cls = self.get_settings_model()
        return settings_model_cls(**DEFAULT_VALUES)

    async def convert_settings_overrides(
        self,
        source_version: str,
        overrides: dict[str, Any],
    ) -> dict[str, Any]:
        self._convert_imagio_configs_0_3_1(overrides)
        # Use super conversion
        return await super().convert_settings_overrides(
            source_version, overrides
        )

    def _convert_imagio_configs_0_3_1(self, overrides):
        """Imageio config settings changed to profiles in 0.3.1."""
        imageio_overrides = overrides.get("imageio") or {}
        if (
            "ocio_config" not in imageio_overrides
            or "filepath" not in imageio_overrides["ocio_config"]
        ):
            return

        ocio_config = imageio_overrides.pop("ocio_config")

        filepath = ocio_config["filepath"]
        if not filepath:
            return
        first_filepath = filepath[0]
        ocio_config_profiles = imageio_overrides.setdefault(
            "ocio_config_profiles", []
        )
        base_value = {
            "type": "builtin_path",
            "product_name": "",
            "host_names": [],
            "task_names": [],
            "task_types": [],
            "custom_path": "",
            "builtin_path": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio"
        }
        if first_filepath in (
            "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
            "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio",
        ):
            base_value["type"] = "builtin_path"
            base_value["builtin_path"] = first_filepath
        else:
            base_value["type"] = "custom_path"
            base_value["custom_path"] = first_filepath

        ocio_config_profiles.append(base_value)

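Read together, the converter turns the first entry of the old single ocio_config override into one profile entry in the new list. A sketch of input and output, with an illustrative custom path:

# Sketch only: what _convert_imagio_configs_0_3_1 does to an overrides
# dict, shown with an illustrative studio config path.
overrides = {
    "imageio": {
        "ocio_config": {"filepath": ["/studio/configs/show.ocio"]},
    }
}
# After conversion the old "ocio_config" key is popped and one profile
# is appended:
# overrides["imageio"]["ocio_config_profiles"] == [{
#     "type": "custom_path",
#     "custom_path": "/studio/configs/show.ocio",
#     "builtin_path": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
#     "product_name": "",
#     "host_names": [], "task_names": [], "task_types": [],
# }]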
@ -54,9 +54,67 @@ class CoreImageIOFileRulesModel(BaseSettingsModel):
        return value


class CoreImageIOConfigModel(BaseSettingsModel):
    filepath: list[str] = SettingsField(
        default_factory=list, title="Config path"
def _ocio_config_profile_types():
    return [
        {"value": "builtin_path", "label": "AYON built-in OCIO config"},
        {"value": "custom_path", "label": "Path to OCIO config"},
        {"value": "product_name", "label": "Published product"},
    ]


def _ocio_built_in_paths():
    return [
        {
            "value": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
            "label": "ACES 1.2",
            "description": "Aces 1.2 OCIO config file."
        },
        {
            "value": "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio",
            "label": "Nuke default",
        },
    ]


class CoreImageIOConfigProfilesModel(BaseSettingsModel):
    _layout = "expanded"
    host_names: list[str] = SettingsField(
        default_factory=list,
        title="Host names"
    )
    task_types: list[str] = SettingsField(
        default_factory=list,
        title="Task types",
        enum_resolver=task_types_enum
    )
    task_names: list[str] = SettingsField(
        default_factory=list,
        title="Task names"
    )
    type: str = SettingsField(
        title="Profile type",
        enum_resolver=_ocio_config_profile_types,
        conditionalEnum=True,
        default="builtin_path",
        section="---",
    )
    builtin_path: str = SettingsField(
        "ACES 1.2",
        title="Built-in OCIO config",
        enum_resolver=_ocio_built_in_paths,
    )
    custom_path: str = SettingsField(
        "",
        title="OCIO config path",
        description="Path to OCIO config. Anatomy formatting is supported.",
    )
    product_name: str = SettingsField(
        "",
        title="Product name",
        description=(
            "Published product name to get OCIO config from. "
            "Partial match is supported."
        ),
    )


@ -65,9 +123,8 @@ class CoreImageIOBaseModel(BaseSettingsModel):
        False,
        title="Enable Color Management"
    )
    ocio_config: CoreImageIOConfigModel = SettingsField(
        default_factory=CoreImageIOConfigModel,
        title="OCIO config"
    ocio_config_profiles: list[CoreImageIOConfigProfilesModel] = SettingsField(
        default_factory=list, title="OCIO config profiles"
    )
    file_rules: CoreImageIOFileRulesModel = SettingsField(
        default_factory=CoreImageIOFileRulesModel,

@ -186,12 +243,17 @@ class CoreSettings(BaseSettingsModel):
DEFAULT_VALUES = {
    "imageio": {
        "activate_global_color_management": False,
        "ocio_config": {
            "filepath": [
                "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
                "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio"
            ]
        },
        "ocio_config_profiles": [
            {
                "host_names": [],
                "task_types": [],
                "task_names": [],
                "type": "builtin_path",
                "builtin_path": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
                "custom_path": "",
                "product_name": "",
            }
        ],
        "file_rules": {
            "activate_global_file_rules": False,
            "rules": [

@ -199,42 +261,57 @@ DEFAULT_VALUES = {
                    "name": "example",
                    "pattern": ".*(beauty).*",
                    "colorspace": "ACES - ACEScg",
                    "ext": "exr"
                    "ext": "exr",
                }
            ]
        }
            ],
        },
    },
    "studio_name": "",
    "studio_code": "",
    "environments": "{\n\"STUDIO_SW\": {\n \"darwin\": \"/mnt/REPO_SW\",\n \"linux\": \"/mnt/REPO_SW\",\n \"windows\": \"P:/REPO_SW\"\n }\n}",
    "environments": json.dumps(
        {
            "STUDIO_SW": {
                "darwin": "/mnt/REPO_SW",
                "linux": "/mnt/REPO_SW",
                "windows": "P:/REPO_SW"
            }
        },
        indent=4
    ),
    "tools": DEFAULT_TOOLS_VALUES,
    "version_start_category": {
        "profiles": []
    },
    "publish": DEFAULT_PUBLISH_VALUES,
    "project_folder_structure": json.dumps({
        "__project_root__": {
            "prod": {},
            "resources": {
                "footage": {
                    "plates": {},
                    "offline": {}
    "project_folder_structure": json.dumps(
        {
            "__project_root__": {
                "prod": {},
                "resources": {
                    "footage": {
                        "plates": {},
                        "offline": {}
                    },
                    "audio": {},
                    "art_dept": {}
                },
                "audio": {},
                "art_dept": {}
            },
            "editorial": {},
            "assets": {
                "characters": {},
                "locations": {}
            },
            "shots": {}
        }
    }, indent=4),
                "editorial": {},
                "assets": {
                    "characters": {},
                    "locations": {}
                },
                "shots": {}
            }
        },
        indent=4
    ),
    "project_plugins": {
        "windows": [],
        "darwin": [],
        "linux": []
    },
    "project_environments": "{}"
    "project_environments": json.dumps(
        {},
        indent=4
    )
}

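Each profile carries host/task filters plus one of three config sources selected by "type". A sketch of how such a profile list is typically matched at runtime: the first profile whose filters are empty or contain the current context wins. This is illustrative matching logic, not the addon's actual resolver:

# Sketch only: illustrative profile matching; the real resolver in
# ayon_core may weigh filters differently.
def select_profile(profiles, host_name, task_name, task_type):
    def _matches(values, value):
        # An empty filter list means "applies to everything".
        return not values or value in values

    for profile in profiles:
        if (
            _matches(profile["host_names"], host_name)
            and _matches(profile["task_names"], task_name)
            and _matches(profile["task_types"], task_type)
        ):
            return profile
    return None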
@ -1,3 +1,3 @@
name = "houdini"
title = "Houdini"
version = "0.2.13"
version = "0.2.14"

@ -1,4 +1,7 @@
from ayon_server.settings import BaseSettingsModel, SettingsField
from ayon_server.settings import (
    BaseSettingsModel,
    SettingsField
)


# Publish Plugins

@ -20,6 +23,27 @@ class CollectChunkSizeModel(BaseSettingsModel):
        title="Frames Per Task")


class AOVFilterSubmodel(BaseSettingsModel):
    """You should use the same host name you are using for Houdini."""
    host_name: str = SettingsField("", title="Houdini Host name")
    value: list[str] = SettingsField(
        default_factory=list,
        title="AOV regex"
    )


class CollectLocalRenderInstancesModel(BaseSettingsModel):

    use_deadline_aov_filter: bool = SettingsField(
        False,
        title="Use Deadline AOV Filter"
    )

    aov_filter: AOVFilterSubmodel = SettingsField(
        default_factory=AOVFilterSubmodel,
        title="Reviewable products filter"
    )


class ValidateWorkfilePathsModel(BaseSettingsModel):
    enabled: bool = SettingsField(title="Enabled")
    optional: bool = SettingsField(title="Optional")

@ -49,6 +73,10 @@ class PublishPluginsModel(BaseSettingsModel):
        default_factory=CollectChunkSizeModel,
        title="Collect Chunk Size."
    )
    CollectLocalRenderInstances: CollectLocalRenderInstancesModel = SettingsField(
        default_factory=CollectLocalRenderInstancesModel,
        title="Collect Local Render Instances."
    )
    ValidateContainers: BasicValidateModel = SettingsField(
        default_factory=BasicValidateModel,
        title="Validate Latest Containers.",

@ -82,6 +110,15 @@ DEFAULT_HOUDINI_PUBLISH_SETTINGS = {
        "optional": True,
        "chunk_size": 999999
    },
    "CollectLocalRenderInstances": {
        "use_deadline_aov_filter": False,
        "aov_filter": {
            "host_name": "houdini",
            "value": [
                ".*([Bb]eauty).*"
            ]
        }
    },
    "ValidateContainers": {
        "enabled": True,
        "optional": True,

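The aov_filter value is a list of regexes matched against AOV names to decide which locally rendered products become reviewable. A sketch of applying the default pattern (the AOV names are hypothetical):

# Sketch only: applying an AOV regex list like the default
# ".*([Bb]eauty).*" to render product names.
import re

aov_patterns = [".*([Bb]eauty).*"]
aov_names = ["beauty", "diffuse", "Beauty_denoised", "specular"]

reviewable = [
    name for name in aov_names
    if any(re.match(pattern, name) for pattern in aov_patterns)
]
# -> ["beauty", "Beauty_denoised"]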
@ -1,3 +1,3 @@
name = "maya"
title = "Maya"
version = "0.1.17"
version = "0.1.18"

@ -46,7 +46,6 @@ def extract_alembic_overrides_enum():
    return [
        {"label": "Custom Attributes", "value": "attr"},
        {"label": "Custom Attributes Prefix", "value": "attrPrefix"},
        {"label": "Auto Subd", "value": "autoSubd"},
        {"label": "Data Format", "value": "dataFormat"},
        {"label": "Euler Filter", "value": "eulerFilter"},
        {"label": "Mel Per Frame Callback", "value": "melPerFrameCallback"},

@ -347,17 +346,6 @@ class ExtractAlembicModel(BaseSettingsModel):
    families: list[str] = SettingsField(
        default_factory=list,
        title="Families")
    autoSubd: bool = SettingsField(
        title="Auto Subd",
        description=(
            "If this flag is present and the mesh has crease edges, crease "
            "vertices or holes, the mesh (OPolyMesh) would now be written out "
            "as an OSubD and crease info will be stored in the Alembic file. "
            "Otherwise, creases info won't be preserved in Alembic file unless"
            " a custom Boolean attribute SubDivisionMesh has been added to "
            "mesh node and its value is true."
        )
    )
    eulerFilter: bool = SettingsField(
        title="Euler Filter",
        description="Apply Euler filter while sampling rotations."

@ -409,6 +397,10 @@ class ExtractAlembicModel(BaseSettingsModel):
        title="Write Color Sets",
        description="Write vertex colors with the geometry."
    )
    writeCreases: bool = SettingsField(
        title="Write Creases",
        description="Write the geometry's edge and vertex crease information."
    )
    writeFaceSets: bool = SettingsField(
        title="Write Face Sets",
        description="Write face sets with the geometry."

@ -1617,7 +1609,6 @@ DEFAULT_PUBLISH_SETTINGS = {
        ],
        "attr": "",
        "attrPrefix": "",
        "autoSubd": False,
        "bake_attributes": [],
        "bake_attribute_prefixes": [],
        "dataFormat": "ogawa",

@ -1641,7 +1632,7 @@ DEFAULT_PUBLISH_SETTINGS = {
        "renderableOnly": False,
        "stripNamespaces": True,
        "uvsOnly": False,
        "uvWrite": False,
        "uvWrite": True,
        "userAttr": "",
        "userAttrPrefix": "",
        "verbose": False,

@ -1649,6 +1640,7 @@ DEFAULT_PUBLISH_SETTINGS = {
        "wholeFrameGeo": False,
        "worldSpace": True,
        "writeColorSets": False,
        "writeCreases": False,
        "writeFaceSets": False,
        "writeNormals": True,
        "writeUVSets": False,

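These boolean settings share their names with Alembic export flags, so a plugin can assemble job arguments directly from the enabled toggles. A sketch of that assembly; the flag mapping is an assumption based on the matching names, not code from this commit:

# Sketch only: turning boolean export settings into "-flag" style job
# arguments. The one-to-one flag mapping is an assumption.
settings = {
    "eulerFilter": True,
    "uvWrite": True,
    "writeColorSets": False,
    "writeCreases": False,
    "writeFaceSets": False,
    "worldSpace": True,
}
job_args = [
    "-{}".format(key) for key, enabled in settings.items() if enabled
]
# -> ["-eulerFilter", "-uvWrite", "-worldSpace"]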
@ -125,6 +125,7 @@ class ReformatNodesConfigModel(BaseSettingsModel):

class IntermediateOutputModel(BaseSettingsModel):
    name: str = SettingsField(title="Output name")
    publish: bool = SettingsField(title="Publish")
    filter: BakingStreamFilterModel = SettingsField(
        title="Filter", default_factory=BakingStreamFilterModel)
    read_raw: bool = SettingsField(

@ -346,6 +347,7 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = {
            "outputs": [
                {
                    "name": "baking",
                    "publish": False,
                    "filter": {
                        "task_types": [],
                        "product_types": [],

@ -401,6 +403,7 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = {
            "outputs": [
                {
                    "name": "baking",
                    "publish": False,
                    "filter": {
                        "task_types": [],
                        "product_types": [],

@ -1,3 +1,3 @@
name = "traypublisher"
title = "TrayPublisher"
version = "0.1.4"
version = "0.1.5"

@ -1,4 +1,7 @@
from ayon_server.settings import BaseSettingsModel, SettingsField
from ayon_server.settings import (
    BaseSettingsModel,
    SettingsField,
)


class ValidatePluginModel(BaseSettingsModel):

@ -14,6 +17,45 @@ class ValidateFrameRangeModel(ValidatePluginModel):
    'my_asset_to_publish.mov')"""


class ExtractEditorialPckgFFmpegModel(BaseSettingsModel):
    video_filters: list[str] = SettingsField(
        default_factory=list,
        title="Video filters"
    )
    audio_filters: list[str] = SettingsField(
        default_factory=list,
        title="Audio filters"
    )
    input: list[str] = SettingsField(
        default_factory=list,
        title="Input arguments"
    )
    output: list[str] = SettingsField(
        default_factory=list,
        title="Output arguments"
    )


class ExtractEditorialPckgOutputDefModel(BaseSettingsModel):
    _layout = "expanded"
    ext: str = SettingsField("", title="Output extension")

    ffmpeg_args: ExtractEditorialPckgFFmpegModel = SettingsField(
        default_factory=ExtractEditorialPckgFFmpegModel,
        title="FFmpeg arguments"
    )


class ExtractEditorialPckgConversionModel(BaseSettingsModel):
    """Set output definition if resource files should be converted."""
    conversion_enabled: bool = SettingsField(True,
                                             title="Conversion enabled")
    output: ExtractEditorialPckgOutputDefModel = SettingsField(
        default_factory=ExtractEditorialPckgOutputDefModel,
        title="Output Definitions",
    )


class TrayPublisherPublishPlugins(BaseSettingsModel):
    CollectFrameDataFromAssetEntity: ValidatePluginModel = SettingsField(
        default_factory=ValidatePluginModel,

@ -28,6 +70,13 @@ class TrayPublisherPublishPlugins(BaseSettingsModel):
        default_factory=ValidatePluginModel,
    )

    ExtractEditorialPckgConversion: ExtractEditorialPckgConversionModel = (
        SettingsField(
            default_factory=ExtractEditorialPckgConversionModel,
            title="Extract Editorial Package Conversion"
        )
    )


DEFAULT_PUBLISH_PLUGINS = {
    "CollectFrameDataFromAssetEntity": {

@ -44,5 +93,24 @@ DEFAULT_PUBLISH_PLUGINS = {
        "enabled": True,
        "optional": True,
        "active": True
    },
    "ExtractEditorialPckgConversion": {
        "optional": False,
        "conversion_enabled": True,
        "output": {
            "ext": "",
            "ffmpeg_args": {
                "video_filters": [],
                "audio_filters": [],
                "input": [
                    "-apply_trc gamma22"
                ],
                "output": [
                    "-pix_fmt yuv420p",
                    "-crf 18",
                    "-intra"
                ]
            }
        }
    }
}

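The default output definition expands into an ffmpeg command roughly as sketched below; the source and destination file names are hypothetical, and how the extractor actually assembles its arguments may differ:

# Sketch only: expanding the default ffmpeg_args into a command line.
ffmpeg_args = {
    "video_filters": [],
    "audio_filters": [],
    "input": ["-apply_trc gamma22"],
    "output": ["-pix_fmt yuv420p", "-crf 18", "-intra"],
}

cmd = ["ffmpeg"]
for arg in ffmpeg_args["input"]:
    cmd.extend(arg.split(" "))
cmd.extend(["-i", "source.mov"])
if ffmpeg_args["video_filters"]:
    cmd.extend(["-vf", ",".join(ffmpeg_args["video_filters"])])
if ffmpeg_args["audio_filters"]:
    cmd.extend(["-af", ",".join(ffmpeg_args["audio_filters"])])
for arg in ffmpeg_args["output"]:
    cmd.extend(arg.split(" "))
cmd.append("output.mov")
# -> ffmpeg -apply_trc gamma22 -i source.mov -pix_fmt yuv420p
#    -crf 18 -intra output.mov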