mirror of
https://github.com/ynput/ayon-core.git
synced 2025-12-24 21:04:40 +01:00
Merge branch 'develop' into enhancement/1297-product-base-types-creation-and-creator-plugins
This commit is contained in:
commit
aec589d9dd
24 changed files with 840 additions and 104 deletions
2
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
2
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
|
|
@ -35,6 +35,8 @@ body:
|
|||
label: Version
|
||||
description: What version are you running? Look to AYON Tray
|
||||
options:
|
||||
- 1.6.13
|
||||
- 1.6.12
|
||||
- 1.6.11
|
||||
- 1.6.10
|
||||
- 1.6.9
|
||||
|
|
|
|||
|
|
@ -185,9 +185,14 @@ class IPluginPaths(AYONInterface):
|
|||
"""
|
||||
return self._get_plugin_paths_by_type("inventory")
|
||||
|
||||
def get_loader_action_plugin_paths(self) -> list[str]:
|
||||
def get_loader_action_plugin_paths(
|
||||
self, host_name: Optional[str]
|
||||
) -> list[str]:
|
||||
"""Receive loader action plugin paths.
|
||||
|
||||
Args:
|
||||
host_name (Optional[str]): Current host name.
|
||||
|
||||
Returns:
|
||||
list[str]: Paths to loader action plugins.
|
||||
|
||||
|
|
|
|||
|
|
@ -1232,6 +1232,14 @@ def oiio_color_convert(
|
|||
# Handle the different conversion cases
|
||||
# Source view and display are known
|
||||
if source_view and source_display:
|
||||
color_convert_args = None
|
||||
ocio_display_args = None
|
||||
oiio_cmd.extend([
|
||||
"--ociodisplay:inverse=1:subimages=0",
|
||||
source_display,
|
||||
source_view,
|
||||
])
|
||||
|
||||
if target_colorspace:
|
||||
# This is a two-step conversion process since there's no direct
|
||||
# display/view to colorspace command
|
||||
|
|
@ -1241,22 +1249,25 @@ def oiio_color_convert(
|
|||
elif source_display != target_display or source_view != target_view:
|
||||
# Complete display/view pair conversion
|
||||
# - go through a reference space
|
||||
color_convert_args = (target_display, target_view)
|
||||
ocio_display_args = (target_display, target_view)
|
||||
else:
|
||||
color_convert_args = None
|
||||
logger.debug(
|
||||
"Source and target display/view pairs are identical."
|
||||
" No color conversion needed."
|
||||
)
|
||||
|
||||
if color_convert_args:
|
||||
# Use colorconvert for colorspace target
|
||||
oiio_cmd.extend([
|
||||
"--ociodisplay:inverse=1:subimages=0",
|
||||
source_display,
|
||||
source_view,
|
||||
"--colorconvert:subimages=0",
|
||||
*color_convert_args
|
||||
])
|
||||
elif ocio_display_args:
|
||||
# Use ociodisplay for display/view target
|
||||
oiio_cmd.extend([
|
||||
"--ociodisplay:subimages=0",
|
||||
*ocio_display_args
|
||||
])
|
||||
|
||||
elif target_colorspace:
|
||||
# Standard color space to color space conversion
|
||||
|
|
@ -1281,24 +1292,6 @@ def oiio_color_convert(
|
|||
run_subprocess(oiio_cmd, logger=logger)
|
||||
|
||||
|
||||
def split_cmd_args(in_args):
    """Make sure all entered arguments are separated into individual items.

    Each argument string is split on spaces to identify whether a string
    contains one or more arguments (e.g. ``'-d uint10'`` holds two tokens).
    Empty tokens produced by repeated or trailing spaces are discarded.

    Args:
        in_args (list): List of argument strings, e.g. ``['-n', '-d uint10']``.

    Returns:
        list: Flattened arguments, e.g. ``['-n', '-d', 'uint10']``.
    """
    splitted_args = []
    for arg in in_args:
        # Skip entries that are empty or whitespace-only
        if not arg.strip():
            continue
        # Filter out empty tokens caused by consecutive spaces so the
        # resulting argument list never contains '' items.
        splitted_args.extend(token for token in arg.split(" ") if token)
    return splitted_args
|
||||
|
||||
|
||||
def get_rescaled_command_arguments(
|
||||
application,
|
||||
input_path,
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ from dataclasses import dataclass
|
|||
import ayon_api
|
||||
|
||||
from ayon_core import AYON_CORE_ROOT
|
||||
from ayon_core.lib import StrEnum, Logger
|
||||
from ayon_core.lib import StrEnum, Logger, is_func_signature_supported
|
||||
from ayon_core.host import AbstractHost
|
||||
from ayon_core.addon import AddonsManager, IPluginPaths
|
||||
from ayon_core.settings import get_studio_settings, get_project_settings
|
||||
|
|
@ -752,6 +752,7 @@ class LoaderActionsContext:
|
|||
|
||||
def _get_plugins(self) -> dict[str, LoaderActionPlugin]:
|
||||
if self._plugins is None:
|
||||
host_name = self.get_host_name()
|
||||
addons_manager = self.get_addons_manager()
|
||||
all_paths = [
|
||||
os.path.join(AYON_CORE_ROOT, "plugins", "loader")
|
||||
|
|
@ -759,7 +760,24 @@ class LoaderActionsContext:
|
|||
for addon in addons_manager.addons:
|
||||
if not isinstance(addon, IPluginPaths):
|
||||
continue
|
||||
paths = addon.get_loader_action_plugin_paths()
|
||||
|
||||
try:
|
||||
if is_func_signature_supported(
|
||||
addon.get_loader_action_plugin_paths,
|
||||
host_name
|
||||
):
|
||||
paths = addon.get_loader_action_plugin_paths(
|
||||
host_name
|
||||
)
|
||||
else:
|
||||
paths = addon.get_loader_action_plugin_paths()
|
||||
except Exception:
|
||||
self._log.warning(
|
||||
"Failed to get plugin paths for addon",
|
||||
exc_info=True
|
||||
)
|
||||
continue
|
||||
|
||||
if paths:
|
||||
all_paths.extend(paths)
|
||||
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ import platform
|
|||
import tempfile
|
||||
import warnings
|
||||
from copy import deepcopy
|
||||
from dataclasses import dataclass
|
||||
|
||||
import ayon_api
|
||||
|
||||
|
|
@ -26,6 +27,18 @@ from ayon_core.pipeline.load import get_representation_path_with_anatomy
|
|||
log = Logger.get_logger(__name__)
|
||||
|
||||
|
||||
@dataclass
class ConfigData:
    """OCIO Config to use in a certain context.

    When enabled and no path/template are set, it will be considered invalid
    and will error on OCIO path not found. Enabled must be False to explicitly
    allow OCIO to be disabled."""
    # Resolved filesystem path to the OCIO config file (empty when not found)
    path: str = ""
    # Anatomy/template string the path was resolved from (empty when unknown)
    template: str = ""
    # False means OCIO color management is explicitly disabled for context
    enabled: bool = True
|
||||
|
||||
|
||||
class CachedData:
|
||||
remapping = {}
|
||||
has_compatible_ocio_package = None
|
||||
|
|
@ -710,7 +723,7 @@ def _get_config_path_from_profile_data(
|
|||
template_data (dict[str, Any]): Template data.
|
||||
|
||||
Returns:
|
||||
dict[str, str]: Config data with path and template.
|
||||
ConfigData: Config data with path and template.
|
||||
"""
|
||||
template = profile[profile_type]
|
||||
result = StringTemplate.format_strict_template(
|
||||
|
|
@ -719,12 +732,12 @@ def _get_config_path_from_profile_data(
|
|||
normalized_path = str(result.normalized())
|
||||
if not os.path.exists(normalized_path):
|
||||
log.warning(f"Path was not found '{normalized_path}'.")
|
||||
return None
|
||||
return ConfigData() # Return invalid config data
|
||||
|
||||
return {
|
||||
"path": normalized_path,
|
||||
"template": template
|
||||
}
|
||||
return ConfigData(
|
||||
path=normalized_path,
|
||||
template=template
|
||||
)
|
||||
|
||||
|
||||
def _get_global_config_data(
|
||||
|
|
@ -735,7 +748,7 @@ def _get_global_config_data(
|
|||
imageio_global,
|
||||
folder_id,
|
||||
log,
|
||||
):
|
||||
) -> ConfigData:
|
||||
"""Get global config data.
|
||||
|
||||
Global config from core settings is using profiles that are based on
|
||||
|
|
@ -759,8 +772,7 @@ def _get_global_config_data(
|
|||
log (logging.Logger): Logger object.
|
||||
|
||||
Returns:
|
||||
Union[dict[str, str], None]: Config data with path and template
|
||||
or None.
|
||||
ConfigData: Config data with path and template.
|
||||
|
||||
"""
|
||||
task_name = task_type = None
|
||||
|
|
@ -779,12 +791,14 @@ def _get_global_config_data(
|
|||
)
|
||||
if profile is None:
|
||||
log.info(f"No config profile matched filters {str(filter_values)}")
|
||||
return None
|
||||
return ConfigData(enabled=False)
|
||||
|
||||
profile_type = profile["type"]
|
||||
if profile_type in ("builtin_path", "custom_path"):
|
||||
if profile_type in {"builtin_path", "custom_path"}:
|
||||
return _get_config_path_from_profile_data(
|
||||
profile, profile_type, template_data)
|
||||
elif profile_type == "disabled":
|
||||
return ConfigData(enabled=False)
|
||||
|
||||
# TODO decide if this is the right name for representation
|
||||
repre_name = "ocioconfig"
|
||||
|
|
@ -798,7 +812,7 @@ def _get_global_config_data(
|
|||
"Colorspace OCIO config path cannot be set. "
|
||||
"Profile is set to published product but `Product name` is empty."
|
||||
)
|
||||
return None
|
||||
return ConfigData()
|
||||
|
||||
folder_info = template_data.get("folder")
|
||||
if not folder_info:
|
||||
|
|
@ -819,7 +833,7 @@ def _get_global_config_data(
|
|||
)
|
||||
if not folder_entity:
|
||||
log.warning(f"Folder entity '{folder_path}' was not found..")
|
||||
return None
|
||||
return ConfigData()
|
||||
folder_id = folder_entity["id"]
|
||||
|
||||
product_entities_by_name = {
|
||||
|
|
@ -855,7 +869,7 @@ def _get_global_config_data(
|
|||
log.info(
|
||||
f"Product '{product_name}' does not have available any versions."
|
||||
)
|
||||
return None
|
||||
return ConfigData()
|
||||
|
||||
# Find 'ocioconfig' representation entity
|
||||
repre_entity = ayon_api.get_representation_by_name(
|
||||
|
|
@ -868,15 +882,15 @@ def _get_global_config_data(
|
|||
f"Representation '{repre_name}'"
|
||||
f" not found on product '{product_name}'."
|
||||
)
|
||||
return None
|
||||
return ConfigData()
|
||||
|
||||
path = get_representation_path_with_anatomy(repre_entity, anatomy)
|
||||
template = repre_entity["attrib"]["template"]
|
||||
|
||||
return {
|
||||
"path": path,
|
||||
"template": template,
|
||||
}
|
||||
return ConfigData(
|
||||
path=path,
|
||||
template=template
|
||||
)
|
||||
|
||||
|
||||
def get_imageio_config_preset(
|
||||
|
|
@ -1015,13 +1029,19 @@ def get_imageio_config_preset(
|
|||
host_ocio_config["filepath"], template_data
|
||||
)
|
||||
|
||||
if not config_data:
|
||||
if not config_data.enabled:
|
||||
return {} # OCIO management disabled
|
||||
|
||||
if not config_data.path:
|
||||
raise FileExistsError(
|
||||
"No OCIO config found in settings. It is"
|
||||
" either missing or there is typo in path inputs"
|
||||
)
|
||||
|
||||
return config_data
|
||||
return {
|
||||
"path": config_data.path,
|
||||
"template": config_data.template,
|
||||
}
|
||||
|
||||
|
||||
def _get_host_config_data(templates, template_data):
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
"""Package to handle compatibility checks for pipeline components."""
|
||||
import ayon_api
|
||||
|
||||
|
||||
def is_product_base_type_supported() -> bool:
|
||||
|
|
@ -13,4 +14,7 @@ def is_product_base_type_supported() -> bool:
|
|||
bool: True if product base types are supported, False otherwise.
|
||||
|
||||
"""
|
||||
return False
|
||||
|
||||
if not hasattr(ayon_api, "is_product_base_type_supported"):
|
||||
return False
|
||||
return ayon_api.is_product_base_type_supported()
|
||||
|
|
|
|||
|
|
@ -54,8 +54,8 @@ def get_product_name_template(
|
|||
profiles = tools_settings["creator"]["product_name_profiles"]
|
||||
filtering_criteria = {
|
||||
"product_types": product_type,
|
||||
"hosts": host_name,
|
||||
"tasks": task_name,
|
||||
"host_names": host_name,
|
||||
"task_names": task_name,
|
||||
"task_types": task_type,
|
||||
"product_base_types": product_base_type,
|
||||
}
|
||||
|
|
|
|||
|
|
@ -253,6 +253,19 @@ def create_skeleton_instance(
|
|||
"reuseLastVersion": data.get("reuseLastVersion", False),
|
||||
}
|
||||
|
||||
# Pass on the OCIO metadata of what the source display and view are
|
||||
# so that the farm can correctly set up color management.
|
||||
if "sceneDisplay" in data and "sceneView" in data:
|
||||
instance_skeleton_data["sceneDisplay"] = data["sceneDisplay"]
|
||||
instance_skeleton_data["sceneView"] = data["sceneView"]
|
||||
elif "colorspaceDisplay" in data and "colorspaceView" in data:
|
||||
# Backwards compatibility for sceneDisplay and sceneView
|
||||
instance_skeleton_data["colorspaceDisplay"] = data["colorspaceDisplay"]
|
||||
instance_skeleton_data["colorspaceView"] = data["colorspaceView"]
|
||||
if "sourceDisplay" in data and "sourceView" in data:
|
||||
instance_skeleton_data["sourceDisplay"] = data["sourceDisplay"]
|
||||
instance_skeleton_data["sourceView"] = data["sourceView"]
|
||||
|
||||
if data.get("renderlayer"):
|
||||
instance_skeleton_data["renderlayer"] = data["renderlayer"]
|
||||
|
||||
|
|
@ -589,7 +602,6 @@ def create_instances_for_aov(
|
|||
"""
|
||||
# we cannot attach AOVs to other products as we consider every
|
||||
# AOV product of its own.
|
||||
|
||||
log = Logger.get_logger("farm_publishing")
|
||||
|
||||
# if there are product to attach to and more than one AOV,
|
||||
|
|
@ -612,8 +624,8 @@ def create_instances_for_aov(
|
|||
additional_data.update({
|
||||
"colorspaceConfig": colorspace_config,
|
||||
# Display/View are optional
|
||||
"display": instance.data.get("colorspaceDisplay"),
|
||||
"view": instance.data.get("colorspaceView")
|
||||
"display": instance.data.get("sourceDisplay"),
|
||||
"view": instance.data.get("sourceView")
|
||||
})
|
||||
|
||||
# Get templated path from absolute config path.
|
||||
|
|
|
|||
|
|
@ -122,7 +122,8 @@ def get_publish_template_name(
|
|||
task_type,
|
||||
project_settings=None,
|
||||
hero=False,
|
||||
logger=None
|
||||
product_base_type: Optional[str] = None,
|
||||
logger=None,
|
||||
):
|
||||
"""Get template name which should be used for passed context.
|
||||
|
||||
|
|
@ -140,17 +141,29 @@ def get_publish_template_name(
|
|||
task_type (str): Task type on which is instance working.
|
||||
project_settings (Dict[str, Any]): Prepared project settings.
|
||||
hero (bool): Template is for hero version publishing.
|
||||
product_base_type (Optional[str]): Product type for which should
|
||||
be found template.
|
||||
logger (logging.Logger): Custom logger used for 'filter_profiles'
|
||||
function.
|
||||
|
||||
Returns:
|
||||
str: Template name which should be used for integration.
|
||||
"""
|
||||
if not product_base_type:
|
||||
msg = (
|
||||
"Argument 'product_base_type' is not provided to"
|
||||
" 'get_publish_template_name' function. This argument"
|
||||
" will be required in future versions."
|
||||
)
|
||||
warnings.warn(msg, DeprecationWarning)
|
||||
if logger:
|
||||
logger.warning(msg)
|
||||
|
||||
template = None
|
||||
filter_criteria = {
|
||||
"hosts": host_name,
|
||||
"product_types": product_type,
|
||||
"product_base_types": product_base_type,
|
||||
"task_names": task_name,
|
||||
"task_types": task_type,
|
||||
}
|
||||
|
|
@ -812,7 +825,22 @@ def replace_with_published_scene_path(instance, replace_in_path=True):
|
|||
template_data["comment"] = None
|
||||
|
||||
anatomy = instance.context.data["anatomy"]
|
||||
template = anatomy.get_template_item("publish", "default", "path")
|
||||
project_name = anatomy.project_name
|
||||
task_name = task_type = None
|
||||
task_entity = instance.data.get("taskEntity")
|
||||
if task_entity:
|
||||
task_name = task_entity["name"]
|
||||
task_type = task_entity["taskType"]
|
||||
project_settings = instance.context.data["project_settings"]
|
||||
template_name = get_publish_template_name(
|
||||
project_name=project_name,
|
||||
host_name=instance.context.data["hostName"],
|
||||
product_type=workfile_instance.data["productType"],
|
||||
task_name=task_name,
|
||||
task_type=task_type,
|
||||
project_settings=project_settings,
|
||||
)
|
||||
template = anatomy.get_template_item("publish", template_name, "path")
|
||||
template_filled = template.format_strict(template_data)
|
||||
file_path = os.path.normpath(template_filled)
|
||||
|
||||
|
|
|
|||
|
|
@ -684,3 +684,20 @@ def get_sdf_format_args(path):
|
|||
"""Return SDF_FORMAT_ARGS parsed to `dict`"""
|
||||
_raw_path, data = Sdf.Layer.SplitIdentifier(path)
|
||||
return data
|
||||
|
||||
|
||||
def get_standard_default_prim_name(folder_path: str) -> str:
    """Return the AYON-specified default prim name for a folder path.

    Used e.g. as the default prim in AYON USD Contribution workflows.
    The prim name is the last component of the folder path. When that
    component starts with a digit it is prefixed with an underscore,
    because USD prim names may not begin with a digit — authoring such
    names generates essentially invalid data and can cause unexpected
    behavior in certain USD or DCC versions (e.g. failure to refresh in
    usdview or crashes in Houdini 21).
    """
    # The candidate prim name is the final path component.
    name = folder_path.rsplit("/", 1)[-1]

    # Sanitize leading digit; `name[:1]` is safely '' for empty names.
    if name[:1].isdigit():
        name = f"_{name}"

    return name
|
||||
|
|
|
|||
|
|
@ -87,15 +87,19 @@ class ExtractOIIOTranscode(publish.Extractor):
|
|||
profile_output_defs = profile["outputs"]
|
||||
new_representations = []
|
||||
repres = instance.data["representations"]
|
||||
for idx, repre in enumerate(list(repres)):
|
||||
# target space, display and view might be defined upstream
|
||||
# TODO: address https://github.com/ynput/ayon-core/pull/1268#discussion_r2156555474
|
||||
# Implement upstream logic to handle target_colorspace,
|
||||
# target_display, target_view in other DCCs
|
||||
target_colorspace = False
|
||||
target_display = instance.data.get("colorspaceDisplay")
|
||||
target_view = instance.data.get("colorspaceView")
|
||||
|
||||
scene_display = instance.data.get(
|
||||
"sceneDisplay",
|
||||
# Backward compatibility
|
||||
instance.data.get("colorspaceDisplay")
|
||||
)
|
||||
scene_view = instance.data.get(
|
||||
"sceneView",
|
||||
# Backward compatibility
|
||||
instance.data.get("colorspaceView")
|
||||
)
|
||||
|
||||
for idx, repre in enumerate(list(repres)):
|
||||
self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
|
||||
if not self._repre_is_valid(repre):
|
||||
continue
|
||||
|
|
@ -142,24 +146,18 @@ class ExtractOIIOTranscode(publish.Extractor):
|
|||
|
||||
transcoding_type = output_def["transcoding_type"]
|
||||
|
||||
# NOTE: we use colorspace_data as the fallback values for
|
||||
# the target colorspace.
|
||||
# Set target colorspace/display/view based on transcoding type
|
||||
target_colorspace = None
|
||||
target_view = None
|
||||
target_display = None
|
||||
if transcoding_type == "colorspace":
|
||||
# TODO: Should we fallback to the colorspace
|
||||
# (which used as source above) ?
|
||||
# or should we compute the target colorspace from
|
||||
# current view and display ?
|
||||
target_colorspace = (output_def["colorspace"] or
|
||||
colorspace_data.get("colorspace"))
|
||||
target_colorspace = output_def["colorspace"]
|
||||
elif transcoding_type == "display_view":
|
||||
display_view = output_def["display_view"]
|
||||
target_view = (
|
||||
display_view["view"]
|
||||
or colorspace_data.get("view"))
|
||||
target_display = (
|
||||
display_view["display"]
|
||||
or colorspace_data.get("display")
|
||||
)
|
||||
# If empty values are provided in output definition,
|
||||
# fallback to scene display/view that is collected from DCC
|
||||
target_view = display_view["view"] or scene_view
|
||||
target_display = display_view["display"] or scene_display
|
||||
|
||||
# both could be already collected by DCC,
|
||||
# but could be overwritten when transcoding
|
||||
|
|
|
|||
353
client/ayon_core/plugins/publish/extract_oiio_postprocess.py
Normal file
353
client/ayon_core/plugins/publish/extract_oiio_postprocess.py
Normal file
|
|
@ -0,0 +1,353 @@
|
|||
from __future__ import annotations
|
||||
from typing import Any, Optional
|
||||
import os
|
||||
import copy
|
||||
import clique
|
||||
import pyblish.api
|
||||
|
||||
from ayon_core.pipeline import (
|
||||
publish,
|
||||
get_temp_dir
|
||||
)
|
||||
from ayon_core.lib import (
|
||||
is_oiio_supported,
|
||||
get_oiio_tool_args,
|
||||
run_subprocess
|
||||
)
|
||||
from ayon_core.lib.transcoding import IMAGE_EXTENSIONS
|
||||
from ayon_core.lib.profiles_filtering import filter_profiles
|
||||
|
||||
|
||||
class ExtractOIIOPostProcess(publish.Extractor):
    """Process representations through `oiiotool` with profile defined
    settings so that e.g. color space conversions can be applied or images
    could be converted to scanline, resized, etc. regardless of colorspace
    data.
    """

    label = "OIIO Post Process"
    # Run after the default extractor order so representations exist first.
    order = pyblish.api.ExtractorOrder + 0.020

    settings_category = "core"

    optional = True

    # Supported extensions (without leading dot)
    supported_exts = {ext.lstrip(".") for ext in IMAGE_EXTENSIONS}

    # Configurable by Settings
    profiles = None
    options = None

    def process(self, instance):
        """Run configured `oiiotool` conversions on instance representations.

        Skips farm-bound instances, instances without representations, and
        environments without a working OIIO. For each representation that
        matches a settings profile, every output definition in the profile
        produces a new representation whose files are converted into a
        temporary staging directory.

        Args:
            instance (pyblish.api.Instance): Instance being published.
        """
        if instance.data.get("farm"):
            self.log.debug("Should be processed on farm, skipping.")
            return

        if not self.profiles:
            self.log.debug("No profiles present for OIIO Post Process")
            return

        if not instance.data.get("representations"):
            self.log.debug("No representations, skipping.")
            return

        if not is_oiio_supported():
            self.log.warning("OIIO not supported, no transcoding possible.")
            return

        new_representations = []
        # Iterate over a copy so the list can be mutated while looping.
        for idx, repre in enumerate(list(instance.data["representations"])):
            self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
            if not self._repre_is_valid(repre):
                continue

            # We check profile per representation name and extension because
            # it's included in the profile check. As such, an instance may have
            # a different profile applied per representation.
            profile = self._get_profile(
                instance,
                repre
            )
            if not profile:
                continue

            # Get representation files to convert
            if isinstance(repre["files"], list):
                repre_files_to_convert = copy.deepcopy(repre["files"])
            else:
                # Single file representation; normalize to a list.
                repre_files_to_convert = [repre["files"]]

            added_representations = False
            added_review = False

            # Process each output definition
            for output_def in profile["outputs"]:

                # Local copy to avoid accidental mutable changes
                files_to_convert = list(repre_files_to_convert)

                output_name = output_def["name"]
                new_repre = copy.deepcopy(repre)

                original_staging_dir = new_repre["stagingDir"]
                new_staging_dir = get_temp_dir(
                    project_name=instance.context.data["projectName"],
                    use_local_temp=True,
                )
                new_repre["stagingDir"] = new_staging_dir

                # Normalize extension to not contain a leading dot.
                output_extension = output_def["extension"]
                output_extension = output_extension.replace('.', '')
                self._rename_in_representation(new_repre,
                                               files_to_convert,
                                               output_name,
                                               output_extension)

                sequence_files = self._translate_to_sequence(files_to_convert)
                self.log.debug("Files to convert: {}".format(sequence_files))
                for file_name in sequence_files:
                    if isinstance(file_name, clique.Collection):
                        # Convert to filepath that can be directly converted
                        # by oiio like `frame.1001-1025%04d.exr`
                        file_name: str = file_name.format(
                            "{head}{range}{padding}{tail}"
                        )

                    self.log.debug("Transcoding file: `{}`".format(file_name))
                    input_path = os.path.join(original_staging_dir,
                                              file_name)
                    output_path = self._get_output_file_path(input_path,
                                                             new_staging_dir,
                                                             output_extension)

                    # TODO: Support formatting with dynamic keys from the
                    #   representation, like e.g. colorspace config, display,
                    #   view, etc.
                    input_arguments: list[str] = output_def.get(
                        "input_arguments", []
                    )
                    output_arguments: list[str] = output_def.get(
                        "output_arguments", []
                    )

                    # Prepare subprocess arguments
                    oiio_cmd = get_oiio_tool_args(
                        "oiiotool",
                        *input_arguments,
                        input_path,
                        *output_arguments,
                        "-o",
                        output_path
                    )

                    self.log.debug(
                        "Conversion command: {}".format(" ".join(oiio_cmd)))
                    run_subprocess(oiio_cmd, logger=self.log)

                # cleanup temporary transcoded files
                for file_name in new_repre["files"]:
                    transcoded_file_path = os.path.join(new_staging_dir,
                                                        file_name)
                    instance.context.data["cleanupFullPaths"].append(
                        transcoded_file_path)

                custom_tags = output_def.get("custom_tags")
                if custom_tags:
                    if new_repre.get("custom_tags") is None:
                        new_repre["custom_tags"] = []
                    new_repre["custom_tags"].extend(custom_tags)

                # Add additional tags from output definition to representation
                if new_repre.get("tags") is None:
                    new_repre["tags"] = []
                for tag in output_def["tags"]:
                    if tag not in new_repre["tags"]:
                        new_repre["tags"].append(tag)

                    if tag == "review":
                        added_review = True

                # If there is only 1 file outputted then convert list to
                # string, because that'll indicate that it is not a sequence.
                if len(new_repre["files"]) == 1:
                    new_repre["files"] = new_repre["files"][0]

                # If the source representation has "review" tag, but it's not
                # part of the output definition tags, then both the
                # representations will be transcoded in ExtractReview and
                # their outputs will clash in integration.
                if "review" in repre.get("tags", []):
                    added_review = True

                new_representations.append(new_repre)
                added_representations = True

            if added_representations:
                self._mark_original_repre_for_deletion(
                    repre, profile, added_review
                )

                tags = repre.get("tags") or []
                # Keep thumbnails around even when marked for deletion.
                if "delete" in tags and "thumbnail" not in tags:
                    instance.data["representations"].remove(repre)

        instance.data["representations"].extend(new_representations)

    def _rename_in_representation(self, new_repre, files_to_convert,
                                  output_name, output_extension):
        """Replace old extension with new one everywhere in representation.

        Mutates `new_repre` in place.

        Args:
            new_repre (dict): Representation to rename.
            files_to_convert (list): of filenames from repre["files"],
                standardized to always list
            output_name (str): key of output definition from Settings,
                if "<passthrough>" token used, keep original repre name
            output_extension (str): extension from output definition
        """
        if output_name != "passthrough":
            new_repre["name"] = output_name
        # No extension change requested; keep original files/ext untouched.
        if not output_extension:
            return

        new_repre["ext"] = output_extension
        new_repre["outputName"] = output_name

        renamed_files = []
        for file_name in files_to_convert:
            file_name, _ = os.path.splitext(file_name)
            file_name = '{}.{}'.format(file_name,
                                       output_extension)
            renamed_files.append(file_name)
        new_repre["files"] = renamed_files

    def _translate_to_sequence(self, files_to_convert):
        """Returns original list or a clique.Collection of a sequence.

        Uses clique to find frame sequence Collection.
        If sequence not found, it returns original list.

        Args:
            files_to_convert (list): list of file names
        Returns:
            list[str | clique.Collection]: List of filepaths or a list
                of Collections (usually one, unless there are holes)
        """
        pattern = [clique.PATTERNS["frames"]]
        collections, _ = clique.assemble(
            files_to_convert, patterns=pattern,
            assume_padded_when_ambiguous=True)
        if collections:
            if len(collections) > 1:
                raise ValueError(
                    "Too many collections {}".format(collections))

            collection = collections[0]
            # TODO: Technically oiiotool supports holes in the sequence as well
            #   using the dedicated --frames argument to specify the frames.
            #   We may want to use that too so conversions of sequences with
            #   holes will perform faster as well.
            # Separate the collection so that we have no holes/gaps per
            # collection.
            return collection.separate()

        return files_to_convert

    def _get_output_file_path(self, input_path, output_dir,
                              output_extension):
        """Create output file name path.

        Args:
            input_path (str): Source file path; only the base name is used.
            output_dir (str): Target (staging) directory.
            output_extension (str): Extension without a dot. When empty the
                input file's own extension is kept.

        Returns:
            str: Full path of the output file in `output_dir`.
        """
        file_name = os.path.basename(input_path)
        file_name, input_extension = os.path.splitext(file_name)
        if not output_extension:
            output_extension = input_extension.replace(".", "")
        new_file_name = '{}.{}'.format(file_name,
                                       output_extension)
        return os.path.join(output_dir, new_file_name)

    def _get_profile(
        self,
        instance: pyblish.api.Instance,
        repre: dict
    ) -> Optional[dict[str, Any]]:
        """Returns profile if it should process this instance.

        Matches settings profiles against host, product, task and the
        representation's name/extension. Returns None when nothing matches.
        """
        host_name = instance.context.data["hostName"]
        product_type = instance.data["productType"]
        product_name = instance.data["productName"]
        task_data = instance.data["anatomyData"].get("task", {})
        task_name = task_data.get("name")
        task_type = task_data.get("type")
        repre_name: str = repre["name"]
        repre_ext: str = repre["ext"]
        filtering_criteria = {
            "host_names": host_name,
            "product_types": product_type,
            "product_names": product_name,
            "task_names": task_name,
            "task_types": task_type,
            "representation_names": repre_name,
            "representation_exts": repre_ext,
        }
        profile = filter_profiles(self.profiles, filtering_criteria,
                                  logger=self.log)

        if not profile:
            self.log.debug(
                "Skipped instance. None of profiles in presets are for"
                f" Host: \"{host_name}\" |"
                f" Product types: \"{product_type}\" |"
                f" Product names: \"{product_name}\" |"
                f" Task name \"{task_name}\" |"
                f" Task type \"{task_type}\" |"
                f" Representation: \"{repre_name}\" (.{repre_ext})"
            )

        return profile

    def _repre_is_valid(self, repre: dict) -> bool:
        """Validation if representation should be processed.

        Args:
            repre (dict): Representation which should be checked.

        Returns:
            bool: False if can't be processed else True.
        """
        if repre.get("ext") not in self.supported_exts:
            self.log.debug((
                "Representation '{}' has unsupported extension: '{}'. Skipped."
            ).format(repre["name"], repre.get("ext")))
            return False

        if not repre.get("files"):
            self.log.debug((
                "Representation '{}' has empty files. Skipped."
            ).format(repre["name"]))
            return False

        if "delete" in repre.get("tags", []):
            self.log.debug((
                "Representation '{}' has 'delete' tag. Skipped."
            ).format(repre["name"]))
            return False

        return True

    def _mark_original_repre_for_deletion(
        self,
        repre: dict,
        profile: dict,
        added_review: bool
    ):
        """If new transcoded representation created, delete old.

        Tags the source representation with "delete" when the profile asks
        for it and removes its "review" tag when a new review output exists.
        Mutates `repre` in place.
        """
        if not repre.get("tags"):
            repre["tags"] = []

        delete_original = profile["delete_original"]

        if delete_original:
            if "delete" not in repre["tags"]:
                repre["tags"].append("delete")

        if added_review and "review" in repre["tags"]:
            repre["tags"].remove("review")
|
|
@ -163,7 +163,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
"flame",
|
||||
"unreal",
|
||||
"batchdelivery",
|
||||
"photoshop"
|
||||
"photoshop",
|
||||
"substancepainter",
|
||||
]
|
||||
|
||||
settings_category = "core"
|
||||
|
|
@ -400,6 +401,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
new_staging_dir,
|
||||
self.log
|
||||
)
|
||||
# The OIIO conversion will remap the RGBA channels just to
|
||||
# `R,G,B,A` so we will pass the intermediate file to FFMPEG
|
||||
# without layer name.
|
||||
layer_name = ""
|
||||
|
||||
try:
|
||||
self._render_output_definitions(
|
||||
|
|
|
|||
|
|
@ -48,6 +48,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
|
|||
"unreal",
|
||||
"houdini",
|
||||
"batchdelivery",
|
||||
"webpublisher",
|
||||
]
|
||||
settings_category = "core"
|
||||
enabled = False
|
||||
|
|
|
|||
|
|
@ -25,7 +25,8 @@ try:
|
|||
variant_nested_prim_path,
|
||||
setup_asset_layer,
|
||||
add_ordered_sublayer,
|
||||
set_layer_defaults
|
||||
set_layer_defaults,
|
||||
get_standard_default_prim_name
|
||||
)
|
||||
except ImportError:
|
||||
pass
|
||||
|
|
@ -176,7 +177,12 @@ def get_instance_uri_path(
|
|||
|
||||
# If for whatever reason we were unable to retrieve from the context
|
||||
# then get the path from an existing database entry
|
||||
path = get_representation_path_by_names(**query)
|
||||
path = get_representation_path_by_names(
|
||||
anatomy=context.data["anatomy"],
|
||||
**names
|
||||
)
|
||||
if not path:
|
||||
raise RuntimeError(f"Unable to resolve publish path for: {names}")
|
||||
|
||||
# Ensure `None` for now is also a string
|
||||
path = str(path)
|
||||
|
|
@ -494,7 +500,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
|||
"asset"
|
||||
if profile.get("contribution_target_product") == "usdAsset"
|
||||
else "shot")
|
||||
init_as_visible = False
|
||||
init_as_visible = True
|
||||
|
||||
# Attributes logic
|
||||
publish_attributes = instance["publish_attributes"].get(
|
||||
|
|
@ -640,6 +646,7 @@ class ExtractUSDLayerContribution(publish.Extractor):
|
|||
settings_category = "core"
|
||||
|
||||
use_ayon_entity_uri = False
|
||||
enforce_default_prim = False
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
|
|
@ -650,9 +657,18 @@ class ExtractUSDLayerContribution(publish.Extractor):
|
|||
path = get_last_publish(instance)
|
||||
if path and BUILD_INTO_LAST_VERSIONS:
|
||||
sdf_layer = Sdf.Layer.OpenAsAnonymous(path)
|
||||
|
||||
# If enabled in settings, ignore any default prim specified on
|
||||
# older publish versions and always publish with the AYON
|
||||
# standard default prim
|
||||
if self.enforce_default_prim:
|
||||
sdf_layer.defaultPrim = get_standard_default_prim_name(
|
||||
folder_path
|
||||
)
|
||||
|
||||
default_prim = sdf_layer.defaultPrim
|
||||
else:
|
||||
default_prim = folder_path.rsplit("/", 1)[-1] # use folder name
|
||||
default_prim = get_standard_default_prim_name(folder_path)
|
||||
sdf_layer = Sdf.Layer.CreateAnonymous()
|
||||
set_layer_defaults(sdf_layer, default_prim=default_prim)
|
||||
|
||||
|
|
@ -810,7 +826,7 @@ class ExtractUSDAssetContribution(publish.Extractor):
|
|||
folder_path = instance.data["folderPath"]
|
||||
product_name = instance.data["productName"]
|
||||
self.log.debug(f"Building asset: {folder_path} > {product_name}")
|
||||
folder_name = folder_path.rsplit("/", 1)[-1]
|
||||
asset_name = get_standard_default_prim_name(folder_path)
|
||||
|
||||
# Contribute layers to asset
|
||||
# Use existing asset and add to it, or initialize a new asset layer
|
||||
|
|
@ -828,8 +844,9 @@ class ExtractUSDAssetContribution(publish.Extractor):
|
|||
# If no existing publish of this product exists then we initialize
|
||||
# the layer as either a default asset or shot structure.
|
||||
init_type = instance.data["contribution_target_product_init"]
|
||||
self.log.debug("Initializing layer as type: %s", init_type)
|
||||
asset_layer, payload_layer = self.init_layer(
|
||||
asset_name=folder_name, init_type=init_type
|
||||
asset_name=asset_name, init_type=init_type
|
||||
)
|
||||
|
||||
# Author timeCodesPerSecond and framesPerSecond if the asset layer
|
||||
|
|
@ -909,7 +926,7 @@ class ExtractUSDAssetContribution(publish.Extractor):
|
|||
payload_layer.Export(payload_path, args={"format": "usda"})
|
||||
self.add_relative_file(instance, payload_path)
|
||||
|
||||
def init_layer(self, asset_name, init_type):
|
||||
def init_layer(self, asset_name: str, init_type: str):
|
||||
"""Initialize layer if no previous version exists"""
|
||||
|
||||
if init_type == "asset":
|
||||
|
|
|
|||
|
|
@ -28,6 +28,7 @@ from ayon_core.pipeline.publish import (
|
|||
KnownPublishError,
|
||||
get_publish_template_name,
|
||||
)
|
||||
from pipeline import is_product_base_type_supported
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -367,6 +368,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
|
|||
folder_entity = instance.data["folderEntity"]
|
||||
product_name = instance.data["productName"]
|
||||
product_type = instance.data["productType"]
|
||||
product_base_type = instance.data.get("productBaseType")
|
||||
|
||||
self.log.debug("Product: {}".format(product_name))
|
||||
|
||||
# Get existing product if it exists
|
||||
|
|
@ -394,14 +397,33 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
|
|||
product_id = None
|
||||
if existing_product_entity:
|
||||
product_id = existing_product_entity["id"]
|
||||
product_entity = new_product_entity(
|
||||
product_name,
|
||||
product_type,
|
||||
folder_entity["id"],
|
||||
data=data,
|
||||
attribs=attributes,
|
||||
entity_id=product_id
|
||||
)
|
||||
|
||||
new_product_entity_kwargs = {
|
||||
"product_name": product_name,
|
||||
"product_type": product_type,
|
||||
"folder_id": folder_entity["id"],
|
||||
"data": data,
|
||||
"attribs": attributes,
|
||||
"entity_id": product_id,
|
||||
"product_base_type": product_base_type,
|
||||
}
|
||||
|
||||
if not is_product_base_type_supported():
|
||||
new_product_entity_kwargs.pop("product_base_type")
|
||||
if (
|
||||
product_base_type is not None
|
||||
and product_base_type != product_type):
|
||||
self.log.warning((
|
||||
"Product base type %s is not supported by the server, "
|
||||
"but it's defined - and it differs from product type %s. "
|
||||
"Using product base type as product type."
|
||||
), product_base_type, product_type)
|
||||
|
||||
new_product_entity_kwargs["product_type"] = (
|
||||
product_base_type
|
||||
)
|
||||
|
||||
product_entity = new_product_entity(**new_product_entity_kwargs)
|
||||
|
||||
if existing_product_entity is None:
|
||||
# Create a new product
|
||||
|
|
@ -927,6 +949,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
|
|||
host_name = context.data["hostName"]
|
||||
anatomy_data = instance.data["anatomyData"]
|
||||
product_type = instance.data["productType"]
|
||||
product_base_type = instance.data.get("productBaseType")
|
||||
task_info = anatomy_data.get("task") or {}
|
||||
|
||||
return get_publish_template_name(
|
||||
|
|
@ -936,7 +959,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
|
|||
task_name=task_info.get("name"),
|
||||
task_type=task_info.get("type"),
|
||||
project_settings=context.data["project_settings"],
|
||||
logger=self.log
|
||||
logger=self.log,
|
||||
product_base_type=product_base_type
|
||||
)
|
||||
|
||||
def get_rootless_path(self, anatomy, path):
|
||||
|
|
|
|||
|
|
@ -1114,6 +1114,8 @@ class SceneInventoryView(QtWidgets.QTreeView):
|
|||
try:
|
||||
for item_id, item_version in zip(item_ids, versions):
|
||||
container = containers_by_id[item_id]
|
||||
if container.get("version_locked"):
|
||||
continue
|
||||
try:
|
||||
update_container(container, item_version)
|
||||
except Exception as exc:
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Package declaring AYON addon 'core' version."""
|
||||
__version__ = "1.6.11+dev"
|
||||
__version__ = "1.6.13+dev"
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ name="core"
|
|||
description="AYON core addon."
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.9.1,<3.10"
|
||||
markdown = "^3.4.1"
|
||||
clique = "1.6.*"
|
||||
jsonschema = "^2.6.0"
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
name = "core"
|
||||
title = "Core"
|
||||
version = "1.6.11+dev"
|
||||
version = "1.6.13+dev"
|
||||
|
||||
client_dir = "ayon_core"
|
||||
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@
|
|||
|
||||
[tool.poetry]
|
||||
name = "ayon-core"
|
||||
version = "1.6.11+dev"
|
||||
version = "1.6.13+dev"
|
||||
description = ""
|
||||
authors = ["Ynput Team <team@ynput.io>"]
|
||||
readme = "README.md"
|
||||
|
|
|
|||
|
|
@ -164,5 +164,6 @@ def convert_settings_overrides(
|
|||
) -> dict[str, Any]:
|
||||
_convert_imageio_configs_0_3_1(overrides)
|
||||
_convert_imageio_configs_0_4_5(overrides)
|
||||
_convert_imageio_configs_1_6_5(overrides)
|
||||
_convert_publish_plugins(overrides)
|
||||
return overrides
|
||||
|
|
|
|||
|
|
@ -59,6 +59,7 @@ def _ocio_config_profile_types():
|
|||
{"value": "builtin_path", "label": "AYON built-in OCIO config"},
|
||||
{"value": "custom_path", "label": "Path to OCIO config"},
|
||||
{"value": "published_product", "label": "Published product"},
|
||||
{"value": "disabled", "label": "Disable OCIO management"},
|
||||
]
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -251,6 +251,19 @@ class AyonEntityURIModel(BaseSettingsModel):
|
|||
)
|
||||
|
||||
|
||||
class ExtractUSDLayerContributionModel(AyonEntityURIModel):
|
||||
enforce_default_prim: bool = SettingsField(
|
||||
title="Always set default prim to folder name.",
|
||||
description=(
|
||||
"When enabled ignore any default prim specified on older "
|
||||
"published versions of a layer and always override it to the "
|
||||
"AYON standard default prim. When disabled, preserve default prim "
|
||||
"on the layer and then only the initial version would be setting "
|
||||
"the AYON standard default prim."
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class PluginStateByHostModelProfile(BaseSettingsModel):
|
||||
_layout = "expanded"
|
||||
# Filtering
|
||||
|
|
@ -443,7 +456,7 @@ class UseDisplayViewModel(BaseSettingsModel):
|
|||
title="Target Display",
|
||||
description=(
|
||||
"Display of the target transform. If left empty, the"
|
||||
" source Display value will be used."
|
||||
" scene Display value will be used."
|
||||
)
|
||||
)
|
||||
view: str = SettingsField(
|
||||
|
|
@ -451,7 +464,7 @@ class UseDisplayViewModel(BaseSettingsModel):
|
|||
title="Target View",
|
||||
description=(
|
||||
"View of the target transform. If left empty, the"
|
||||
" source View value will be used."
|
||||
" scene View value will be used."
|
||||
)
|
||||
)
|
||||
|
||||
|
|
@ -565,12 +578,125 @@ class ExtractOIIOTranscodeProfileModel(BaseSettingsModel):
|
|||
|
||||
|
||||
class ExtractOIIOTranscodeModel(BaseSettingsModel):
|
||||
"""Color conversion transcoding using OIIO for images mostly aimed at
|
||||
transcoding for reviewables (it'll process and output only RGBA channels).
|
||||
"""
|
||||
enabled: bool = SettingsField(True)
|
||||
profiles: list[ExtractOIIOTranscodeProfileModel] = SettingsField(
|
||||
default_factory=list, title="Profiles"
|
||||
)
|
||||
|
||||
|
||||
class ExtractOIIOPostProcessOutputModel(BaseSettingsModel):
|
||||
_layout = "expanded"
|
||||
name: str = SettingsField(
|
||||
"",
|
||||
title="Name",
|
||||
description="Output name (no space)",
|
||||
regex=r"[a-zA-Z0-9_]([a-zA-Z0-9_\.\-]*[a-zA-Z0-9_])?$",
|
||||
)
|
||||
extension: str = SettingsField(
|
||||
"",
|
||||
title="Extension",
|
||||
description=(
|
||||
"Target extension. If left empty, original"
|
||||
" extension is used."
|
||||
),
|
||||
)
|
||||
input_arguments: list[str] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Input arguments",
|
||||
description="Arguments passed prior to the input file argument.",
|
||||
)
|
||||
output_arguments: list[str] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Output arguments",
|
||||
description="Arguments passed prior to the -o argument.",
|
||||
)
|
||||
tags: list[str] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Tags",
|
||||
description=(
|
||||
"Additional tags that will be added to the created representation."
|
||||
"\nAdd *review* tag to create review from the transcoded"
|
||||
" representation instead of the original."
|
||||
)
|
||||
)
|
||||
custom_tags: list[str] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Custom Tags",
|
||||
description=(
|
||||
"Additional custom tags that will be added"
|
||||
" to the created representation."
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class ExtractOIIOPostProcessProfileModel(BaseSettingsModel):
|
||||
host_names: list[str] = SettingsField(
|
||||
section="Profile",
|
||||
default_factory=list,
|
||||
title="Host names"
|
||||
)
|
||||
task_types: list[str] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Task types",
|
||||
enum_resolver=task_types_enum
|
||||
)
|
||||
task_names: list[str] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Task names"
|
||||
)
|
||||
product_types: list[str] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Product types"
|
||||
)
|
||||
product_names: list[str] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Product names"
|
||||
)
|
||||
representation_names: list[str] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Representation names",
|
||||
)
|
||||
representation_exts: list[str] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Representation extensions",
|
||||
)
|
||||
delete_original: bool = SettingsField(
|
||||
True,
|
||||
title="Delete Original Representation",
|
||||
description=(
|
||||
"Choose to preserve or remove the original representation.\n"
|
||||
"Keep in mind that if the transcoded representation includes"
|
||||
" a `review` tag, it will take precedence over"
|
||||
" the original for creating reviews."
|
||||
),
|
||||
section="Conversion Outputs",
|
||||
)
|
||||
outputs: list[ExtractOIIOPostProcessOutputModel] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Output Definitions",
|
||||
)
|
||||
|
||||
@validator("outputs")
|
||||
def validate_unique_outputs(cls, value):
|
||||
ensure_unique_names(value)
|
||||
return value
|
||||
|
||||
|
||||
class ExtractOIIOPostProcessModel(BaseSettingsModel):
|
||||
"""Process representation images with `oiiotool` on publish.
|
||||
|
||||
This could be used to convert images to different formats, convert to
|
||||
scanline images or flatten deep images.
|
||||
"""
|
||||
enabled: bool = SettingsField(True)
|
||||
profiles: list[ExtractOIIOPostProcessProfileModel] = SettingsField(
|
||||
default_factory=list, title="Profiles"
|
||||
)
|
||||
|
||||
|
||||
# --- [START] Extract Review ---
|
||||
class ExtractReviewFFmpegModel(BaseSettingsModel):
|
||||
video_filters: list[str] = SettingsField(
|
||||
|
|
@ -1122,6 +1248,10 @@ class PublishPuginsModel(BaseSettingsModel):
|
|||
default_factory=ExtractOIIOTranscodeModel,
|
||||
title="Extract OIIO Transcode"
|
||||
)
|
||||
ExtractOIIOPostProcess: ExtractOIIOPostProcessModel = SettingsField(
|
||||
default_factory=ExtractOIIOPostProcessModel,
|
||||
title="Extract OIIO Post Process"
|
||||
)
|
||||
ExtractReview: ExtractReviewModel = SettingsField(
|
||||
default_factory=ExtractReviewModel,
|
||||
title="Extract Review"
|
||||
|
|
@ -1134,9 +1264,11 @@ class PublishPuginsModel(BaseSettingsModel):
|
|||
default_factory=AyonEntityURIModel,
|
||||
title="Extract USD Asset Contribution",
|
||||
)
|
||||
ExtractUSDLayerContribution: AyonEntityURIModel = SettingsField(
|
||||
default_factory=AyonEntityURIModel,
|
||||
title="Extract USD Layer Contribution",
|
||||
ExtractUSDLayerContribution: ExtractUSDLayerContributionModel = (
|
||||
SettingsField(
|
||||
default_factory=ExtractUSDLayerContributionModel,
|
||||
title="Extract USD Layer Contribution",
|
||||
)
|
||||
)
|
||||
PreIntegrateThumbnails: PreIntegrateThumbnailsModel = SettingsField(
|
||||
default_factory=PreIntegrateThumbnailsModel,
|
||||
|
|
@ -1347,6 +1479,10 @@ DEFAULT_PUBLISH_VALUES = {
|
|||
"enabled": True,
|
||||
"profiles": []
|
||||
},
|
||||
"ExtractOIIOPostProcess": {
|
||||
"enabled": True,
|
||||
"profiles": []
|
||||
},
|
||||
"ExtractReview": {
|
||||
"enabled": True,
|
||||
"profiles": [
|
||||
|
|
@ -1448,6 +1584,105 @@ DEFAULT_PUBLISH_VALUES = {
|
|||
"fill_missing_frames": "closest_existing"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"product_types": [],
|
||||
"hosts": ["substancepainter"],
|
||||
"task_types": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "png",
|
||||
"ext": "png",
|
||||
"tags": [
|
||||
"ftrackreview",
|
||||
"kitsureview",
|
||||
"webreview"
|
||||
],
|
||||
"burnins": [],
|
||||
"ffmpeg_args": {
|
||||
"video_filters": [],
|
||||
"audio_filters": [],
|
||||
"input": [],
|
||||
"output": []
|
||||
},
|
||||
"filter": {
|
||||
"families": [
|
||||
"render",
|
||||
"review",
|
||||
"ftrack"
|
||||
],
|
||||
"product_names": [],
|
||||
"custom_tags": [],
|
||||
"single_frame_filter": "single_frame"
|
||||
},
|
||||
"overscan_crop": "",
|
||||
# "overscan_color": [0, 0, 0],
|
||||
"overscan_color": [0, 0, 0, 0.0],
|
||||
"width": 1920,
|
||||
"height": 1080,
|
||||
"scale_pixel_aspect": True,
|
||||
"bg_color": [0, 0, 0, 0.0],
|
||||
"letter_box": {
|
||||
"enabled": False,
|
||||
"ratio": 0.0,
|
||||
"fill_color": [0, 0, 0, 1.0],
|
||||
"line_thickness": 0,
|
||||
"line_color": [255, 0, 0, 1.0]
|
||||
},
|
||||
"fill_missing_frames": "only_rendered"
|
||||
},
|
||||
{
|
||||
"name": "h264",
|
||||
"ext": "mp4",
|
||||
"tags": [
|
||||
"burnin",
|
||||
"ftrackreview",
|
||||
"kitsureview",
|
||||
"webreview"
|
||||
],
|
||||
"burnins": [],
|
||||
"ffmpeg_args": {
|
||||
"video_filters": [],
|
||||
"audio_filters": [],
|
||||
"input": [
|
||||
"-apply_trc gamma22"
|
||||
],
|
||||
"output": [
|
||||
"-pix_fmt yuv420p",
|
||||
"-crf 18",
|
||||
"-c:a aac",
|
||||
"-b:a 192k",
|
||||
"-g 1",
|
||||
"-movflags faststart"
|
||||
]
|
||||
},
|
||||
"filter": {
|
||||
"families": [
|
||||
"render",
|
||||
"review",
|
||||
"ftrack"
|
||||
],
|
||||
"product_names": [],
|
||||
"custom_tags": [],
|
||||
"single_frame_filter": "multi_frame"
|
||||
},
|
||||
"overscan_crop": "",
|
||||
# "overscan_color": [0, 0, 0],
|
||||
"overscan_color": [0, 0, 0, 0.0],
|
||||
"width": 0,
|
||||
"height": 0,
|
||||
"scale_pixel_aspect": True,
|
||||
"bg_color": [0, 0, 0, 0.0],
|
||||
"letter_box": {
|
||||
"enabled": False,
|
||||
"ratio": 0.0,
|
||||
"fill_color": [0, 0, 0, 1.0],
|
||||
"line_thickness": 0,
|
||||
"line_color": [255, 0, 0, 1.0]
|
||||
},
|
||||
"fill_missing_frames": "only_rendered"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
|
|
@ -1526,6 +1761,7 @@ DEFAULT_PUBLISH_VALUES = {
|
|||
},
|
||||
"ExtractUSDLayerContribution": {
|
||||
"use_ayon_entity_uri": False,
|
||||
"enforce_default_prim": False,
|
||||
},
|
||||
"PreIntegrateThumbnails": {
|
||||
"enabled": True,
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue