Merge branch 'develop' of https://github.com/ynput/ayon-core into bugfix/extract_oiio_transcode_apply_scene_display_view

# Conflicts:
#	client/ayon_core/pipeline/farm/pyblish_functions.py
Roy Nieterau 2025-11-28 15:46:11 +01:00
commit f5a139e61e
16 changed files with 732 additions and 58 deletions

View file

@ -35,6 +35,8 @@ body:
label: Version
description: What version are you running? You can find it in AYON Tray.
options:
- 1.6.11
- 1.6.10
- 1.6.9
- 1.6.8
- 1.6.7

View file

@ -602,24 +602,7 @@ def create_instances_for_aov(
"""
# we cannot attach AOVs to other products as we consider every
# AOV a product of its own.
log = Logger.get_logger("farm_publishing")
additional_color_data = {
"renderProducts": instance.data["renderProducts"],
"colorspaceConfig": instance.data["colorspaceConfig"],
"display": instance.data.get("sourceDisplay"),
"view": instance.data.get("sourceView")
}
# Get templated path from absolute config path.
anatomy = instance.context.data["anatomy"]
colorspace_template = instance.data["colorspaceConfig"]
try:
additional_color_data["colorspaceTemplate"] = remap_source(
colorspace_template, anatomy)
except ValueError as e:
log.warning(e)
additional_color_data["colorspaceTemplate"] = colorspace_template
# if there are products to attach to and more than one AOV,
# we cannot proceed.
@ -631,6 +614,29 @@ def create_instances_for_aov(
"attaching multiple AOVs or renderable cameras to "
"product is not supported yet.")
additional_data = {
"renderProducts": instance.data["renderProducts"],
}
# Collect color management data if present
colorspace_config = instance.data.get("colorspaceConfig")
if colorspace_config:
additional_data.update({
"colorspaceConfig": colorspace_config,
# Display/View are optional
"display": instance.data.get("sourceDisplay"),
"view": instance.data.get("sourceView")
})
# Get templated path from absolute config path.
anatomy = instance.context.data["anatomy"]
try:
additional_data["colorspaceTemplate"] = remap_source(
colorspace_config, anatomy)
except ValueError as e:
log.warning(e)
additional_data["colorspaceTemplate"] = colorspace_config
# create instances for every AOV we found in expected files.
# NOTE: this is done for every AOV and every render camera (if
# there are multiple renderable cameras in scene)
@ -638,7 +644,7 @@ def create_instances_for_aov(
instance,
skeleton,
aov_filter,
additional_color_data,
additional_data,
skip_integration_repre_list,
do_not_add_review,
frames_to_render
@ -949,16 +955,28 @@ def _create_instances_for_aov(
"stagingDir": staging_dir,
"fps": new_instance.get("fps"),
"tags": ["review"] if preview else [],
"colorspaceData": {
}
if colorspace and additional_data.get("colorspaceConfig"):
# Only apply colorspace data if the image has a colorspace
colorspace_data: dict = {
"colorspace": colorspace,
"config": {
"path": additional_data["colorspaceConfig"],
"template": additional_data["colorspaceTemplate"]
},
"display": additional_data["display"],
"view": additional_data["view"]
}
}
# Display/View are optional
display = additional_data.get("display")
if display:
colorspace_data["display"] = display
view = additional_data.get("view")
if view:
colorspace_data["view"] = view
rep["colorspaceData"] = colorspace_data
else:
log.debug("No colorspace data for representation: {}".format(rep))
# support conversion from tiled to scanline
if instance.data.get("convertToScanline"):
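For illustration, a minimal sketch (with hypothetical values, not part of this diff) of how the optional display/view data collected above ends up in a representation's `colorspaceData`:

```python
# Hypothetical instance data; key names mirror the hunk above.
additional_data = {
    "colorspaceConfig": "/abs/path/config.ocio",
    "colorspaceTemplate": "{root[work]}/config.ocio",
    "display": "ACES",
    # "view" intentionally missing to show the optional handling
}
colorspace = "ACEScg"
rep = {"name": "beauty", "ext": "exr"}

if colorspace and additional_data.get("colorspaceConfig"):
    colorspace_data = {
        "colorspace": colorspace,
        "config": {
            "path": additional_data["colorspaceConfig"],
            "template": additional_data["colorspaceTemplate"],
        },
    }
    # Display/View are only written when they were collected
    for key in ("display", "view"):
        value = additional_data.get(key)
        if value:
            colorspace_data[key] = value
    rep["colorspaceData"] = colorspace_data

# rep["colorspaceData"] now carries "display" but no "view" key
```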

View file

@ -684,3 +684,20 @@ def get_sdf_format_args(path):
"""Return SDF_FORMAT_ARGS parsed to `dict`"""
_raw_path, data = Sdf.Layer.SplitIdentifier(path)
return data
def get_standard_default_prim_name(folder_path: str) -> str:
"""Return the AYON-specified default prim name for a folder path.
This is used e.g. for the default prim in AYON USD Contribution workflows.
"""
folder_name: str = folder_path.rsplit("/", 1)[-1]
# Prim names are not allowed to start with a digit in USD. Authoring them
# would mean generating essentially garbage data and may result in
# unexpected behavior in certain USD or DCC versions, like failure to
# refresh in usdview or crashes in Houdini 21.
if folder_name and folder_name[0].isdigit():
folder_name = f"_{folder_name}"
return folder_name
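A short usage sketch of the new helper with made-up folder paths:

```python
# Regular folder names pass through unchanged
assert get_standard_default_prim_name("/assets/char_hero") == "char_hero"
# A leading digit is not a valid start for a USD prim name, so it is prefixed
assert get_standard_default_prim_name("/shots/sq010/0100") == "_0100"
```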

View file

@ -840,14 +840,24 @@ class AbstractTemplateBuilder(ABC):
host_name = self.host_name
task_name = self.current_task_name
task_type = self.current_task_type
folder_path = self.current_folder_path
folder_type = None
folder_entity = self.current_folder_entity
if folder_entity:
folder_type = folder_entity["folderType"]
filter_data = {
"task_types": task_type,
"task_names": task_name,
"folder_types": folder_type,
"folder_paths": folder_path,
}
build_profiles = self._get_build_profiles()
profile = filter_profiles(
build_profiles,
{
"task_types": task_type,
"task_names": task_name
}
filter_data,
logger=self.log
)
if not profile:
raise TemplateProfileNotFound((
@ -1677,6 +1687,8 @@ class PlaceholderLoadMixin(object):
for version in get_last_versions(
project_name, filtered_product_ids, fields={"id"}
).values()
# Version may be None if a product has no versions
if version is not None
)
return list(get_representations(
project_name,
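To illustrate the extended build-profile filtering earlier in this file's diff, a minimal, hypothetical sketch of how `filter_profiles` would now pick a profile; the profile contents are made up, only the filtering keys mirror the hunk:

```python
from ayon_core.lib.profiles_filtering import filter_profiles

# Hypothetical build profiles (empty lists mean "match anything")
build_profiles = [
    {"task_types": [], "task_names": [], "folder_types": [],
     "folder_paths": [], "label": "generic"},
    {"task_types": ["Compositing"], "task_names": [],
     "folder_types": ["Shot"], "folder_paths": [], "label": "shot_comp"},
]

filter_data = {
    "task_types": "Compositing",
    "task_names": "comp",
    "folder_types": "Shot",
    "folder_paths": "/shots/sq010/sh0010",
}
# The most specific matching profile wins; here the "shot_comp" one
profile = filter_profiles(build_profiles, filter_data)
```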

View file

@ -11,20 +11,6 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder + 0.0001
label = "Collect Versions Loaded in Scene"
hosts = [
"aftereffects",
"blender",
"celaction",
"fusion",
"harmony",
"hiero",
"houdini",
"maya",
"nuke",
"photoshop",
"resolve",
"tvpaint"
]
def process(self, context):
host = registered_host()

View file

@ -0,0 +1,353 @@
from __future__ import annotations
from typing import Any, Optional

import os
import copy

import clique
import pyblish.api

from ayon_core.pipeline import (
    publish,
    get_temp_dir
)
from ayon_core.lib import (
    is_oiio_supported,
    get_oiio_tool_args,
    run_subprocess
)
from ayon_core.lib.transcoding import IMAGE_EXTENSIONS
from ayon_core.lib.profiles_filtering import filter_profiles


class ExtractOIIOPostProcess(publish.Extractor):
    """Process representations through `oiiotool` with profile-defined
    settings so that e.g. color space conversions can be applied or images
    can be converted to scanline, resized, etc., regardless of colorspace
    data.
    """

    label = "OIIO Post Process"
    order = pyblish.api.ExtractorOrder + 0.020
    settings_category = "core"
    optional = True

    # Supported extensions
    supported_exts = {ext.lstrip(".") for ext in IMAGE_EXTENSIONS}

    # Configurable by Settings
    profiles = None
    options = None
    def process(self, instance):
        if instance.data.get("farm"):
            self.log.debug("Should be processed on farm, skipping.")
            return

        if not self.profiles:
            self.log.debug("No profiles present for OIIO Post Process")
            return

        if not instance.data.get("representations"):
            self.log.debug("No representations, skipping.")
            return

        if not is_oiio_supported():
            self.log.warning("OIIO not supported, no transcoding possible.")
            return

        new_representations = []
        for idx, repre in enumerate(list(instance.data["representations"])):
            self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
            if not self._repre_is_valid(repre):
                continue

            # We check profile per representation name and extension because
            # it's included in the profile check. As such, an instance may
            # have a different profile applied per representation.
            profile = self._get_profile(
                instance,
                repre
            )
            if not profile:
                continue

            # Get representation files to convert
            if isinstance(repre["files"], list):
                repre_files_to_convert = copy.deepcopy(repre["files"])
            else:
                repre_files_to_convert = [repre["files"]]

            added_representations = False
            added_review = False

            # Process each output definition
            for output_def in profile["outputs"]:
                # Local copy to avoid accidental mutable changes
                files_to_convert = list(repre_files_to_convert)

                output_name = output_def["name"]
                new_repre = copy.deepcopy(repre)

                original_staging_dir = new_repre["stagingDir"]
                new_staging_dir = get_temp_dir(
                    project_name=instance.context.data["projectName"],
                    use_local_temp=True,
                )
                new_repre["stagingDir"] = new_staging_dir

                output_extension = output_def["extension"]
                output_extension = output_extension.replace('.', '')
                self._rename_in_representation(new_repre,
                                               files_to_convert,
                                               output_name,
                                               output_extension)

                sequence_files = self._translate_to_sequence(files_to_convert)
                self.log.debug("Files to convert: {}".format(sequence_files))
                for file_name in sequence_files:
                    if isinstance(file_name, clique.Collection):
                        # Convert to filepath that can be directly converted
                        # by oiio like `frame.1001-1025%04d.exr`
                        file_name: str = file_name.format(
                            "{head}{range}{padding}{tail}"
                        )

                    self.log.debug("Transcoding file: `{}`".format(file_name))
                    input_path = os.path.join(original_staging_dir,
                                              file_name)
                    output_path = self._get_output_file_path(input_path,
                                                             new_staging_dir,
                                                             output_extension)

                    # TODO: Support formatting with dynamic keys from the
                    #  representation, like e.g. colorspace config, display,
                    #  view, etc.
                    input_arguments: list[str] = output_def.get(
                        "input_arguments", []
                    )
                    output_arguments: list[str] = output_def.get(
                        "output_arguments", []
                    )

                    # Prepare subprocess arguments
                    oiio_cmd = get_oiio_tool_args(
                        "oiiotool",
                        *input_arguments,
                        input_path,
                        *output_arguments,
                        "-o",
                        output_path
                    )

                    self.log.debug(
                        "Conversion command: {}".format(" ".join(oiio_cmd)))
                    run_subprocess(oiio_cmd, logger=self.log)

                # cleanup temporary transcoded files
                for file_name in new_repre["files"]:
                    transcoded_file_path = os.path.join(new_staging_dir,
                                                        file_name)
                    instance.context.data["cleanupFullPaths"].append(
                        transcoded_file_path)

                custom_tags = output_def.get("custom_tags")
                if custom_tags:
                    if new_repre.get("custom_tags") is None:
                        new_repre["custom_tags"] = []
                    new_repre["custom_tags"].extend(custom_tags)

                # Add additional tags from output definition to representation
                if new_repre.get("tags") is None:
                    new_repre["tags"] = []
                for tag in output_def["tags"]:
                    if tag not in new_repre["tags"]:
                        new_repre["tags"].append(tag)

                    if tag == "review":
                        added_review = True

                # If there is only 1 file outputted then convert list to
                # string, because that'll indicate that it is not a sequence.
                if len(new_repre["files"]) == 1:
                    new_repre["files"] = new_repre["files"][0]

                # If the source representation has "review" tag, but it's not
                # part of the output definition tags, then both the
                # representations will be transcoded in ExtractReview and
                # their outputs will clash in integration.
                if "review" in repre.get("tags", []):
                    added_review = True

                new_representations.append(new_repre)
                added_representations = True

            if added_representations:
                self._mark_original_repre_for_deletion(
                    repre, profile, added_review
                )

            tags = repre.get("tags") or []
            if "delete" in tags and "thumbnail" not in tags:
                instance.data["representations"].remove(repre)

        instance.data["representations"].extend(new_representations)
    def _rename_in_representation(self, new_repre, files_to_convert,
                                  output_name, output_extension):
        """Replace old extension with new one everywhere in representation.

        Args:
            new_repre (dict)
            files_to_convert (list): List of filenames from repre["files"],
                standardized to always be a list.
            output_name (str): Key of output definition from Settings.
                If the "<passthrough>" token is used, the original repre
                name is kept.
            output_extension (str): Extension from output definition.
        """
        if output_name != "passthrough":
            new_repre["name"] = output_name

        if not output_extension:
            return

        new_repre["ext"] = output_extension
        new_repre["outputName"] = output_name

        renamed_files = []
        for file_name in files_to_convert:
            file_name, _ = os.path.splitext(file_name)
            file_name = '{}.{}'.format(file_name,
                                       output_extension)
            renamed_files.append(file_name)
        new_repre["files"] = renamed_files
    def _translate_to_sequence(self, files_to_convert):
        """Return the original list or clique Collections of a sequence.

        Uses clique to find a frame sequence Collection.
        If no sequence is found, the original list is returned.

        Args:
            files_to_convert (list): List of file names.

        Returns:
            list[str | clique.Collection]: List of filepaths or a list
                of Collections (usually one, unless there are holes).
        """
        pattern = [clique.PATTERNS["frames"]]
        collections, _ = clique.assemble(
            files_to_convert, patterns=pattern,
            assume_padded_when_ambiguous=True)
        if collections:
            if len(collections) > 1:
                raise ValueError(
                    "Too many collections {}".format(collections))

            collection = collections[0]
            # TODO: Technically oiiotool supports holes in the sequence as
            #  well using the dedicated --frames argument to specify the
            #  frames. We may want to use that too so conversions of
            #  sequences with holes will perform faster as well.
            # Separate the collection so that we have no holes/gaps per
            # collection.
            return collection.separate()

        return files_to_convert
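A small sketch of the clique handling described above, with made-up file names:

```python
import clique

files = ["beauty.1001.exr", "beauty.1002.exr", "beauty.1003.exr"]
collections, _remainder = clique.assemble(
    files,
    patterns=[clique.PATTERNS["frames"]],
    assume_padded_when_ambiguous=True,
)
collection = collections[0]
# Formats to a path oiiotool understands, e.g. "beauty.1001-1003%04d.exr"
print(collection.format("{head}{range}{padding}{tail}"))
```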
    def _get_output_file_path(self, input_path, output_dir,
                              output_extension):
        """Create the output file path."""
        file_name = os.path.basename(input_path)
        file_name, input_extension = os.path.splitext(file_name)
        if not output_extension:
            output_extension = input_extension.replace(".", "")
        new_file_name = '{}.{}'.format(file_name,
                                       output_extension)
        return os.path.join(output_dir, new_file_name)
    def _get_profile(
        self,
        instance: pyblish.api.Instance,
        repre: dict
    ) -> Optional[dict[str, Any]]:
        """Return the profile that should process this instance, if any."""
        host_name = instance.context.data["hostName"]
        product_type = instance.data["productType"]
        product_name = instance.data["productName"]
        task_data = instance.data["anatomyData"].get("task", {})
        task_name = task_data.get("name")
        task_type = task_data.get("type")
        repre_name: str = repre["name"]
        repre_ext: str = repre["ext"]
        filtering_criteria = {
            "host_names": host_name,
            "product_types": product_type,
            "product_names": product_name,
            "task_names": task_name,
            "task_types": task_type,
            "representation_names": repre_name,
            "representation_exts": repre_ext,
        }
        profile = filter_profiles(self.profiles, filtering_criteria,
                                  logger=self.log)

        if not profile:
            self.log.debug(
                "Skipped instance. None of the profiles in presets are for"
                f" Host: \"{host_name}\" |"
                f" Product types: \"{product_type}\" |"
                f" Product names: \"{product_name}\" |"
                f" Task name \"{task_name}\" |"
                f" Task type \"{task_type}\" |"
                f" Representation: \"{repre_name}\" (.{repre_ext})"
            )
        return profile
    def _repre_is_valid(self, repre: dict) -> bool:
        """Validate whether the representation should be processed.

        Args:
            repre (dict): Representation which should be checked.

        Returns:
            bool: False if it can't be processed, else True.
        """
        if repre.get("ext") not in self.supported_exts:
            self.log.debug((
                "Representation '{}' has unsupported extension: '{}'."
                " Skipped."
            ).format(repre["name"], repre.get("ext")))
            return False

        if not repre.get("files"):
            self.log.debug((
                "Representation '{}' has empty files. Skipped."
            ).format(repre["name"]))
            return False

        if "delete" in repre.get("tags", []):
            self.log.debug((
                "Representation '{}' has 'delete' tag. Skipped."
            ).format(repre["name"]))
            return False

        return True
    def _mark_original_repre_for_deletion(
        self,
        repre: dict,
        profile: dict,
        added_review: bool
    ):
        """If a new transcoded representation was created, mark the
        original representation for deletion (based on the profile).
        """
        if not repre.get("tags"):
            repre["tags"] = []

        delete_original = profile["delete_original"]
        if delete_original:
            if "delete" not in repre["tags"]:
                repre["tags"].append("delete")

        if added_review and "review" in repre["tags"]:
            repre["tags"].remove("review")

View file

@ -163,7 +163,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
"flame",
"unreal",
"batchdelivery",
"photoshop"
"photoshop",
"substancepainter",
]
settings_category = "core"

View file

@ -25,7 +25,8 @@ try:
variant_nested_prim_path,
setup_asset_layer,
add_ordered_sublayer,
set_layer_defaults
set_layer_defaults,
get_standard_default_prim_name
)
except ImportError:
pass
@ -176,7 +177,12 @@ def get_instance_uri_path(
# If for whatever reason we were unable to retrieve from the context
# then get the path from an existing database entry
path = get_representation_path_by_names(**query)
path = get_representation_path_by_names(
anatomy=context.data["anatomy"],
**names
)
if not path:
raise RuntimeError(f"Unable to resolve publish path for: {names}")
# Ensure `None` for now is also a string
path = str(path)
@ -640,6 +646,7 @@ class ExtractUSDLayerContribution(publish.Extractor):
settings_category = "core"
use_ayon_entity_uri = False
enforce_default_prim = False
def process(self, instance):
@ -650,9 +657,18 @@ class ExtractUSDLayerContribution(publish.Extractor):
path = get_last_publish(instance)
if path and BUILD_INTO_LAST_VERSIONS:
sdf_layer = Sdf.Layer.OpenAsAnonymous(path)
# If enabled in settings, ignore any default prim specified on
# older publish versions and always publish with the AYON
# standard default prim
if self.enforce_default_prim:
sdf_layer.defaultPrim = get_standard_default_prim_name(
folder_path
)
default_prim = sdf_layer.defaultPrim
else:
default_prim = folder_path.rsplit("/", 1)[-1] # use folder name
default_prim = get_standard_default_prim_name(folder_path)
sdf_layer = Sdf.Layer.CreateAnonymous()
set_layer_defaults(sdf_layer, default_prim=default_prim)
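A hedged sketch of the `enforce_default_prim` behavior (requires `pxr`; the helper's module path is assumed from the import hunk earlier in this commit):

```python
from pxr import Sdf

# Assumed import location of the helper added earlier in this commit
from ayon_core.pipeline.usdlib import get_standard_default_prim_name

# Hypothetical: an older publish authored a non-standard default prim
sdf_layer = Sdf.Layer.CreateAnonymous()
sdf_layer.defaultPrim = "legacy_root"

folder_path = "/assets/010_char"
enforce_default_prim = True

if enforce_default_prim:
    # Ignore the previously authored default prim and enforce the AYON one
    sdf_layer.defaultPrim = get_standard_default_prim_name(folder_path)

print(sdf_layer.defaultPrim)  # "_010_char"
```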
@ -810,7 +826,7 @@ class ExtractUSDAssetContribution(publish.Extractor):
folder_path = instance.data["folderPath"]
product_name = instance.data["productName"]
self.log.debug(f"Building asset: {folder_path} > {product_name}")
folder_name = folder_path.rsplit("/", 1)[-1]
asset_name = get_standard_default_prim_name(folder_path)
# Contribute layers to asset
# Use existing asset and add to it, or initialize a new asset layer
@ -829,7 +845,7 @@ class ExtractUSDAssetContribution(publish.Extractor):
# the layer as either a default asset or shot structure.
init_type = instance.data["contribution_target_product_init"]
asset_layer, payload_layer = self.init_layer(
asset_name=folder_name, init_type=init_type
asset_name=asset_name, init_type=init_type
)
# Author timeCodesPerSecond and framesPerSecond if the asset layer

View file

@ -457,6 +457,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
else:
version_data[key] = value
host_name = instance.context.data["hostName"]
version_data["host_name"] = host_name
version_entity = new_version_entity(
version_number,
product_entity["id"],

View file

@ -7,6 +7,7 @@ from typing import TYPE_CHECKING, Iterable, Optional
import arrow
import ayon_api
from ayon_api.graphql_queries import project_graphql_query
from ayon_api.operations import OperationsSession
from ayon_core.lib import NestedCacheItem
@ -202,7 +203,7 @@ class ProductsModel:
cache = self._product_type_items_cache[project_name]
if not cache.is_valid:
icons_mapping = self._get_product_type_icons(project_name)
product_types = ayon_api.get_project_product_types(project_name)
product_types = self._get_project_product_types(project_name)
cache.update_data([
ProductTypeItem(
product_type["name"],
@ -462,6 +463,24 @@ class ProductsModel:
PRODUCTS_MODEL_SENDER
)
def _get_project_product_types(self, project_name: str) -> list[dict]:
"""This is a temporary solution for product types fetching.
There was a bug in ayon_api.get_project(...) which did not use GraphQl
but REST instead. That is fixed in ayon-python-api 1.2.6 that will
be as part of ayon launcher 1.4.3 release.
"""
if not project_name:
return []
query = project_graphql_query({"productTypes.name"})
query.set_variable_value("projectName", project_name)
parsed_data = query.query(ayon_api.get_server_api_connection())
project = parsed_data["project"]
if project is None:
return []
return project["productTypes"]
def _get_product_type_icons(
self, project_name: Optional[str]
) -> ProductTypeIconMapping:

View file

@ -212,6 +212,11 @@ class ContextCardWidget(CardWidget):
icon_widget.setObjectName("ProductTypeIconLabel")
label_widget = QtWidgets.QLabel(f"<span>{CONTEXT_LABEL}</span>", self)
# HTML text causes the label to start catching mouse clicks
# - disable that by changing the text interaction flag
label_widget.setTextInteractionFlags(
QtCore.Qt.NoTextInteraction
)
icon_layout = QtWidgets.QHBoxLayout()
icon_layout.setContentsMargins(5, 5, 5, 5)

View file

@ -548,11 +548,17 @@ class _IconsCache:
elif icon_type == "ayon_url":
url = icon_def["url"].lstrip("/")
url = f"{ayon_api.get_base_url()}/{url}"
stream = io.BytesIO()
ayon_api.download_file_to_stream(url, stream)
pix = QtGui.QPixmap()
pix.loadFromData(stream.getvalue())
icon = QtGui.QIcon(pix)
try:
stream = io.BytesIO()
ayon_api.download_file_to_stream(url, stream)
pix = QtGui.QPixmap()
pix.loadFromData(stream.getvalue())
icon = QtGui.QIcon(pix)
except Exception:
log.warning(
"Failed to download image '%s'", url, exc_info=True
)
icon = None
elif icon_type == "transparent":
size = icon_def.get("size")

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'core' version."""
__version__ = "1.6.9+dev"
__version__ = "1.6.11+dev"

View file

@ -1,6 +1,6 @@
name = "core"
title = "Core"
version = "1.6.9+dev"
version = "1.6.11+dev"
client_dir = "ayon_core"

View file

@ -5,7 +5,7 @@
[tool.poetry]
name = "ayon-core"
version = "1.6.9+dev"
version = "1.6.11+dev"
description = ""
authors = ["Ynput Team <team@ynput.io>"]
readme = "README.md"

View file

@ -251,6 +251,19 @@ class AyonEntityURIModel(BaseSettingsModel):
)
class ExtractUSDLayerContributionModel(AyonEntityURIModel):
enforce_default_prim: bool = SettingsField(
title="Always set default prim to folder name.",
description=(
"When enabled, ignore any default prim specified on older "
"published versions of a layer and always override it with the "
"AYON standard default prim. When disabled, preserve the default "
"prim on the layer; only the initial version then sets "
"the AYON standard default prim."
)
)
class PluginStateByHostModelProfile(BaseSettingsModel):
_layout = "expanded"
# Filtering
@ -565,12 +578,125 @@ class ExtractOIIOTranscodeProfileModel(BaseSettingsModel):
class ExtractOIIOTranscodeModel(BaseSettingsModel):
    """Color conversion transcoding using OIIO for images, mostly aimed at
    transcoding for reviewables (it processes and outputs only RGBA channels).
    """
    enabled: bool = SettingsField(True)
    profiles: list[ExtractOIIOTranscodeProfileModel] = SettingsField(
        default_factory=list, title="Profiles"
    )


class ExtractOIIOPostProcessOutputModel(BaseSettingsModel):
    _layout = "expanded"
    name: str = SettingsField(
        "",
        title="Name",
        description="Output name (no spaces)",
        regex=r"[a-zA-Z0-9_]([a-zA-Z0-9_\.\-]*[a-zA-Z0-9_])?$",
    )
    extension: str = SettingsField(
        "",
        title="Extension",
        description=(
            "Target extension. If left empty, the original"
            " extension is used."
        ),
    )
    input_arguments: list[str] = SettingsField(
        default_factory=list,
        title="Input arguments",
        description="Arguments passed prior to the input file argument.",
    )
    output_arguments: list[str] = SettingsField(
        default_factory=list,
        title="Output arguments",
        description="Arguments passed prior to the -o argument.",
    )
    tags: list[str] = SettingsField(
        default_factory=list,
        title="Tags",
        description=(
            "Additional tags that will be added to the created"
            " representation.\nAdd *review* tag to create review from the"
            " transcoded representation instead of the original."
        )
    )
    custom_tags: list[str] = SettingsField(
        default_factory=list,
        title="Custom Tags",
        description=(
            "Additional custom tags that will be added"
            " to the created representation."
        )
    )


class ExtractOIIOPostProcessProfileModel(BaseSettingsModel):
    host_names: list[str] = SettingsField(
        section="Profile",
        default_factory=list,
        title="Host names"
    )
    task_types: list[str] = SettingsField(
        default_factory=list,
        title="Task types",
        enum_resolver=task_types_enum
    )
    task_names: list[str] = SettingsField(
        default_factory=list,
        title="Task names"
    )
    product_types: list[str] = SettingsField(
        default_factory=list,
        title="Product types"
    )
    product_names: list[str] = SettingsField(
        default_factory=list,
        title="Product names"
    )
    representation_names: list[str] = SettingsField(
        default_factory=list,
        title="Representation names",
    )
    representation_exts: list[str] = SettingsField(
        default_factory=list,
        title="Representation extensions",
    )
    delete_original: bool = SettingsField(
        True,
        title="Delete Original Representation",
        description=(
            "Choose to preserve or remove the original representation.\n"
            "Keep in mind that if the transcoded representation includes"
            " a `review` tag, it will take precedence over"
            " the original for creating reviews."
        ),
        section="Conversion Outputs",
    )
    outputs: list[ExtractOIIOPostProcessOutputModel] = SettingsField(
        default_factory=list,
        title="Output Definitions",
    )

    @validator("outputs")
    def validate_unique_outputs(cls, value):
        ensure_unique_names(value)
        return value


class ExtractOIIOPostProcessModel(BaseSettingsModel):
    """Process representation images with `oiiotool` on publish.

    This can be used to convert images to different formats, convert to
    scanline images or flatten deep images.
    """
    enabled: bool = SettingsField(True)
    profiles: list[ExtractOIIOPostProcessProfileModel] = SettingsField(
        default_factory=list, title="Profiles"
    )
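For reference, a hypothetical settings payload that would validate against `ExtractOIIOPostProcessModel` above; all values are illustrative only:

```python
EXAMPLE_EXTRACT_OIIO_POST_PROCESS = {
    "enabled": True,
    "profiles": [
        {
            "host_names": ["houdini"],
            "task_types": [],
            "task_names": [],
            "product_types": ["render"],
            "product_names": [],
            "representation_names": [],
            "representation_exts": ["exr"],
            "delete_original": True,
            "outputs": [
                {
                    "name": "scanline",
                    "extension": "",
                    "input_arguments": [],
                    "output_arguments": ["--scanline"],
                    "tags": [],
                    "custom_tags": [],
                }
            ],
        }
    ],
}
```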
# --- [START] Extract Review ---
class ExtractReviewFFmpegModel(BaseSettingsModel):
video_filters: list[str] = SettingsField(
@ -1122,6 +1248,10 @@ class PublishPuginsModel(BaseSettingsModel):
default_factory=ExtractOIIOTranscodeModel,
title="Extract OIIO Transcode"
)
ExtractOIIOPostProcess: ExtractOIIOPostProcessModel = SettingsField(
default_factory=ExtractOIIOPostProcessModel,
title="Extract OIIO Post Process"
)
ExtractReview: ExtractReviewModel = SettingsField(
default_factory=ExtractReviewModel,
title="Extract Review"
@ -1134,9 +1264,11 @@ class PublishPuginsModel(BaseSettingsModel):
default_factory=AyonEntityURIModel,
title="Extract USD Asset Contribution",
)
ExtractUSDLayerContribution: AyonEntityURIModel = SettingsField(
default_factory=AyonEntityURIModel,
title="Extract USD Layer Contribution",
ExtractUSDLayerContribution: ExtractUSDLayerContributionModel = (
SettingsField(
default_factory=ExtractUSDLayerContributionModel,
title="Extract USD Layer Contribution",
)
)
PreIntegrateThumbnails: PreIntegrateThumbnailsModel = SettingsField(
default_factory=PreIntegrateThumbnailsModel,
@ -1347,6 +1479,10 @@ DEFAULT_PUBLISH_VALUES = {
"enabled": True,
"profiles": []
},
"ExtractOIIOPostProcess": {
"enabled": True,
"profiles": []
},
"ExtractReview": {
"enabled": True,
"profiles": [
@ -1448,6 +1584,105 @@ DEFAULT_PUBLISH_VALUES = {
"fill_missing_frames": "closest_existing"
}
]
},
{
"product_types": [],
"hosts": ["substancepainter"],
"task_types": [],
"outputs": [
{
"name": "png",
"ext": "png",
"tags": [
"ftrackreview",
"kitsureview",
"webreview"
],
"burnins": [],
"ffmpeg_args": {
"video_filters": [],
"audio_filters": [],
"input": [],
"output": []
},
"filter": {
"families": [
"render",
"review",
"ftrack"
],
"product_names": [],
"custom_tags": [],
"single_frame_filter": "single_frame"
},
"overscan_crop": "",
# "overscan_color": [0, 0, 0],
"overscan_color": [0, 0, 0, 0.0],
"width": 1920,
"height": 1080,
"scale_pixel_aspect": True,
"bg_color": [0, 0, 0, 0.0],
"letter_box": {
"enabled": False,
"ratio": 0.0,
"fill_color": [0, 0, 0, 1.0],
"line_thickness": 0,
"line_color": [255, 0, 0, 1.0]
},
"fill_missing_frames": "only_rendered"
},
{
"name": "h264",
"ext": "mp4",
"tags": [
"burnin",
"ftrackreview",
"kitsureview",
"webreview"
],
"burnins": [],
"ffmpeg_args": {
"video_filters": [],
"audio_filters": [],
"input": [
"-apply_trc gamma22"
],
"output": [
"-pix_fmt yuv420p",
"-crf 18",
"-c:a aac",
"-b:a 192k",
"-g 1",
"-movflags faststart"
]
},
"filter": {
"families": [
"render",
"review",
"ftrack"
],
"product_names": [],
"custom_tags": [],
"single_frame_filter": "multi_frame"
},
"overscan_crop": "",
# "overscan_color": [0, 0, 0],
"overscan_color": [0, 0, 0, 0.0],
"width": 0,
"height": 0,
"scale_pixel_aspect": True,
"bg_color": [0, 0, 0, 0.0],
"letter_box": {
"enabled": False,
"ratio": 0.0,
"fill_color": [0, 0, 0, 1.0],
"line_thickness": 0,
"line_color": [255, 0, 0, 1.0]
},
"fill_missing_frames": "only_rendered"
}
]
}
]
},
@ -1526,6 +1761,7 @@ DEFAULT_PUBLISH_VALUES = {
},
"ExtractUSDLayerContribution": {
"use_ayon_entity_uri": False,
"enforce_default_prim": False,
},
"PreIntegrateThumbnails": {
"enabled": True,