Merge branch 'develop' into feature/107-webactions-in-launcher-tool

This commit is contained in:
Jakub Trllo 2025-05-21 17:40:56 +02:00 committed by GitHub
commit c88fc655b2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 660 additions and 108 deletions

View file

@ -35,6 +35,7 @@ body:
label: Version
description: What version are you running? Look to AYON Tray
options:
- 1.3.0
- 1.2.0
- 1.1.9
- 1.1.8

View file

@ -834,7 +834,7 @@ def _get_global_config_data(
if not product_entities_by_name:
# in case no product was found we need to use fallback
fallback_type = fallback_data["type"]
fallback_type = fallback_data["fallback_type"]
return _get_config_path_from_profile_data(
fallback_data, fallback_type, template_data
)

View file

@ -52,15 +52,15 @@ def get_product_name_template(
# TODO remove formatting keys replacement
template = (
matching_profile["template"]
.replace("{task[name]}", "{task}")
.replace("{Task[name]}", "{Task}")
.replace("{TASK[NAME]}", "{TASK}")
.replace("{product[type]}", "{family}")
.replace("{Product[type]}", "{Family}")
.replace("{PRODUCT[TYPE]}", "{FAMILY}")
.replace("{folder[name]}", "{asset}")
.replace("{Folder[name]}", "{Asset}")
.replace("{FOLDER[NAME]}", "{ASSET}")
.replace("{task}", "{task[name]}")
.replace("{Task}", "{Task[name]}")
.replace("{TASK}", "{TASK[NAME]}")
.replace("{family}", "{product[type]}")
.replace("{Family}", "{Product[type]}")
.replace("{FAMILY}", "{PRODUCT[TYPE]}")
.replace("{asset}", "{folder[name]}")
.replace("{Asset}", "{Folder[name]}")
.replace("{ASSET}", "{FOLDER[NAME]}")
)
# Make sure template is set (matching may have empty string)
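The replacement chain above normalizes legacy placeholder keys ({task}, {family}, {asset}) to the current ones while leaving templates that already use the new keys untouched. A minimal standalone sketch of the same idea, covering only the lowercase variants (the example template string is hypothetical):

# Minimal sketch of the key normalization above, lowercase variants only
# (the plugin also handles Title- and UPPER-case variants).
LEGACY_TO_NEW = {
    "{task}": "{task[name]}",
    "{family}": "{product[type]}",
    "{asset}": "{folder[name]}",
}

def normalize_template(template: str) -> str:
    # Collapse new-style keys to the legacy form first so both
    # conventions end up expanded exactly once.
    for legacy, new in LEGACY_TO_NEW.items():
        template = template.replace(new, legacy)
    for legacy, new in LEGACY_TO_NEW.items():
        template = template.replace(legacy, new)
    return template

# Hypothetical legacy profile template:
print(normalize_template("{asset}_{family}_{task}"))
# -> "{folder[name]}_{product[type]}_{task[name]}"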

View file

@ -0,0 +1,106 @@
import pyblish.api
from ayon_core.lib import EnumDef
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import PublishError
class CollectExplicitResolution(
pyblish.api.InstancePlugin,
publish.AYONPyblishPluginMixin,
):
"""Collect explicit user defined resolution attributes for instances"""
label = "Choose Explicit Resolution"
order = pyblish.api.CollectorOrder - 0.091
settings_category = "core"
enabled = False
default_resolution_item = (None, "Don't override")
# Settings
product_types = []
options = []
# caching resolution items
resolution_items = None
def process(self, instance):
"""Process the instance and collect explicit resolution attributes"""
# Get the values from the instance data
values = self.get_attr_values_from_data(instance.data)
resolution_value = values.get("explicit_resolution", None)
if resolution_value is None:
return
# Get the width, height and pixel_aspect from the resolution value
resolution_data = self._get_resolution_values(resolution_value)
# Set the values to the instance data
instance.data.update(resolution_data)
def _get_resolution_values(self, resolution_value):
"""
Returns width, height and pixel_aspect from the resolution value
Arguments:
resolution_value (str): resolution value
Returns:
dict: dictionary with width, height and pixel_aspect
"""
resolution_items = self._get_resolution_items()
# ensure resolution_value is part of expected items
item_values = resolution_items.get(resolution_value)
# if the item is in the cache, get the values from it
if item_values:
return {
"resolutionWidth": item_values["width"],
"resolutionHeight": item_values["height"],
"pixelAspect": item_values["pixel_aspect"],
}
raise PublishError(
f"Invalid resolution value: {resolution_value} "
f"expected choices: {resolution_items}"
)
@classmethod
def _get_resolution_items(cls):
if cls.resolution_items is None:
resolution_items = {}
for item in cls.options:
item_text = (
f"{item['width']}x{item['height']} "
f"({item['pixel_aspect']})"
)
resolution_items[item_text] = item
cls.resolution_items = resolution_items
return cls.resolution_items
@classmethod
def get_attr_defs_for_instance(
cls, create_context, instance,
):
if instance.product_type not in cls.product_types:
return []
# Get the resolution items
resolution_items = cls._get_resolution_items()
items = [cls.default_resolution_item]
# Add all cached resolution items to the dropdown options
for item_text in resolution_items:
items.append((item_text, item_text))
return [
EnumDef(
"explicit_resolution",
items,
default="Don't override",
label="Force product resolution",
),
]
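The plugin keys its dropdown by a "WIDTHxHEIGHT (pixel_aspect)" label built from the `options` setting and writes the chosen values back onto the instance. A small standalone sketch of that mapping (the option values are hypothetical):

# Minimal sketch: build the enum items the way _get_resolution_items does,
# keyed by a "WIDTHxHEIGHT (pixel_aspect)" label.
options = [
    {"width": 1920, "height": 1080, "pixel_aspect": 1.0},
    {"width": 3840, "height": 2160, "pixel_aspect": 1.0},
]

resolution_items = {
    f"{item['width']}x{item['height']} ({item['pixel_aspect']})": item
    for item in options
}

# Selecting "1920x1080 (1.0)" in the publisher would set on the instance:
selected = resolution_items["1920x1080 (1.0)"]
instance_data = {
    "resolutionWidth": selected["width"],
    "resolutionHeight": selected["height"],
    "pixelAspect": selected["pixel_aspect"],
}
print(instance_data)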

View file

@ -54,7 +54,7 @@ class ExtractOTIOReview(
# plugin default attributes
to_width = 1280
to_height = 720
output_ext = ".jpg"
output_ext = ".png"
def process(self, instance):
# Not all hosts can import these modules.
@ -510,6 +510,12 @@ class ExtractOTIOReview(
"-tune", "stillimage"
])
if video or sequence:
command.extend([
"-vf", f"scale={self.to_width}:{self.to_height}:flags=lanczos",
"-compression_level", "5",
])
# add output attributes
command.extend([
"-start_number", str(out_frame_start)
@ -520,9 +526,10 @@ class ExtractOTIOReview(
input_extension
and self.output_ext == input_extension
):
command.extend([
"-c", "copy"
])
command.extend(["-c", "copy"])
else:
# For lossy formats, force re-encode
command.extend(["-pix_fmt", "rgba"])
# add output path at the end
command.append(output_path)
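With the output switched to ".png", video and sequence inputs are now scaled with a Lanczos filter at a fixed compression level, and sources that cannot be stream-copied are re-encoded to RGBA. An illustrative, approximate sketch of the resulting argument list (paths, resolution and frame numbers are placeholders; argument order in the plugin may differ):

# Illustrative only: approximate shape of the ffmpeg arguments built above
# for a sequence input that cannot be stream-copied.
to_width, to_height = 1280, 720
out_frame_start = 1001
output_path = "C:/result/output.%04d.png"   # hypothetical path

command = ["/path/to/ffmpeg", "-i", "input.%04d.exr"]
command.extend([
    "-vf", f"scale={to_width}:{to_height}:flags=lanczos",
    "-compression_level", "5",
])
command.extend(["-start_number", str(out_frame_start)])
command.extend(["-pix_fmt", "rgba"])   # re-encode branch (no "-c copy")
command.append(output_path)
print(" ".join(command))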

View file

@ -5,11 +5,15 @@ import json
import shutil
import subprocess
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional
import tempfile
import clique
import speedcopy
import pyblish.api
from ayon_api import get_last_version_by_product_name, get_representations
from ayon_core.lib import (
get_ffmpeg_tool_args,
filter_profiles,
@ -400,15 +404,73 @@ class ExtractReview(pyblish.api.InstancePlugin):
)
temp_data = self.prepare_temp_data(instance, repre, output_def)
files_to_clean = []
new_frame_files = {}
if temp_data["input_is_sequence"]:
self.log.debug("Checking sequence to fill gaps in sequence..")
files_to_clean = self.fill_sequence_gaps(
files=temp_data["origin_repre"]["files"],
staging_dir=new_repre["stagingDir"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"]
)
files = temp_data["origin_repre"]["files"]
collections = clique.assemble(
files,
)[0]
if len(collections) != 1:
raise KnownPublishError(
"Multiple collections {} found.".format(collections))
collection = collections[0]
fill_missing_frames = _output_def["fill_missing_frames"]
if fill_missing_frames == "closest_existing":
new_frame_files = self.fill_sequence_gaps_from_existing(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"],
)
elif fill_missing_frames == "blank":
new_frame_files = self.fill_sequence_gaps_with_blanks(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"],
resolution_width=temp_data["resolution_width"],
resolution_height=temp_data["resolution_height"],
extension=temp_data["input_ext"],
temp_data=temp_data
)
elif fill_missing_frames == "previous_version":
new_frame_files = self.fill_sequence_gaps_with_previous(
collection=collection,
staging_dir=new_repre["stagingDir"],
instance=instance,
current_repre_name=repre["name"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"],
)
# fallback to original workflow
if new_frame_files is None:
new_frame_files = (
self.fill_sequence_gaps_from_existing(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"],
))
elif fill_missing_frames == "only_rendered":
temp_data["explicit_input_paths"] = [
os.path.join(
new_repre["stagingDir"], file
).replace("\\", "/")
for file in files
]
frame_start = min(collection.indexes)
frame_end = max(collection.indexes)
# modify range for burnins
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
temp_data["frame_start"] = frame_start
temp_data["frame_end"] = frame_end
temp_data["filled_files"] = new_frame_files
# create or update outputName
output_name = new_repre.get("outputName", "")
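The block above dispatches on the new "fill_missing_frames" output-definition setting: "closest_existing" duplicates the nearest rendered frame, "blank" copies a generated black frame, "previous_version" pulls frames from the last published version (falling back to the closest existing frame on failure), and "only_rendered" skips filling and feeds ffmpeg an explicit file list. A compact sketch of that dispatch with the handlers stubbed out (the function names here are placeholders, not the plugin's methods):

# Minimal sketch of the gap-handling dispatch keyed by the
# "fill_missing_frames" output definition setting (handlers stubbed).
def fill_from_existing(**kwargs): ...      # duplicate nearest rendered frame
def fill_with_blanks(**kwargs): ...        # copy a generated black frame
def fill_from_previous(**kwargs): ...      # copy frames from the last version

FILL_HANDLERS = {
    "closest_existing": fill_from_existing,
    "blank": fill_with_blanks,
    "previous_version": fill_from_previous,
    # "only_rendered" skips filling and feeds ffmpeg an explicit file list
}

def handle_gaps(mode: str, **kwargs):
    handler = FILL_HANDLERS.get(mode)
    if handler is None:
        return None                        # "only_rendered" or unknown mode
    new_frame_files = handler(**kwargs)
    if new_frame_files is None:
        # "previous_version" falls back to the closest existing frame
        new_frame_files = fill_from_existing(**kwargs)
    return new_frame_files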
@ -465,9 +527,12 @@ class ExtractReview(pyblish.api.InstancePlugin):
run_subprocess(subprcs_cmd, shell=True, logger=self.log)
# delete files added to fill gaps
if files_to_clean:
for f in files_to_clean:
os.unlink(f)
if new_frame_files:
for filepath in new_frame_files.values():
os.unlink(filepath)
for filepath in temp_data["paths_to_remove"]:
os.unlink(filepath)
new_repre.update({
"fps": temp_data["fps"],
@ -560,6 +625,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
input_is_sequence = self.input_is_sequence(repre)
input_allow_bg = False
first_sequence_frame = None
if input_is_sequence and repre["files"]:
# Calculate first frame that should be used
cols, _ = clique.assemble(repre["files"])
@ -578,6 +644,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
ext = os.path.splitext(repre["files"][0])[1].replace(".", "")
if ext.lower() in self.alpha_exts:
input_allow_bg = True
else:
ext = os.path.splitext(repre["files"])[1].replace(".", "")
return {
"fps": float(instance.data["fps"]),
@ -598,7 +666,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
"input_allow_bg": input_allow_bg,
"with_audio": with_audio,
"without_handles": without_handles,
"handles_are_set": handles_are_set
"handles_are_set": handles_are_set,
"input_ext": ext,
"explicit_input_paths": [], # absolute paths to rendered files
"paths_to_remove": []
}
def _ffmpeg_arguments(
@ -680,7 +751,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
if layer_name:
ffmpeg_input_args.extend(["-layer", layer_name])
if temp_data["input_is_sequence"]:
explicit_input_paths = temp_data["explicit_input_paths"]
if temp_data["input_is_sequence"] and not explicit_input_paths:
# Set start frame of input sequence (just frame in filename)
# - definition of input filepath
# - add handle start if output should be without handles
@ -707,7 +779,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
"-to", "{:0.10f}".format(duration_seconds)
])
if temp_data["output_is_sequence"]:
if temp_data["output_is_sequence"] and not explicit_input_paths:
# Set start frame of output sequence (just frame in filename)
# - this is definition of an output
ffmpeg_output_args.extend([
@ -738,10 +810,34 @@ class ExtractReview(pyblish.api.InstancePlugin):
"-frames:v", str(output_frames_len)
])
# Add video/image input path
ffmpeg_input_args.extend([
"-i", path_to_subprocess_arg(temp_data["full_input_path"])
])
if not explicit_input_paths:
# Add video/image input path
ffmpeg_input_args.extend([
"-i", path_to_subprocess_arg(temp_data["full_input_path"])
])
else:
frame_duration = 1 / temp_data["fps"]
explicit_frames_meta = tempfile.NamedTemporaryFile(
mode="w", prefix="explicit_frames", suffix=".txt", delete=False
)
explicit_frames_meta.close()
explicit_frames_path = explicit_frames_meta.name
with open(explicit_frames_path, "w") as fp:
lines = [
f"file '{path}'{os.linesep}duration {frame_duration}"
for path in temp_data["explicit_input_paths"]
]
fp.write("\n".join(lines))
temp_data["paths_to_remove"].append(explicit_frames_path)
# let ffmpeg use only rendered files, might have gaps
ffmpeg_input_args.extend([
"-f", "concat",
"-safe", "0",
"-i", path_to_subprocess_arg(explicit_frames_path),
"-r", str(temp_data["fps"])
])
# Add audio arguments if there are any. Skipped when output are images.
if not temp_data["output_ext_is_image"] and temp_data["with_audio"]:
@ -881,8 +977,159 @@ class ExtractReview(pyblish.api.InstancePlugin):
return all_args
def fill_sequence_gaps(self, files, staging_dir, start_frame, end_frame):
# type: (list, str, int, int) -> list
def fill_sequence_gaps_with_previous(
self,
collection: str,
staging_dir: str,
instance: pyblish.plugin.Instance,
current_repre_name: str,
start_frame: int,
end_frame: int
) -> Optional[Dict[int, str]]:
"""Tries to replace missing frames from ones from last version"""
repre_file_paths = self._get_last_version_files(
instance, current_repre_name)
if repre_file_paths is None:
# issues in getting last version files, falling back
return None
prev_collection = clique.assemble(
repre_file_paths,
patterns=[clique.PATTERNS["frames"]],
minimum_items=1
)[0][0]
prev_col_format = prev_collection.format("{head}{padding}{tail}")
added_files = {}
anatomy = instance.context.data["anatomy"]
col_format = collection.format("{head}{padding}{tail}")
for frame in range(start_frame, end_frame + 1):
if frame in collection.indexes:
continue
hole_fpath = os.path.join(staging_dir, col_format % frame)
previous_version_path = prev_col_format % frame
previous_version_path = anatomy.fill_root(previous_version_path)
if not os.path.exists(previous_version_path):
self.log.warning(
"Missing frame should be replaced from "
f"'{previous_version_path}' but that doesn't exist. "
"Falling back to filling from currently last rendered."
)
return None
self.log.warning(
f"Replacing missing '{hole_fpath}' with "
f"'{previous_version_path}'"
)
speedcopy.copyfile(previous_version_path, hole_fpath)
added_files[frame] = hole_fpath
return added_files
def _get_last_version_files(
self,
instance: pyblish.plugin.Instance,
current_repre_name: str,
):
product_name = instance.data["productName"]
project_name = instance.data["projectEntity"]["name"]
folder_entity = instance.data["folderEntity"]
version_entity = get_last_version_by_product_name(
project_name,
product_name,
folder_entity["id"],
fields={"id"}
)
if not version_entity:
return None
matching_repres = get_representations(
project_name,
version_ids=[version_entity["id"]],
representation_names=[current_repre_name],
fields={"files"}
)
if not matching_repres:
return None
matching_repre = list(matching_repres)[0]
repre_file_paths = [
file_info["path"]
for file_info in matching_repre["files"]
]
return repre_file_paths
def fill_sequence_gaps_with_blanks(
self,
collection: str,
staging_dir: str,
start_frame: int,
end_frame: int,
resolution_width: int,
resolution_height: int,
extension: str,
temp_data: Dict[str, Any]
) -> Optional[Dict[int, str]]:
"""Fills missing files by blank frame."""
blank_frame_path = None
added_files = {}
col_format = collection.format("{head}{padding}{tail}")
for frame in range(start_frame, end_frame + 1):
if frame in collection.indexes:
continue
hole_fpath = os.path.join(staging_dir, col_format % frame)
if blank_frame_path is None:
blank_frame_path = self._create_blank_frame(
staging_dir, extension, resolution_width, resolution_height
)
temp_data["paths_to_remove"].append(blank_frame_path)
speedcopy.copyfile(blank_frame_path, hole_fpath)
added_files[frame] = hole_fpath
return added_files
def _create_blank_frame(
self,
staging_dir,
extension,
resolution_width,
resolution_height
):
blank_frame_path = os.path.join(staging_dir, f"blank.{extension}")
command = get_ffmpeg_tool_args(
"ffmpeg",
"-f", "lavfi",
"-i", "color=c=black:s={}x{}:d=1".format(
resolution_width, resolution_height
),
"-tune", "stillimage",
"-frames:v", "1",
blank_frame_path
)
self.log.debug("Executing: {}".format(" ".join(command)))
output = run_subprocess(
command, logger=self.log
)
self.log.debug("Output: {}".format(output))
return blank_frame_path
def fill_sequence_gaps_from_existing(
self,
collection,
staging_dir: str,
start_frame: int,
end_frame: int
) -> Dict[int, str]:
"""Fill missing files in sequence by duplicating existing ones.
This will take the nearest frame file and copy it so as to fill the
@ -890,40 +1137,33 @@ class ExtractReview(pyblish.api.InstancePlugin):
hole ahead.
Args:
files (list): List of representation files.
collection (clique.Collection): Collection of existing frame files.
staging_dir (str): Path to staging directory.
start_frame (int): Sequence start (no matter what files are there)
end_frame (int): Sequence end (no matter what files are there)
Returns:
list of added files. Those should be cleaned after work
dict[int, str] of added files. Those should be cleaned after work
is done.
Raises:
KnownPublishError: if more than one collection is obtained.
"""
collections = clique.assemble(files)[0]
if len(collections) != 1:
raise KnownPublishError(
"Multiple collections {} found.".format(collections))
col = collections[0]
# Prepare which hole is filled with what frame
# - the frame is filled only with already existing frames
prev_frame = next(iter(col.indexes))
prev_frame = next(iter(collection.indexes))
hole_frame_to_nearest = {}
for frame in range(int(start_frame), int(end_frame) + 1):
if frame in col.indexes:
if frame in collection.indexes:
prev_frame = frame
else:
# Use previous frame as source for hole
hole_frame_to_nearest[frame] = prev_frame
# Calculate paths
added_files = []
col_format = col.format("{head}{padding}{tail}")
added_files = {}
col_format = collection.format("{head}{padding}{tail}")
for hole_frame, src_frame in hole_frame_to_nearest.items():
hole_fpath = os.path.join(staging_dir, col_format % hole_frame)
src_fpath = os.path.join(staging_dir, col_format % src_frame)
@ -932,7 +1172,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
"Missing previously detected file: {}".format(src_fpath))
speedcopy.copyfile(src_fpath, hole_fpath)
added_files.append(hole_fpath)
added_files[hole_frame] = hole_fpath
return added_files
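fill_sequence_gaps_from_existing maps each missing frame to the closest frame seen before it and copies that file into the hole. A small worked example of the mapping (frame numbers are hypothetical):

# Minimal sketch of the hole-to-source mapping used above.
existing_frames = {1001, 1002, 1005, 1006}
start_frame, end_frame = 1001, 1006

prev_frame = min(existing_frames)
hole_frame_to_nearest = {}
for frame in range(start_frame, end_frame + 1):
    if frame in existing_frames:
        prev_frame = frame
    else:
        hole_frame_to_nearest[frame] = prev_frame

print(hole_frame_to_nearest)   # {1003: 1002, 1004: 1002}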
@ -978,6 +1218,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
# Make sure to have full path to one input file
full_input_path_single_file = full_input_path
filled_files = temp_data["filled_files"]
if filled_files:
first_frame, first_file = next(iter(filled_files.items()))
if first_file < full_input_path_single_file:
self.log.warning(f"Using filled frame: '{first_file}'")
full_input_path_single_file = first_file
temp_data["first_sequence_frame"] = first_frame
filename_suffix = output_def["filename_suffix"]
output_ext = output_def.get("ext")
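For the "blank" mode, _create_blank_frame (above) renders a single black frame with ffmpeg's lavfi color source, and that one file is copied into every hole. A rough sketch of the command it builds (tool path, staging directory, extension and resolution are placeholders):

# Illustrative only: approximate ffmpeg command produced by _create_blank_frame.
import os

staging_dir = "C:/staging"            # hypothetical
extension = "exr"
resolution_width, resolution_height = 1920, 1080

blank_frame_path = os.path.join(staging_dir, f"blank.{extension}")
command = [
    "/path/to/ffmpeg",                # resolved via get_ffmpeg_tool_args
    "-f", "lavfi",
    "-i", f"color=c=black:s={resolution_width}x{resolution_height}:d=1",
    "-tune", "stillimage",
    "-frames:v", "1",
    blank_frame_path,
]
print(" ".join(command))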

View file

@ -506,27 +506,36 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# Set video input attributes
max_int = str(2147483647)
video_data = get_ffprobe_data(video_file_path, logger=self.log)
# Use duration of the individual streams since it is returned with
# higher decimal precision than 'format.duration'. We need this
# more precise value for calculating the correct amount of frames
# for higher FPS ranges or decimal ranges, e.g. 29.97 FPS
duration = max(
float(stream.get("duration", 0))
for stream in video_data["streams"]
if stream.get("codec_type") == "video"
)
cmd_args = [
"-y",
"-ss", str(duration * self.duration_split),
# Get duration or use a safe default (single frame)
duration = 0
for stream in video_data["streams"]:
if stream.get("codec_type") == "video":
stream_duration = float(stream.get("duration", 0))
if stream_duration > duration:
duration = stream_duration
# For very short videos, just use the first frame
# Calculate seek position safely
seek_position = 0.0
# Only use timestamp calculation for videos longer than 0.1 seconds
if duration > 0.1:
seek_position = duration * self.duration_split
# Build command args
cmd_args = []
if seek_position > 0.0:
cmd_args.extend(["-ss", str(seek_position)])
# Add generic ffmpeg commands
cmd_args.extend([
"-i", video_file_path,
"-analyzeduration", max_int,
"-probesize", max_int,
"-frames:v", "1"
]
# add output file path
cmd_args.append(output_thumb_file_path)
"-y",
"-frames:v", "1",
output_thumb_file_path
])
# create ffmpeg command
cmd = get_ffmpeg_tool_args(
@ -537,15 +546,53 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# run subprocess
self.log.debug("Executing: {}".format(" ".join(cmd)))
run_subprocess(cmd, logger=self.log)
self.log.debug(
"Thumbnail created: {}".format(output_thumb_file_path))
return output_thumb_file_path
# Verify the output file was created
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) > 0
):
self.log.debug(
"Thumbnail created: {}".format(output_thumb_file_path))
return output_thumb_file_path
self.log.warning("Output file was not created or is empty")
# Try to create thumbnail without offset
# - skip if offset did not happen
if "-ss" not in cmd_args:
return None
self.log.debug("Trying fallback without offset")
# Remove -ss and its value
ss_index = cmd_args.index("-ss")
cmd_args.pop(ss_index) # Remove -ss
cmd_args.pop(ss_index) # Remove the timestamp value
# Create new command and try again
cmd = get_ffmpeg_tool_args("ffmpeg", *cmd_args)
self.log.debug("Fallback command: {}".format(" ".join(cmd)))
run_subprocess(cmd, logger=self.log)
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) > 0
):
self.log.debug("Fallback thumbnail created")
return output_thumb_file_path
return None
except RuntimeError as error:
self.log.warning(
"Failed intermediate thumb source using ffmpeg: {}".format(
error)
)
return None
finally:
# Remove output file if it is empty
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) == 0
):
os.remove(output_thumb_file_path)
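The thumbnail extraction now seeks into the clip only when it is longer than 0.1 seconds and, if the seeked attempt produces an empty file, retries without the offset. A condensed, illustrative sketch of that argument handling (paths, duration and the 0.5 split value are placeholders):

# Condensed sketch of the seek handling for thumbnail extraction.
duration = 0.08                        # hypothetical, from ffprobe streams
duration_split = 0.5                   # plugin attribute, value assumed here
video_file_path = "C:/input/review.mov"          # hypothetical
output_thumb_file_path = "C:/staging/thumb.jpg"  # hypothetical
max_int = str(2147483647)

seek_position = duration * duration_split if duration > 0.1 else 0.0

cmd_args = []
if seek_position > 0.0:
    cmd_args.extend(["-ss", str(seek_position)])
cmd_args.extend([
    "-i", video_file_path,
    "-analyzeduration", max_int,
    "-probesize", max_int,
    "-y",
    "-frames:v", "1",
    output_thumb_file_path,
])

# Fallback: if the first run yields no usable file, drop "-ss" and retry.
if "-ss" in cmd_args:
    ss_index = cmd_args.index("-ss")
    del cmd_args[ss_index:ss_index + 2]
print(cmd_args)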
def _get_resolution_arg(
self,

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'core' version."""
__version__ = "1.2.0+dev"
__version__ = "1.3.0+dev"

View file

@ -1,6 +1,6 @@
name = "core"
title = "Core"
version = "1.2.0+dev"
version = "1.3.0+dev"
client_dir = "ayon_core"
@ -10,6 +10,7 @@ ayon_server_version = ">=1.7.6,<2.0.0"
ayon_launcher_version = ">=1.0.2"
ayon_required_addons = {}
ayon_compatible_addons = {
"ayon_ocio": ">=1.2.1",
"harmony": ">0.4.0",
"fusion": ">=0.3.3",
"openrv": ">=1.0.2",

View file

@ -5,7 +5,7 @@
[tool.poetry]
name = "ayon-core"
version = "1.2.0+dev"
version = "1.3.0+dev"
description = ""
authors = ["Ynput Team <team@ynput.io>"]
readme = "README.md"

View file

@ -71,6 +71,24 @@ def _fallback_ocio_config_profile_types():
def _ocio_built_in_paths():
return [
{
"value": "{BUILTIN_OCIO_ROOT}/aces_2.0/studio-config-v3.0.0_aces-v2.0_ocio-v2.4.ocio", # noqa: E501
"label": "ACES 2.0 Studio (OCIO v2.4)",
"description": (
"Aces 2.0 Studio OCIO config file. Requires OCIO v2.4.")
},
{
"value": "{BUILTIN_OCIO_ROOT}/aces_1.3/studio-config-v1.0.0_aces-v1.3_ocio-v2.1.ocio", # noqa: E501
"label": "ACES 1.3 Studio (OCIO v2.1)",
"description": (
"Aces 1.3 Studio OCIO config file. Requires OCIO v2.1.")
},
{
"value": "{BUILTIN_OCIO_ROOT}/aces_1.3/studio-config-v1.0.0_aces-v1.3_ocio-v2.0.ocio", # noqa: E501
"label": "ACES 1.3 Studio (OCIO v2)",
"description": (
"Aces 1.3 Studio OCIO config file. Requires OCIO v2.")
},
{
"value": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
"label": "ACES 1.2",

View file

@ -1,4 +1,5 @@
from pydantic import validator
from typing import Any
from ayon_server.settings import (
BaseSettingsModel,
@ -9,10 +10,19 @@ from ayon_server.settings import (
task_types_enum,
anatomy_template_items_enum
)
from ayon_server.exceptions import BadRequestException
from ayon_server.types import ColorRGBA_uint8
def _handle_missing_frames_enum():
return [
{"value": "closest_existing", "label": "Use closest existing"},
{"value": "blank", "label": "Generate blank frame"},
{"value": "previous_version", "label": "Use previous version"},
{"value": "only_rendered", "label": "Use only rendered"},
]
class EnabledModel(BaseSettingsModel):
enabled: bool = SettingsField(True)
@ -158,6 +168,78 @@ class CollectUSDLayerContributionsModel(BaseSettingsModel):
return value
class ResolutionOptionsModel(BaseSettingsModel):
_layout = "compact"
width: int = SettingsField(
1920,
ge=0,
le=100000,
title="Width",
description=(
"Width resolution number value"),
placeholder="Width"
)
height: int = SettingsField(
1080,
title="Height",
ge=0,
le=100000,
description=(
"Height resolution number value"),
placeholder="Height"
)
pixel_aspect: float = SettingsField(
1.0,
title="Pixel aspect",
ge=0.0,
le=100000.0,
description=(
"Pixel Aspect resolution decimal number value"),
placeholder="Pixel aspect"
)
def ensure_unique_resolution_option(
objects: list[Any], field_name: str | None = None) -> None: # noqa: C901
"""Ensure a list of objects have unique option attributes.
This function checks if the list of objects has unique 'width',
'height' and 'pixel_aspect' properties.
"""
options = set()
for obj in objects:
item_test_text = f"{obj.width}x{obj.height}x{obj.pixel_aspect}"
if item_test_text in options:
raise BadRequestException(
f"Duplicate option '{item_test_text}'")
options.add(item_test_text)
class CollectExplicitResolutionModel(BaseSettingsModel):
enabled: bool = SettingsField(True, title="Enabled")
product_types: list[str] = SettingsField(
default_factory=list,
title="Product types",
description=(
"Only activate the attribute for following product types."
)
)
options: list[ResolutionOptionsModel] = SettingsField(
default_factory=list,
title="Resolution choices",
description=(
"Available resolution choices to be displayed in "
"the publishers attribute."
)
)
@validator("options")
def validate_unique_resolution_options(cls, value):
ensure_unique_resolution_option(value)
return value
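ensure_unique_resolution_option keys each option by a "widthxheightxpixel_aspect" string and rejects duplicates with a BadRequestException. A minimal standalone sketch of the same check, using plain objects instead of the pydantic models (values are hypothetical):

# Minimal sketch of the duplicate-option check (plain objects stand in
# for ResolutionOptionsModel; values are hypothetical).
from types import SimpleNamespace

options = [
    SimpleNamespace(width=1920, height=1080, pixel_aspect=1.0),
    SimpleNamespace(width=1920, height=1080, pixel_aspect=1.0),  # duplicate
]

seen = set()
for obj in options:
    key = f"{obj.width}x{obj.height}x{obj.pixel_aspect}"
    if key in seen:
        # The addon raises BadRequestException; ValueError stands in here.
        raise ValueError(f"Duplicate option '{key}'")
    seen.add(key)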
class AyonEntityURIModel(BaseSettingsModel):
use_ayon_entity_uri: bool = SettingsField(
title="Use AYON Entity URI",
@ -643,6 +725,12 @@ class ExtractReviewOutputDefModel(BaseSettingsModel):
default_factory=ExtractReviewLetterBox,
title="Letter Box"
)
fill_missing_frames: str = SettingsField(
title="Handle missing frames",
default="closest_existing",
description="How to handle gaps in sequence frame ranges.",
enum_resolver=_handle_missing_frames_enum
)
@validator("name")
def validate_name(cls, value):
@ -997,6 +1085,10 @@ class PublishPuginsModel(BaseSettingsModel):
title="Collect USD Layer Contributions",
)
)
CollectExplicitResolution: CollectExplicitResolutionModel = SettingsField(
default_factory=CollectExplicitResolutionModel,
title="Collect Explicit Resolution"
)
ValidateEditorialAssetName: ValidateBaseModel = SettingsField(
default_factory=ValidateBaseModel,
title="Validate Editorial Asset Name"
@ -1171,6 +1263,13 @@ DEFAULT_PUBLISH_VALUES = {
},
]
},
"CollectExplicitResolution": {
"enabled": True,
"product_types": [
"shot"
],
"options": []
},
"ValidateEditorialAssetName": {
"enabled": True,
"optional": False,
@ -1288,7 +1387,8 @@ DEFAULT_PUBLISH_VALUES = {
"fill_color": [0, 0, 0, 1.0],
"line_thickness": 0,
"line_color": [255, 0, 0, 1.0]
}
},
"fill_missing_frames": "closest_existing"
},
{
"name": "h264",
@ -1338,7 +1438,8 @@ DEFAULT_PUBLISH_VALUES = {
"fill_color": [0, 0, 0, 1.0],
"line_thickness": 0,
"line_color": [255, 0, 0, 1.0]
}
},
"fill_missing_frames": "closest_existing"
}
]
}

View file

@ -103,17 +103,18 @@ def test_image_sequence_with_embedded_tc_and_handles_out_of_range():
# 10 head black handles generated from gap (991-1000)
"/path/to/ffmpeg -t 0.4166666666666667 -r 24.0 -f lavfi -i "
"color=c=black:s=1280x720 -tune stillimage -start_number 991 "
"C:/result/output.%04d.jpg",
"-pix_fmt rgba C:/result/output.%04d.png",
# 10 tail black handles generated from gap (1102-1111)
"/path/to/ffmpeg -t 0.4166666666666667 -r 24.0 -f lavfi -i "
"color=c=black:s=1280x720 -tune stillimage -start_number 1102 "
"C:/result/output.%04d.jpg",
"-pix_fmt rgba C:/result/output.%04d.png",
# Report from source exr (1001-1101) with enforce framerate
"/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i "
f"C:\\exr_embedded_tc{os.sep}output.%04d.exr -start_number 1001 "
"C:/result/output.%04d.jpg"
f"C:\\exr_embedded_tc{os.sep}output.%04d.exr "
"-vf scale=1280:720:flags=lanczos -compression_level 5 "
"-start_number 1001 -pix_fmt rgba C:/result/output.%04d.png"
]
assert calls == expected
@ -130,20 +131,23 @@ def test_image_sequence_and_handles_out_of_range():
expected = [
# 5 head black frames generated from gap (991-995)
"/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
" -tune stillimage -start_number 991 C:/result/output.%04d.jpg",
"/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720 "
"-tune stillimage -start_number 991 -pix_fmt rgba "
"C:/result/output.%04d.png",
# 9 tail black frames generated from gap (1097-1105)
"/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
" -tune stillimage -start_number 1097 C:/result/output.%04d.jpg",
"/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720 "
"-tune stillimage -start_number 1097 -pix_fmt rgba "
"C:/result/output.%04d.png",
# Report from source tiff (996-1096)
# 996-1000 = additional 5 head frames
# 1001-1095 = source range conformed to 25fps
# 1096-1096 = additional 1 tail frames
"/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i "
f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996"
f" C:/result/output.%04d.jpg"
f"C:\\tif_seq{os.sep}output.%04d.tif "
"-vf scale=1280:720:flags=lanczos -compression_level 5 "
"-start_number 996 -pix_fmt rgba C:/result/output.%04d.png"
]
assert calls == expected
@ -163,8 +167,9 @@ def test_movie_with_embedded_tc_no_gap_handles():
# - first_frame = 14 src - 10 (head tail) = frame 4 = 0.1666s
# - duration = 68fr (source) + 20fr (handles) = 88frames = 3.666s
"/path/to/ffmpeg -ss 0.16666666666666666 -t 3.6666666666666665 "
"-i C:\\data\\qt_embedded_tc.mov -start_number 991 "
"C:/result/output.%04d.jpg"
"-i C:\\data\\qt_embedded_tc.mov -vf scale=1280:720:flags=lanczos "
"-compression_level 5 -start_number 991 -pix_fmt rgba "
"C:/result/output.%04d.png"
]
assert calls == expected
@ -181,12 +186,14 @@ def test_short_movie_head_gap_handles():
expected = [
# 10 head black frames generated from gap (991-1000)
"/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
" -tune stillimage -start_number 991 C:/result/output.%04d.jpg",
" -tune stillimage -start_number 991 -pix_fmt rgba "
"C:/result/output.%04d.png",
# source range + 10 tail frames
# duration = 50fr (source) + 10fr (tail handle) = 60 fr = 2.4s
"/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4"
" -start_number 1001 C:/result/output.%04d.jpg"
"/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4 -vf "
"scale=1280:720:flags=lanczos -compression_level 5 "
"-start_number 1001 -pix_fmt rgba C:/result/output.%04d.png"
]
assert calls == expected
@ -204,13 +211,14 @@ def test_short_movie_tail_gap_handles():
# 10 tail black frames generated from gap (1067-1076)
"/path/to/ffmpeg -t 0.4166666666666667 -r 24.0 -f lavfi -i "
"color=c=black:s=1280x720 -tune stillimage -start_number 1067 "
"C:/result/output.%04d.jpg",
"-pix_fmt rgba C:/result/output.%04d.png",
# 10 head frames + source range
# duration = 10fr (head handle) + 66fr (source) = 76fr = 3.16s
"/path/to/ffmpeg -ss 1.0416666666666667 -t 3.1666666666666665 -i "
"C:\\data\\qt_no_tc_24fps.mov -start_number 991"
" C:/result/output.%04d.jpg"
"C:\\data\\qt_no_tc_24fps.mov -vf scale=1280:720:flags=lanczos "
"-compression_level 5 -start_number 991 -pix_fmt rgba "
"C:/result/output.%04d.png"
]
assert calls == expected
@ -239,62 +247,75 @@ def test_multiple_review_clips_no_gap():
# 10 head black frames generated from gap (991-1000)
'/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi'
' -i color=c=black:s=1280x720 -tune '
'stillimage -start_number 991 C:/result/output.%04d.jpg',
'stillimage -start_number 991 -pix_fmt rgba C:/result/output.%04d.png',
# Alternance 25fps tiff sequence and 24fps exr sequence
# for 100 frames each
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 1001 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1001 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
f'C:\\with_tc{os.sep}output.%04d.exr '
'-start_number 1102 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1102 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 1198 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1198 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
f'C:\\with_tc{os.sep}output.%04d.exr '
'-start_number 1299 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1299 -pix_fmt rgba C:/result/output.%04d.png',
# Repeated 25fps tiff sequence multiple times till the end
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 1395 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1395 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 1496 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1496 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 1597 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1597 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 1698 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1698 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 1799 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1799 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 1900 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1900 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 2001 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 2001 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 2102 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 2102 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
'-start_number 2203 C:/result/output.%04d.jpg'
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 2203 -pix_fmt rgba C:/result/output.%04d.png'
]
assert calls == expected
@ -323,15 +344,17 @@ def test_multiple_review_clips_with_gap():
# Gap on review track (12 frames)
'/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi'
' -i color=c=black:s=1280x720 -tune '
'stillimage -start_number 991 C:/result/output.%04d.jpg',
'stillimage -start_number 991 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
f'C:\\with_tc{os.sep}output.%04d.exr '
'-start_number 1003 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1003 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
f'C:\\with_tc{os.sep}output.%04d.exr '
'-start_number 1091 C:/result/output.%04d.jpg'
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1091 -pix_fmt rgba C:/result/output.%04d.png'
]
assert calls == expected