Merge branch 'develop' of https://github.com/ynput/ayon-core into enhancement/transcoding_oiio_tool_for_ffmpeg_one_call

# Conflicts:
#	client/ayon_core/lib/transcoding.py
#	client/ayon_core/plugins/publish/extract_color_transcode.py
Roy Nieterau 2025-06-06 14:50:06 +02:00
commit 6061e8a82b
33 changed files with 1734 additions and 305 deletions


@ -35,6 +35,20 @@ body:
label: Version
description: What version are you running? Look to AYON Tray
options:
- 1.3.2
- 1.3.1
- 1.3.0
- 1.2.0
- 1.1.9
- 1.1.8
- 1.1.7
- 1.1.6
- 1.1.5
- 1.1.4
- 1.1.3
- 1.1.2
- 1.1.1
- 1.1.0
- 1.0.14
- 1.0.13
- 1.0.12


@ -1,10 +1,11 @@
name: 🐞 Update Bug Report
on:
- workflow_run:
- workflows: ["🚀 Release Trigger"]
- types:
- - completed
workflow_dispatch:
release:
# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#release
types: [published]
jobs:
update-bug-report:


@ -0,0 +1,30 @@
""""Pre launch hook to remove launcher paths from the system."""
import os
from ayon_applications import PreLaunchHook
class PreRemoveLauncherPaths(PreLaunchHook):
"""Remove launcher paths from the system.
This hook removes launcher paths from the environment before launching
an application, so the application starts with the correct environment
variables. This matters especially on Windows, where paths in `PATH`
are used to load DLLs, and it helps avoid conflicts with other
applications that may ship the same DLLs in their paths.
"""
order = 1
def execute(self) -> None:
"""Execute the hook."""
# Remove launcher paths from the system
ayon_root = os.path.normpath(os.environ["AYON_ROOT"])
paths = [
path
for path in self.launch_context.env.get(
"PATH", "").split(os.pathsep)
if not os.path.normpath(path).startswith(ayon_root)
]
self.launch_context.env["PATH"] = os.pathsep.join(paths)
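The same filtering as a minimal standalone sketch (the paths are hypothetical, not from the repository):

import os

def strip_ayon_paths(path_value: str, ayon_root: str) -> str:
    """Drop PATH entries that live under the AYON launcher root."""
    ayon_root = os.path.normpath(ayon_root)
    kept = [
        path for path in path_value.split(os.pathsep)
        if not os.path.normpath(path).startswith(ayon_root)
    ]
    return os.pathsep.join(kept)

# Hypothetical values for illustration only.
path_value = os.pathsep.join(["/opt/ayon/dependencies", "/usr/bin"])
print(strip_ayon_paths(path_value, "/opt/ayon"))  # -> /usr/bin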


@ -1,15 +1,13 @@
import concurrent.futures
import os
import logging
- import sys
import errno
from concurrent.futures import ThreadPoolExecutor, Future
from typing import List, Optional
from ayon_core.lib import create_hard_link
- # this is needed until speedcopy for linux is fixed
- if sys.platform == "win32":
- from speedcopy import copyfile
- else:
- from shutil import copyfile
from speedcopy import copyfile
class DuplicateDestinationError(ValueError):
@ -109,41 +107,52 @@ class FileTransaction:
self._transfers[dst] = (src, opts)
def process(self):
- # Backup any existing files
- for dst, (src, _) in self._transfers.items():
- self.log.debug("Checking file ... {} -> {}".format(src, dst))
- path_same = self._same_paths(src, dst)
- if path_same or not os.path.exists(dst):
- continue
- # Backup original file
- # todo: add timestamp or uuid to ensure unique
- backup = dst + ".bak"
- self._backup_to_original[backup] = dst
- self.log.debug(
- "Backup existing file: {} -> {}".format(dst, backup))
- os.rename(dst, backup)
- # Copy the files to transfer
- for dst, (src, opts) in self._transfers.items():
- path_same = self._same_paths(src, dst)
- if path_same:
- self.log.debug(
- "Source and destination are same files {} -> {}".format(
- src, dst))
- continue
- self._create_folder_for_file(dst)
- if opts["mode"] == self.MODE_COPY:
- self.log.debug("Copying file ... {} -> {}".format(src, dst))
- copyfile(src, dst)
- elif opts["mode"] == self.MODE_HARDLINK:
- self.log.debug("Hardlinking file ... {} -> {}".format(
- src, dst))
- create_hard_link(src, dst)
- self._transferred.append(dst)
with ThreadPoolExecutor(max_workers=8) as executor:
# Submit backup tasks
backup_futures = [
executor.submit(self._backup_file, dst, src)
for dst, (src, _) in self._transfers.items()
]
wait_for_future_errors(
executor, backup_futures, logger=self.log)
# Submit transfer tasks
transfer_futures = [
executor.submit(self._transfer_file, dst, src, opts)
for dst, (src, opts) in self._transfers.items()
]
wait_for_future_errors(
executor, transfer_futures, logger=self.log)
def _backup_file(self, dst, src):
self.log.debug(f"Checking file ... {src} -> {dst}")
path_same = self._same_paths(src, dst)
if path_same or not os.path.exists(dst):
return
# Backup original file
backup = dst + ".bak"
self._backup_to_original[backup] = dst
self.log.debug(f"Backup existing file: {dst} -> {backup}")
os.rename(dst, backup)
def _transfer_file(self, dst, src, opts):
path_same = self._same_paths(src, dst)
if path_same:
self.log.debug(
f"Source and destination are same files {src} -> {dst}")
return
self._create_folder_for_file(dst)
if opts["mode"] == self.MODE_COPY:
self.log.debug(f"Copying file ... {src} -> {dst}")
copyfile(src, dst)
elif opts["mode"] == self.MODE_HARDLINK:
self.log.debug(f"Hardlinking file ... {src} -> {dst}")
create_hard_link(src, dst)
self._transferred.append(dst)
def finalize(self):
# Delete any backed up files
@ -212,3 +221,46 @@ class FileTransaction:
return os.stat(src) == os.stat(dst)
return src == dst
def wait_for_future_errors(
executor: ThreadPoolExecutor,
futures: List[Future],
logger: Optional[logging.Logger] = None):
"""For the ThreadPoolExecutor shutdown and cancel futures as soon one of
the workers raises an error as they complete.
The ThreadPoolExecutor only cancels pending futures on exception but will
still complete those that are running - each which also themselves could
fail. We log all exceptions but re-raise the last exception only.
"""
if logger is None:
logger = logging.getLogger(__name__)
for future in concurrent.futures.as_completed(futures):
exception = future.exception()
if exception:
# As soon as an error occurs, stop executing more futures.
# Running workers, however, will still run to completion, so we also
# want to log any errors that occurred in them.
executor.shutdown(wait=True, cancel_futures=True)
break
else:
# Futures are completed, no exceptions occurred
return
# An exception occurred in at least one future. Get exceptions from
# all futures that are done and ended up failing until that point.
exceptions = []
for future in futures:
if not future.cancelled() and future.done():
exception = future.exception()
if exception:
exceptions.append(exception)
# Log any exceptions that occurred in all workers
for exception in exceptions:
logger.error("Error occurred in worker", exc_info=exception)
# Raise the last exception
raise exceptions[-1]
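A minimal usage sketch of this helper; the worker function and the file list are made up for illustration:

import logging
from concurrent.futures import ThreadPoolExecutor

def copy_one(src, dst):
    # Stand-in worker; a real worker would copy `src` to `dst`.
    if src is None:
        raise ValueError("Missing source path")

sources = [("a.exr", "pub/a.exr"), (None, "pub/b.exr")]
with ThreadPoolExecutor(max_workers=8) as executor:
    futures = [
        executor.submit(copy_one, src, dst)
        for src, dst in sources
    ]
    # Cancels pending futures on the first failure, logs all worker
    # errors, then re-raises the last one (ValueError here).
    wait_for_future_errors(
        executor, futures, logger=logging.getLogger(__name__))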


@ -567,7 +567,7 @@ def convert_input_paths_for_ffmpeg(
):
"""Convert source file to format supported in ffmpeg.
- Currently, can convert only exrs. The input filepaths should be files
Can currently convert only EXRs. The input filepaths should be files
with same type. Information about input is loaded only from first found
file.
@ -594,10 +594,10 @@ def convert_input_paths_for_ffmpeg(
ext = os.path.splitext(first_input_path)[1].lower()
if ext != ".exr":
- raise ValueError((
- "Function 'convert_for_ffmpeg' currently support only"
- " \".exr\" extension. Got \"{}\"."
- ).format(ext))
raise ValueError(
"Function 'convert_input_paths_for_ffmpeg' currently supports"
f" only \".exr\" extension. Got \"{ext}\"."
)
input_info = get_oiio_info_for_input(first_input_path, logger=logger)


@ -462,8 +462,8 @@ class Anatomy(BaseAnatomy):
Union[Dict[str, str], None]): Local root overrides.
"""
if not project_name:
- return
- return ayon_api.get_project_roots_for_site(
return None
return ayon_api.get_project_root_overrides_by_site_id(
project_name, get_local_site_id()
)


@ -834,7 +834,7 @@ def _get_global_config_data(
if not product_entities_by_name:
# in case no product was found we need to use fallback
fallback_type = fallback_data["type"]
fallback_type = fallback_data["fallback_type"]
return _get_config_path_from_profile_data(
fallback_data, fallback_type, template_data
)


@ -52,15 +52,15 @@ def get_product_name_template(
# TODO remove formatting keys replacement
template = (
matching_profile["template"]
.replace("{task[name]}", "{task}")
.replace("{Task[name]}", "{Task}")
.replace("{TASK[NAME]}", "{TASK}")
.replace("{product[type]}", "{family}")
.replace("{Product[type]}", "{Family}")
.replace("{PRODUCT[TYPE]}", "{FAMILY}")
.replace("{folder[name]}", "{asset}")
.replace("{Folder[name]}", "{Asset}")
.replace("{FOLDER[NAME]}", "{ASSET}")
.replace("{task}", "{task[name]}")
.replace("{Task}", "{Task[name]}")
.replace("{TASK}", "{TASK[NAME]}")
.replace("{family}", "{product[type]}")
.replace("{Family}", "{Product[type]}")
.replace("{FAMILY}", "{PRODUCT[TYPE]}")
.replace("{asset}", "{folder[name]}")
.replace("{Asset}", "{Folder[name]}")
.replace("{ASSET}", "{FOLDER[NAME]}")
)
# Make sure template is set (matching may have empty string)


@ -221,19 +221,6 @@ class LoaderPlugin(list):
"""
return cls.options or []
- @property
- def fname(self):
- """Backwards compatibility with deprecation warning"""
- self.log.warning((
- "DEPRECATION WARNING: Source - Loader plugin {}."
- " The 'fname' property on the Loader plugin will be removed in"
- " future versions of OpenPype. Planned version to drop the support"
- " is 3.16.6 or 3.17.0."
- ).format(self.__class__.__name__))
- if hasattr(self, "_fname"):
- return self._fname
@classmethod
def get_representation_name_aliases(cls, representation_name: str):
"""Return representation names to which switching is allowed from


@ -316,12 +316,6 @@ def load_with_repre_context(
)
loader = Loader()
- # Backwards compatibility: Originally the loader's __init__ required the
- # representation context to set `fname` attribute to the filename to load
- # Deprecated - to be removed in OpenPype 3.16.6 or 3.17.0.
- loader._fname = get_representation_path_from_context(repre_context)
return loader.load(repre_context, name, namespace, options)


@ -1,11 +1,14 @@
# -*- coding: utf-8 -*-
"""Cleanup leftover files from publish."""
import os
- import shutil
- import pyblish.api
import re
import shutil
import tempfile
import pyblish.api
from ayon_core.lib import is_in_tests
from ayon_core.pipeline import PublishError
class CleanUp(pyblish.api.InstancePlugin):
@ -48,17 +51,15 @@ class CleanUp(pyblish.api.InstancePlugin):
if is_in_tests():
# let automatic test process clean up temporary data
return
- # Get the errored instances
- failed = []
# If instance has errors, do not clean up
for result in instance.context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
assert instance not in failed, (
"Result of '{}' instance were not success".format(
instance.data["name"]
)
)
if result["error"] is not None and result["instance"] is instance:
raise PublishError(
"Result of '{}' instance were not success".format(
instance.data["name"]
)
)
_skip_cleanup_filepaths = instance.context.data.get(
"skipCleanupFilepaths"
@ -71,10 +72,17 @@ class CleanUp(pyblish.api.InstancePlugin):
self.log.debug("Cleaning renders new...")
self.clean_renders(instance, skip_cleanup_filepaths)
- if [ef for ef in self.exclude_families
- if instance.data["productType"] in ef]:
# TODO: Figure out whether this could be refactored to just a
# product_type in self.exclude_families check.
product_type = instance.data["productType"]
if any(
product_type in exclude_family
for exclude_family in self.exclude_families
):
self.log.debug(
"Skipping cleanup for instance because product "
f"type is excluded from cleanup: {product_type}")
return
- import tempfile
temp_root = tempfile.gettempdir()
staging_dir = instance.data.get("stagingDir", None)


@ -0,0 +1,106 @@
import pyblish.api
from ayon_core.lib import EnumDef
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import PublishError
class CollectExplicitResolution(
pyblish.api.InstancePlugin,
publish.AYONPyblishPluginMixin,
):
"""Collect explicit user defined resolution attributes for instances"""
label = "Choose Explicit Resolution"
order = pyblish.api.CollectorOrder - 0.091
settings_category = "core"
enabled = False
default_resolution_item = (None, "Don't override")
# Settings
product_types = []
options = []
# caching resolution items
resolution_items = None
def process(self, instance):
"""Process the instance and collect explicit resolution attributes"""
# Get the values from the instance data
values = self.get_attr_values_from_data(instance.data)
resolution_value = values.get("explicit_resolution", None)
if resolution_value is None:
return
# Get the width, height and pixel_aspect from the resolution value
resolution_data = self._get_resolution_values(resolution_value)
# Set the values to the instance data
instance.data.update(resolution_data)
def _get_resolution_values(self, resolution_value):
"""
Returns width, height and pixel_aspect from the resolution value
Arguments:
resolution_value (str): resolution value
Returns:
dict: dictionary with width, height and pixel_aspect
"""
resolution_items = self._get_resolution_items()
# ensure resolution_value is part of expected items
item_values = resolution_items.get(resolution_value)
# if the item is in the cache, get the values from it
if item_values:
return {
"resolutionWidth": item_values["width"],
"resolutionHeight": item_values["height"],
"pixelAspect": item_values["pixel_aspect"],
}
raise PublishError(
f"Invalid resolution value: {resolution_value} "
f"expected choices: {resolution_items}"
)
@classmethod
def _get_resolution_items(cls):
if cls.resolution_items is None:
resolution_items = {}
for item in cls.options:
item_text = (
f"{item['width']}x{item['height']} "
f"({item['pixel_aspect']})"
)
resolution_items[item_text] = item
cls.resolution_items = resolution_items
return cls.resolution_items
@classmethod
def get_attr_defs_for_instance(
cls, create_context, instance,
):
if instance.product_type not in cls.product_types:
return []
# Get the resolution items
resolution_items = cls._get_resolution_items()
items = [cls.default_resolution_item]
# Add all cached resolution items to the dropdown options
for item_text in resolution_items:
items.append((item_text, item_text))
return [
EnumDef(
"explicit_resolution",
items,
default="Don't override",
label="Force product resolution",
),
]
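A quick sketch of how the settings `options` above turn into the cached enum labels (the option values are hypothetical):

# Hypothetical settings payload for the plugin's `options` attribute.
options = [
    {"width": 1920, "height": 1080, "pixel_aspect": 1.0},
    {"width": 2048, "height": 858, "pixel_aspect": 2.0},
]

# Mirrors CollectExplicitResolution._get_resolution_items()
resolution_items = {
    f"{item['width']}x{item['height']} ({item['pixel_aspect']})": item
    for item in options
}
print(list(resolution_items))
# ['1920x1080 (1.0)', '2048x858 (2.0)']
# Picking '2048x858 (2.0)' sets resolutionWidth=2048,
# resolutionHeight=858 and pixelAspect=2.0 on the instance.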


@ -32,16 +32,16 @@ class CollectManagedStagingDir(pyblish.api.InstancePlugin):
label = "Collect Managed Staging Directory"
order = pyblish.api.CollectorOrder + 0.4990
- def process(self, instance):
def process(self, instance: pyblish.api.Instance):
""" Collect the staging data and stores it to the instance.
Args:
instance (object): The instance to inspect.
"""
staging_dir_path = get_instance_staging_dir(instance)
- persistance = instance.data.get("stagingDir_persistent", False)
persistence: bool = instance.data.get("stagingDir_persistent", False)
- self.log.info((
self.log.debug(
f"Instance staging dir was set to `{staging_dir_path}` "
f"and persistence is set to `{persistance}`"
))
f"and persistence is set to `{persistence}`"
)


@ -54,7 +54,7 @@ class ExtractOTIOReview(
# plugin default attributes
to_width = 1280
to_height = 720
output_ext = ".jpg"
output_ext = ".png"
def process(self, instance):
# Not all hosts can import these modules.
@ -510,6 +510,12 @@ class ExtractOTIOReview(
"-tune", "stillimage"
])
if video or sequence:
command.extend([
"-vf", f"scale={self.to_width}:{self.to_height}:flags=lanczos",
"-compression_level", "5",
])
# add output attributes
command.extend([
"-start_number", str(out_frame_start)
@ -520,9 +526,10 @@ class ExtractOTIOReview(
input_extension
and self.output_ext == input_extension
):
- command.extend([
- "-c", "copy"
- ])
command.extend(["-c", "copy"])
else:
# For lossy formats, force re-encode
command.extend(["-pix_fmt", "rgba"])
# add output path at the end
command.append(output_path)


@ -5,11 +5,15 @@ import json
import shutil
import subprocess
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional
import tempfile
import clique
import speedcopy
import pyblish.api
from ayon_api import get_last_version_by_product_name, get_representations
from ayon_core.lib import (
get_ffmpeg_tool_args,
filter_profiles,
@ -400,15 +404,73 @@ class ExtractReview(pyblish.api.InstancePlugin):
)
temp_data = self.prepare_temp_data(instance, repre, output_def)
- files_to_clean = []
new_frame_files = {}
if temp_data["input_is_sequence"]:
self.log.debug("Checking sequence to fill gaps in sequence..")
- files_to_clean = self.fill_sequence_gaps(
- files=temp_data["origin_repre"]["files"],
- staging_dir=new_repre["stagingDir"],
- start_frame=temp_data["frame_start"],
- end_frame=temp_data["frame_end"]
- )
files = temp_data["origin_repre"]["files"]
collections = clique.assemble(
files,
)[0]
if len(collections) != 1:
raise KnownPublishError(
"Multiple collections {} found.".format(collections))
collection = collections[0]
fill_missing_frames = _output_def["fill_missing_frames"]
if fill_missing_frames == "closest_existing":
new_frame_files = self.fill_sequence_gaps_from_existing(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"],
)
elif fill_missing_frames == "blank":
new_frame_files = self.fill_sequence_gaps_with_blanks(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"],
resolution_width=temp_data["resolution_width"],
resolution_height=temp_data["resolution_height"],
extension=temp_data["input_ext"],
temp_data=temp_data
)
elif fill_missing_frames == "previous_version":
new_frame_files = self.fill_sequence_gaps_with_previous(
collection=collection,
staging_dir=new_repre["stagingDir"],
instance=instance,
current_repre_name=repre["name"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"],
)
# fallback to original workflow
if new_frame_files is None:
new_frame_files = (
self.fill_sequence_gaps_from_existing(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"],
))
elif fill_missing_frames == "only_rendered":
temp_data["explicit_input_paths"] = [
os.path.join(
new_repre["stagingDir"], file
).replace("\\", "/")
for file in files
]
frame_start = min(collection.indexes)
frame_end = max(collection.indexes)
# modify range for burnins
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
temp_data["frame_start"] = frame_start
temp_data["frame_end"] = frame_end
temp_data["filled_files"] = new_frame_files
# create or update outputName
output_name = new_repre.get("outputName", "")
@ -465,9 +527,12 @@ class ExtractReview(pyblish.api.InstancePlugin):
run_subprocess(subprcs_cmd, shell=True, logger=self.log)
# delete files added to fill gaps
- if files_to_clean:
- for f in files_to_clean:
- os.unlink(f)
if new_frame_files:
for filepath in new_frame_files.values():
os.unlink(filepath)
for filepath in temp_data["paths_to_remove"]:
os.unlink(filepath)
new_repre.update({
"fps": temp_data["fps"],
@ -560,6 +625,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
input_is_sequence = self.input_is_sequence(repre)
input_allow_bg = False
first_sequence_frame = None
if input_is_sequence and repre["files"]:
# Calculate first frame that should be used
cols, _ = clique.assemble(repre["files"])
@ -578,6 +644,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
ext = os.path.splitext(repre["files"][0])[1].replace(".", "")
if ext.lower() in self.alpha_exts:
input_allow_bg = True
else:
ext = os.path.splitext(repre["files"])[1].replace(".", "")
return {
"fps": float(instance.data["fps"]),
@ -598,7 +666,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
"input_allow_bg": input_allow_bg,
"with_audio": with_audio,
"without_handles": without_handles,
"handles_are_set": handles_are_set
"handles_are_set": handles_are_set,
"input_ext": ext,
"explicit_input_paths": [], # absolute paths to rendered files
"paths_to_remove": []
}
def _ffmpeg_arguments(
@ -680,7 +751,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
if layer_name:
ffmpeg_input_args.extend(["-layer", layer_name])
if temp_data["input_is_sequence"]:
explicit_input_paths = temp_data["explicit_input_paths"]
if temp_data["input_is_sequence"] and not explicit_input_paths:
# Set start frame of input sequence (just frame in filename)
# - definition of input filepath
# - add handle start if output should be without handles
@ -707,7 +779,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
"-to", "{:0.10f}".format(duration_seconds)
])
if temp_data["output_is_sequence"]:
if temp_data["output_is_sequence"] and not explicit_input_paths:
# Set start frame of output sequence (just frame in filename)
# - this is definition of an output
ffmpeg_output_args.extend([
@ -738,10 +810,34 @@ class ExtractReview(pyblish.api.InstancePlugin):
"-frames:v", str(output_frames_len)
])
- # Add video/image input path
- ffmpeg_input_args.extend([
- "-i", path_to_subprocess_arg(temp_data["full_input_path"])
- ])
if not explicit_input_paths:
# Add video/image input path
ffmpeg_input_args.extend([
"-i", path_to_subprocess_arg(temp_data["full_input_path"])
])
else:
frame_duration = 1 / temp_data["fps"]
explicit_frames_meta = tempfile.NamedTemporaryFile(
mode="w", prefix="explicit_frames", suffix=".txt", delete=False
)
explicit_frames_meta.close()
explicit_frames_path = explicit_frames_meta.name
with open(explicit_frames_path, "w") as fp:
lines = [
f"file '{path}'{os.linesep}duration {frame_duration}"
for path in temp_data["explicit_input_paths"]
]
fp.write("\n".join(lines))
temp_data["paths_to_remove"].append(explicit_frames_path)
# let ffmpeg use only rendered files, might have gaps
ffmpeg_input_args.extend([
"-f", "concat",
"-safe", "0",
"-i", path_to_subprocess_arg(explicit_frames_path),
"-r", str(temp_data["fps"])
])
# Add audio arguments if there are any. Skipped when output are images.
if not temp_data["output_ext_is_image"] and temp_data["with_audio"]:
@ -881,8 +977,159 @@ class ExtractReview(pyblish.api.InstancePlugin):
return all_args
- def fill_sequence_gaps(self, files, staging_dir, start_frame, end_frame):
- # type: (list, str, int, int) -> list
def fill_sequence_gaps_with_previous(
self,
collection: str,
staging_dir: str,
instance: pyblish.plugin.Instance,
current_repre_name: str,
start_frame: int,
end_frame: int
) -> Optional[Dict[int, str]]:
"""Tries to replace missing frames from ones from last version"""
repre_file_paths = self._get_last_version_files(
instance, current_repre_name)
if repre_file_paths is None:
# issues in getting last version files, falling back
return None
prev_collection = clique.assemble(
repre_file_paths,
patterns=[clique.PATTERNS["frames"]],
minimum_items=1
)[0][0]
prev_col_format = prev_collection.format("{head}{padding}{tail}")
added_files = {}
anatomy = instance.context.data["anatomy"]
col_format = collection.format("{head}{padding}{tail}")
for frame in range(start_frame, end_frame + 1):
if frame in collection.indexes:
continue
hole_fpath = os.path.join(staging_dir, col_format % frame)
previous_version_path = prev_col_format % frame
previous_version_path = anatomy.fill_root(previous_version_path)
if not os.path.exists(previous_version_path):
self.log.warning(
"Missing frame should be replaced from "
f"'{previous_version_path}' but that doesn't exist. "
"Falling back to filling from currently last rendered."
)
return None
self.log.warning(
f"Replacing missing '{hole_fpath}' with "
f"'{previous_version_path}'"
)
speedcopy.copyfile(previous_version_path, hole_fpath)
added_files[frame] = hole_fpath
return added_files
def _get_last_version_files(
self,
instance: pyblish.plugin.Instance,
current_repre_name: str,
):
product_name = instance.data["productName"]
project_name = instance.data["projectEntity"]["name"]
folder_entity = instance.data["folderEntity"]
version_entity = get_last_version_by_product_name(
project_name,
product_name,
folder_entity["id"],
fields={"id"}
)
if not version_entity:
return None
matching_repres = get_representations(
project_name,
version_ids=[version_entity["id"]],
representation_names=[current_repre_name],
fields={"files"}
)
if not matching_repres:
return None
matching_repre = list(matching_repres)[0]
repre_file_paths = [
file_info["path"]
for file_info in matching_repre["files"]
]
return repre_file_paths
def fill_sequence_gaps_with_blanks(
self,
collection: str,
staging_dir: str,
start_frame: int,
end_frame: int,
resolution_width: int,
resolution_height: int,
extension: str,
temp_data: Dict[str, Any]
) -> Optional[Dict[int, str]]:
"""Fills missing files by blank frame."""
blank_frame_path = None
added_files = {}
col_format = collection.format("{head}{padding}{tail}")
for frame in range(start_frame, end_frame + 1):
if frame in collection.indexes:
continue
hole_fpath = os.path.join(staging_dir, col_format % frame)
if blank_frame_path is None:
blank_frame_path = self._create_blank_frame(
staging_dir, extension, resolution_width, resolution_height
)
temp_data["paths_to_remove"].append(blank_frame_path)
speedcopy.copyfile(blank_frame_path, hole_fpath)
added_files[frame] = hole_fpath
return added_files
def _create_blank_frame(
self,
staging_dir,
extension,
resolution_width,
resolution_height
):
blank_frame_path = os.path.join(staging_dir, f"blank.{extension}")
command = get_ffmpeg_tool_args(
"ffmpeg",
"-f", "lavfi",
"-i", "color=c=black:s={}x{}:d=1".format(
resolution_width, resolution_height
),
"-tune", "stillimage",
"-frames:v", "1",
blank_frame_path
)
self.log.debug("Executing: {}".format(" ".join(command)))
output = run_subprocess(
command, logger=self.log
)
self.log.debug("Output: {}".format(output))
return blank_frame_path
def fill_sequence_gaps_from_existing(
self,
collection,
staging_dir: str,
start_frame: int,
end_frame: int
) -> Dict[int, str]:
"""Fill missing files in sequence by duplicating existing ones.
This will take the nearest previous frame file and copy it to fill the
@ -890,40 +1137,33 @@ class ExtractReview(pyblish.api.InstancePlugin):
hole ahead.
Args:
- files (list): List of representation files.
collection (clique.collection)
staging_dir (str): Path to staging directory.
start_frame (int): Sequence start (no matter what files are there)
end_frame (int): Sequence end (no matter what files are there)
Returns:
- list of added files. Those should be cleaned after work
dict[int, str] of added files. Those should be cleaned after work
is done.
- Raises:
- KnownPublishError: if more than one collection is obtained.
"""
- collections = clique.assemble(files)[0]
- if len(collections) != 1:
- raise KnownPublishError(
- "Multiple collections {} found.".format(collections))
- col = collections[0]
# Prepare which hole is filled with what frame
# - the frame is filled only with already existing frames
- prev_frame = next(iter(col.indexes))
prev_frame = next(iter(collection.indexes))
hole_frame_to_nearest = {}
for frame in range(int(start_frame), int(end_frame) + 1):
- if frame in col.indexes:
if frame in collection.indexes:
prev_frame = frame
else:
# Use previous frame as source for hole
hole_frame_to_nearest[frame] = prev_frame
# Calculate paths
- added_files = []
- col_format = col.format("{head}{padding}{tail}")
added_files = {}
col_format = collection.format("{head}{padding}{tail}")
for hole_frame, src_frame in hole_frame_to_nearest.items():
hole_fpath = os.path.join(staging_dir, col_format % hole_frame)
src_fpath = os.path.join(staging_dir, col_format % src_frame)
@ -932,7 +1172,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
"Missing previously detected file: {}".format(src_fpath))
speedcopy.copyfile(src_fpath, hole_fpath)
- added_files.append(hole_fpath)
added_files[hole_frame] = hole_fpath
return added_files
@ -978,6 +1218,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
# Make sure to have full path to one input file
full_input_path_single_file = full_input_path
filled_files = temp_data["filled_files"]
if filled_files:
first_frame, first_file = next(iter(filled_files.items()))
if first_file < full_input_path_single_file:
self.log.warning(f"Using filled frame: '{first_file}'")
full_input_path_single_file = first_file
temp_data["first_sequence_frame"] = first_frame
filename_suffix = output_def["filename_suffix"]
output_ext = output_def.get("ext")


@ -17,7 +17,7 @@ from ayon_core.lib import (
)
from ayon_core.lib.transcoding import convert_colorspace
- from ayon_core.lib.transcoding import VIDEO_EXTENSIONS
from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
class ExtractThumbnail(pyblish.api.InstancePlugin):
@ -336,7 +336,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
return need_thumb_repres
def _get_filtered_repres(self, instance):
- filtered_repres = []
review_repres = []
other_repres = []
src_repres = instance.data.get("representations") or []
for repre in src_repres:
@ -348,17 +349,36 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# to be published locally
continue
if "review" not in tags:
continue
if not repre.get("files"):
self.log.debug((
"Representation \"{}\" doesn't have files. Skipping"
).format(repre["name"]))
continue
- filtered_repres.append(repre)
- return filtered_repres
if "review" in tags:
review_repres.append(repre)
elif self._is_valid_images_repre(repre):
other_repres.append(repre)
return review_repres + other_repres
def _is_valid_images_repre(self, repre):
"""Check if representation contains valid image files
Args:
repre (dict): representation
Returns:
bool: whether the representation has the valid image content
"""
# Get first file's extension
first_file = repre["files"]
if isinstance(first_file, (list, tuple)):
first_file = first_file[0]
ext = os.path.splitext(first_file)[1].lower()
return ext in IMAGE_EXTENSIONS or ext in VIDEO_EXTENSIONS
def _create_thumbnail_oiio(
self,
@ -486,27 +506,36 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# Set video input attributes
max_int = str(2147483647)
video_data = get_ffprobe_data(video_file_path, logger=self.log)
- # Use duration of the individual streams since it is returned with
- # higher decimal precision than 'format.duration'. We need this
- # more precise value for calculating the correct amount of frames
- # for higher FPS ranges or decimal ranges, e.g. 29.97 FPS
- duration = max(
- float(stream.get("duration", 0))
- for stream in video_data["streams"]
- if stream.get("codec_type") == "video"
- )
- cmd_args = [
- "-y",
- "-ss", str(duration * self.duration_split),
# Get duration or use a safe default (single frame)
duration = 0
for stream in video_data["streams"]:
if stream.get("codec_type") == "video":
stream_duration = float(stream.get("duration", 0))
if stream_duration > duration:
duration = stream_duration
# For very short videos, just use the first frame
# Calculate seek position safely
seek_position = 0.0
# Only use timestamp calculation for videos longer than 0.1 seconds
if duration > 0.1:
seek_position = duration * self.duration_split
# Build command args
cmd_args = []
if seek_position > 0.0:
cmd_args.extend(["-ss", str(seek_position)])
# Add generic ffmpeg commands
cmd_args.extend([
"-i", video_file_path,
"-analyzeduration", max_int,
"-probesize", max_int,
"-frames:v", "1"
]
# add output file path
cmd_args.append(output_thumb_file_path)
"-y",
"-frames:v", "1",
output_thumb_file_path
])
# create ffmpeg command
cmd = get_ffmpeg_tool_args(
@ -517,15 +546,53 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# run subprocess
self.log.debug("Executing: {}".format(" ".join(cmd)))
run_subprocess(cmd, logger=self.log)
- self.log.debug(
- "Thumbnail created: {}".format(output_thumb_file_path))
- return output_thumb_file_path
# Verify the output file was created
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) > 0
):
self.log.debug(
"Thumbnail created: {}".format(output_thumb_file_path))
return output_thumb_file_path
self.log.warning("Output file was not created or is empty")
# Try to create thumbnail without offset
# - skip if offset did not happen
if "-ss" not in cmd_args:
return None
self.log.debug("Trying fallback without offset")
# Remove -ss and its value
ss_index = cmd_args.index("-ss")
cmd_args.pop(ss_index) # Remove -ss
cmd_args.pop(ss_index) # Remove the timestamp value
# Create new command and try again
cmd = get_ffmpeg_tool_args("ffmpeg", *cmd_args)
self.log.debug("Fallback command: {}".format(" ".join(cmd)))
run_subprocess(cmd, logger=self.log)
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) > 0
):
self.log.debug("Fallback thumbnail created")
return output_thumb_file_path
return None
except RuntimeError as error:
self.log.warning(
"Failed intermediate thumb source using ffmpeg: {}".format(
error)
)
return None
finally:
# Remove output file if is empty
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) == 0
):
os.remove(output_thumb_file_path)
def _get_resolution_arg(
self,


@ -1,7 +1,11 @@
import os
import copy
import errno
import itertools
import shutil
from concurrent.futures import ThreadPoolExecutor
from speedcopy import copyfile
import clique
import pyblish.api
@ -13,6 +17,7 @@ from ayon_api.operations import (
from ayon_api.utils import create_entity_id
from ayon_core.lib import create_hard_link, source_hash
from ayon_core.lib.file_transaction import wait_for_future_errors
from ayon_core.pipeline.publish import (
get_publish_template_name,
OptionalPyblishPluginMixin,
@ -415,11 +420,14 @@ class IntegrateHeroVersion(
# Copy(hardlink) paths of source and destination files
# TODO should we *only* create hardlinks?
# TODO should we keep files for deletion until this is successful?
- for src_path, dst_path in src_to_dst_file_paths:
- self.copy_file(src_path, dst_path)
- for src_path, dst_path in other_file_paths_mapping:
- self.copy_file(src_path, dst_path)
with ThreadPoolExecutor(max_workers=8) as executor:
futures = [
executor.submit(self.copy_file, src_path, dst_path)
for src_path, dst_path in itertools.chain(
src_to_dst_file_paths, other_file_paths_mapping
)
]
wait_for_future_errors(executor, futures)
# Update prepared representation entity data with files
# and integrate it to server.
@ -648,7 +656,7 @@ class IntegrateHeroVersion(
src_path, dst_path
))
- shutil.copy(src_path, dst_path)
copyfile(src_path, dst_path)
def version_from_representations(self, project_name, repres):
for repre in repres:


@ -1,4 +1,5 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass, asdict
from typing import (
Optional,
Dict,
@ -28,6 +29,19 @@ if TYPE_CHECKING:
from .models import CreatorItem, PublishErrorInfo, InstanceItem
@dataclass
class CommentDef:
"""Comment attribute definition."""
minimum_chars_required: int
def to_data(self):
return asdict(self)
@classmethod
def from_data(cls, data):
return cls(**data)
class CardMessageTypes:
standard = None
info = "info"
@ -135,6 +149,17 @@ class AbstractPublisherCommon(ABC):
pass
@abstractmethod
def get_comment_def(self) -> CommentDef:
"""Get comment attribute definition.
This can define how the Comment field should behave, like requiring
a minimum number of characters before publishing is allowed.
"""
pass
class AbstractPublisherBackend(AbstractPublisherCommon):
@abstractmethod


@ -20,7 +20,8 @@ from .models import (
from .abstract import (
AbstractPublisherBackend,
AbstractPublisherFrontend,
- CardMessageTypes
CardMessageTypes,
CommentDef,
)
@ -601,3 +602,17 @@ class PublisherController(
def _start_publish(self, up_validation):
self._publish_model.set_publish_up_validation(up_validation)
self._publish_model.start_publish(wait=True)
def get_comment_def(self) -> CommentDef:
# Take the cached settings from the Create Context
settings = self.get_create_context().get_current_project_settings()
comment_minimum_required_chars: int = (
settings
.get("core", {})
.get("tools", {})
.get("publish", {})
.get("comment_minimum_required_chars", 0)
)
return CommentDef(
minimum_chars_required=comment_minimum_required_chars
)


@ -245,6 +245,13 @@ class PublisherWindow(QtWidgets.QDialog):
show_timer.setInterval(1)
show_timer.timeout.connect(self._on_show_timer)
comment_invalid_timer = QtCore.QTimer()
comment_invalid_timer.setSingleShot(True)
comment_invalid_timer.setInterval(2500)
comment_invalid_timer.timeout.connect(
self._on_comment_invalid_timeout
)
errors_dialog_message_timer = QtCore.QTimer()
errors_dialog_message_timer.setInterval(100)
errors_dialog_message_timer.timeout.connect(
@ -395,6 +402,7 @@ class PublisherWindow(QtWidgets.QDialog):
self._app_event_listener_installed = False
self._show_timer = show_timer
self._comment_invalid_timer = comment_invalid_timer
self._show_counter = 0
self._window_is_visible = False
@ -823,15 +831,45 @@ class PublisherWindow(QtWidgets.QDialog):
self._controller.set_comment(self._comment_input.text())
def _on_validate_clicked(self):
- if self._save_changes(False):
if self._validate_comment() and self._save_changes(False):
self._set_publish_comment()
self._controller.validate()
def _on_publish_clicked(self):
- if self._save_changes(False):
if self._validate_comment() and self._save_changes(False):
self._set_publish_comment()
self._controller.publish()
def _validate_comment(self) -> bool:
# Validate comment length
comment_def = self._controller.get_comment_def()
char_count = len(self._comment_input.text().strip())
if (
comment_def.minimum_chars_required
and char_count < comment_def.minimum_chars_required
):
self._overlay_object.add_message(
"Please enter a comment of at least "
f"{comment_def.minimum_chars_required} characters",
message_type="error"
)
self._invalidate_comment_field()
return False
return True
def _invalidate_comment_field(self):
self._comment_invalid_timer.start()
self._comment_input.setStyleSheet("border-color: #DD2020")
# Set focus so user can start typing and is pointed towards the field
self._comment_input.setFocus()
self._comment_input.setCursorPosition(
len(self._comment_input.text())
)
def _on_comment_invalid_timeout(self):
# Reset style
self._comment_input.setStyleSheet("")
def _set_footer_enabled(self, enabled):
self._save_btn.setEnabled(True)
self._reset_btn.setEnabled(True)


@ -5,12 +5,32 @@ CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def get_font_filepath(
font_name: Optional[str] = "MaterialSymbolsOutlined"
font_name: Optional[str] = "MaterialSymbolsOutlined-Regular"
) -> str:
return os.path.join(CURRENT_DIR, f"{font_name}.ttf")
def get_mapping_filepath(
font_name: Optional[str] = "MaterialSymbolsOutlined"
font_name: Optional[str] = "MaterialSymbolsOutlined-Regular"
) -> str:
return os.path.join(CURRENT_DIR, f"{font_name}.json")
def regenerate_mapping():
"""Regenerate the MaterialSymbolsOutlined.json file, assuming
MaterialSymbolsOutlined.codepoints and the TrueType font file have been
updated to support the new symbols.
"""
import json
jfile = get_mapping_filepath()
cpfile = jfile.replace(".json", ".codepoints")
with open(cpfile, "r") as cpf:
codepoints = cpf.read()
mapping = {}
for cp in codepoints.splitlines():
name, code = cp.split()
mapping[name] = int(f"0x{code}", 16)
with open(jfile, "w") as jf:
json.dump(mapping, jf, indent=4)
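For reference, each line of a Material Symbols `.codepoints` file pairs a symbol name with a hex code; a small sketch of the parsing above (the two entries are illustrative):

# Two illustrative lines from a .codepoints file.
codepoints = "home e88a\nsearch e8b6"

mapping = {}
for cp in codepoints.splitlines():
    name, code = cp.split()
    mapping[name] = int(f"0x{code}", 16)

print(mapping)  # {'home': 59530, 'search': 59574}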


@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'core' version."""
__version__ = "1.1.8+dev"
__version__ = "1.3.2+dev"


@ -1,14 +1,17 @@
name = "core"
title = "Core"
version = "1.1.8+dev"
version = "1.3.2+dev"
client_dir = "ayon_core"
plugin_for = ["ayon_server"]
ayon_server_version = ">=1.0.3,<2.0.0"
ayon_server_version = ">=1.7.6,<2.0.0"
ayon_launcher_version = ">=1.0.2"
ayon_required_addons = {}
ayon_compatible_addons = {
"ayon_ocio": ">=1.2.1",
"harmony": ">0.4.0",
"fusion": ">=0.3.3",
"openrv": ">=1.0.2",
}


@ -5,7 +5,7 @@
[tool.poetry]
name = "ayon-core"
version = "1.1.8+dev"
version = "1.3.2+dev"
description = ""
authors = ["Ynput Team <team@ynput.io>"]
readme = "README.md"
@ -20,7 +20,7 @@ pytest = "^8.0"
pytest-print = "^1.0"
ayon-python-api = "^1.0"
# linting dependencies
ruff = "^0.3.3"
ruff = "0.11.7"
pre-commit = "^3.6.2"
codespell = "^2.2.6"
semver = "^3.0.2"
@ -41,82 +41,6 @@ pymdown-extensions = "^10.14.3"
mike = "^2.1.3"
mkdocstrings-shell = "^1.0.2"
- [tool.ruff]
- # Exclude a variety of commonly ignored directories.
- exclude = [
- ".bzr",
- ".direnv",
- ".eggs",
- ".git",
- ".git-rewrite",
- ".hg",
- ".ipynb_checkpoints",
- ".mypy_cache",
- ".nox",
- ".pants.d",
- ".pyenv",
- ".pytest_cache",
- ".pytype",
- ".ruff_cache",
- ".svn",
- ".tox",
- ".venv",
- ".vscode",
- "__pypackages__",
- "_build",
- "buck-out",
- "build",
- "dist",
- "node_modules",
- "site-packages",
- "venv",
- "vendor",
- "generated",
- ]
- # Same as Black.
- line-length = 79
- indent-width = 4
- # Assume Python 3.9
- target-version = "py39"
- [tool.ruff.lint]
- preview = true
- pydocstyle.convention = "google"
- # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
- select = ["E", "F", "W"]
- ignore = []
- # Allow fix for all enabled rules (when `--fix`) is provided.
- fixable = ["ALL"]
- unfixable = []
- # Allow unused variables when underscore-prefixed.
- dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
- exclude = [
- "client/ayon_core/modules/click_wrap.py",
- "client/ayon_core/scripts/slates/__init__.py"
- ]
- [tool.ruff.lint.per-file-ignores]
- "client/ayon_core/lib/__init__.py" = ["E402"]
- [tool.ruff.format]
- # Like Black, use double quotes for strings.
- quote-style = "double"
- # Like Black, indent with spaces, rather than tabs.
- indent-style = "space"
- # Like Black, respect magic trailing commas.
- skip-magic-trailing-comma = false
- # Like Black, automatically detect the appropriate line ending.
- line-ending = "auto"
[tool.codespell]
# Ignore words that are not in the dictionary.
ignore-words-list = "ayon,ynput,parms,parm,hda,developpement"
@ -125,7 +49,7 @@ ignore-words-list = "ayon,ynput,parms,parm,hda,developpement"
# Remove with next codespell release (>2.2.6)
ignore-regex = ".*codespell:ignore.*"
skip = "./.*,./package/*,*/vendor/*,*/unreal/integration/*,*/aftereffects/api/extension/js/libs/*"
skip = "./.*,./package/*,*/client/ayon_core/vendor/*"
count = true
quiet-level = 3

ruff.toml Normal file

@ -0,0 +1,86 @@
# Exclude a variety of commonly ignored directories.
exclude = [
".bzr",
".direnv",
".eggs",
".git",
".git-rewrite",
".hg",
".ipynb_checkpoints",
".mypy_cache",
".nox",
".pants.d",
".pyenv",
".pytest_cache",
".pytype",
".ruff_cache",
".svn",
".tox",
".venv",
".vscode",
"__pypackages__",
"_build",
"buck-out",
"build",
"dist",
"node_modules",
"site-packages",
"venv",
"vendor",
"generated",
]
# Same as Black.
line-length = 79
indent-width = 4
# Assume Python 3.9
target-version = "py39"
[lint]
preview = true
pydocstyle.convention = "google"
# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
select = ["E", "F", "W"]
ignore = []
# Allow fix for all enabled rules (when `--fix`) is provided.
fixable = ["ALL"]
unfixable = []
# Allow unused variables when underscore-prefixed.
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
exclude = [
"client/ayon_core/scripts/slates/__init__.py"
]
[lint.per-file-ignores]
"client/ayon_core/lib/__init__.py" = ["E402"]
[format]
# Like Black, use double quotes for strings.
quote-style = "double"
# Like Black, indent with spaces, rather than tabs.
indent-style = "space"
# Like Black, respect magic trailing commas.
skip-magic-trailing-comma = false
# Like Black, automatically detect the appropriate line ending.
line-ending = "auto"
# Enable auto-formatting of code examples in docstrings. Markdown,
# reStructuredText code/literal blocks and doctests are all supported.
#
# This is currently disabled by default, but it is planned for this
# to be opt-out in the future.
docstring-code-format = false
# Set the line length limit used when formatting code snippets in
# docstrings.
#
# This only has an effect when the `docstring-code-format` setting is
# enabled.
docstring-code-line-length = "dynamic"


@ -71,6 +71,24 @@ def _fallback_ocio_config_profile_types():
def _ocio_built_in_paths():
return [
{
"value": "{BUILTIN_OCIO_ROOT}/aces_2.0/studio-config-v3.0.0_aces-v2.0_ocio-v2.4.ocio", # noqa: E501
"label": "ACES 2.0 Studio (OCIO v2.4)",
"description": (
"Aces 2.0 Studio OCIO config file. Requires OCIO v2.4.")
},
{
"value": "{BUILTIN_OCIO_ROOT}/aces_1.3/studio-config-v1.0.0_aces-v1.3_ocio-v2.1.ocio", # noqa: E501
"label": "ACES 1.3 Studio (OCIO v2.1)",
"description": (
"Aces 1.3 Studio OCIO config file. Requires OCIO v2.1.")
},
{
"value": "{BUILTIN_OCIO_ROOT}/aces_1.3/studio-config-v1.0.0_aces-v1.3_ocio-v2.0.ocio", # noqa: E501
"label": "ACES 1.3 Studio (OCIO v2)",
"description": (
"Aces 1.3 Studio OCIO config file. Requires OCIO v2.")
},
{
"value": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
"label": "ACES 1.2",


@ -1,4 +1,5 @@
from pydantic import validator
from typing import Any
from ayon_server.settings import (
BaseSettingsModel,
@ -7,11 +8,21 @@ from ayon_server.settings import (
normalize_name,
ensure_unique_names,
task_types_enum,
anatomy_template_items_enum
)
from ayon_server.exceptions import BadRequestException
from ayon_server.types import ColorRGBA_uint8
def _handle_missing_frames_enum():
return [
{"value": "closest_existing", "label": "Use closest existing"},
{"value": "blank", "label": "Generate blank frame"},
{"value": "previous_version", "label": "Use previous version"},
{"value": "only_rendered", "label": "Use only rendered"},
]
class EnabledModel(BaseSettingsModel):
enabled: bool = SettingsField(True)
@ -157,6 +168,78 @@ class CollectUSDLayerContributionsModel(BaseSettingsModel):
return value
class ResolutionOptionsModel(BaseSettingsModel):
_layout = "compact"
width: int = SettingsField(
1920,
ge=0,
le=100000,
title="Width",
description=(
"Width resolution number value"),
placeholder="Width"
)
height: int = SettingsField(
1080,
title="Height",
ge=0,
le=100000,
description=(
"Height resolution number value"),
placeholder="Height"
)
pixel_aspect: float = SettingsField(
1.0,
title="Pixel aspect",
ge=0.0,
le=100000.0,
description=(
"Pixel Aspect resolution decimal number value"),
placeholder="Pixel aspect"
)
def ensure_unique_resolution_option(
objects: list[Any], field_name: str | None = None) -> None: # noqa: C901
"""Ensure a list of objects have unique option attributes.
This function checks if the list of objects has unique 'width',
'height' and 'pixel_aspect' properties.
"""
options = set()
for obj in objects:
item_test_text = f"{obj.width}x{obj.height}x{obj.pixel_aspect}"
if item_test_text in options:
raise BadRequestException(
f"Duplicate option '{item_test_text}'")
options.add(item_test_text)
class CollectExplicitResolutionModel(BaseSettingsModel):
enabled: bool = SettingsField(True, title="Enabled")
product_types: list[str] = SettingsField(
default_factory=list,
title="Product types",
description=(
"Only activate the attribute for following product types."
)
)
options: list[ResolutionOptionsModel] = SettingsField(
default_factory=list,
title="Resolution choices",
description=(
"Available resolution choices to be displayed in "
"the publishers attribute."
)
)
@validator("options")
def validate_unique_resolution_options(cls, value):
ensure_unique_resolution_option(value)
return value
class AyonEntityURIModel(BaseSettingsModel):
use_ayon_entity_uri: bool = SettingsField(
title="Use AYON Entity URI",
@ -642,6 +725,12 @@ class ExtractReviewOutputDefModel(BaseSettingsModel):
default_factory=ExtractReviewLetterBox,
title="Letter Box"
)
fill_missing_frames: str = SettingsField(
title="Handle missing frames",
default="closest_existing",
description="How to handle gaps in sequence frame ranges.",
enum_resolver=_handle_missing_frames_enum
)
@validator("name")
def validate_name(cls, value):
@ -889,7 +978,11 @@ class IntegrateANTemplateNameProfileModel(BaseSettingsModel):
default_factory=list,
title="Task names"
)
template_name: str = SettingsField("", title="Template name")
template_name: str = SettingsField(
"",
title="Template name",
enum_resolver=anatomy_template_items_enum(category="publish")
)
class IntegrateHeroTemplateNameProfileModel(BaseSettingsModel):
@ -910,7 +1003,11 @@ class IntegrateHeroTemplateNameProfileModel(BaseSettingsModel):
default_factory=list,
title="Task names"
)
template_name: str = SettingsField("", title="Template name")
template_name: str = SettingsField(
"",
title="Template name",
enum_resolver=anatomy_template_items_enum(category="hero")
)
class IntegrateHeroVersionModel(BaseSettingsModel):
@ -988,6 +1085,10 @@ class PublishPuginsModel(BaseSettingsModel):
title="Collect USD Layer Contributions",
)
)
CollectExplicitResolution: CollectExplicitResolutionModel = SettingsField(
default_factory=CollectExplicitResolutionModel,
title="Collect Explicit Resolution"
)
ValidateEditorialAssetName: ValidateBaseModel = SettingsField(
default_factory=ValidateBaseModel,
title="Validate Editorial Asset Name"
@ -1162,6 +1263,13 @@ DEFAULT_PUBLISH_VALUES = {
},
]
},
"CollectExplicitResolution": {
"enabled": True,
"product_types": [
"shot"
],
"options": []
},
"ValidateEditorialAssetName": {
"enabled": True,
"optional": False,
@ -1279,7 +1387,8 @@ DEFAULT_PUBLISH_VALUES = {
"fill_color": [0, 0, 0, 1.0],
"line_thickness": 0,
"line_color": [255, 0, 0, 1.0]
- }
},
"fill_missing_frames": "closest_existing"
},
{
"name": "h264",
@ -1329,7 +1438,8 @@ DEFAULT_PUBLISH_VALUES = {
"fill_color": [0, 0, 0, 1.0],
"line_thickness": 0,
"line_color": [255, 0, 0, 1.0]
- }
},
"fill_missing_frames": "closest_existing"
}
]
}

View file

@ -5,6 +5,7 @@ from ayon_server.settings import (
normalize_name,
ensure_unique_names,
task_types_enum,
anatomy_template_items_enum
)
@ -283,7 +284,34 @@ class PublishTemplateNameProfile(BaseSettingsModel):
task_names: list[str] = SettingsField(
default_factory=list, title="Task names"
)
template_name: str = SettingsField("", title="Template name")
template_name: str = SettingsField(
"",
title="Template name",
enum_resolver=anatomy_template_items_enum(category="publish")
)
class HeroTemplateNameProfile(BaseSettingsModel):
_layout = "expanded"
product_types: list[str] = SettingsField(
default_factory=list,
title="Product types"
)
# TODO this should use hosts enum
hosts: list[str] = SettingsField(default_factory=list, title="Hosts")
task_types: list[str] = SettingsField(
default_factory=list,
title="Task types",
enum_resolver=task_types_enum
)
task_names: list[str] = SettingsField(
default_factory=list, title="Task names"
)
template_name: str = SettingsField(
"",
title="Template name",
enum_resolver=anatomy_template_items_enum(category="hero")
)
class CustomStagingDirProfileModel(BaseSettingsModel):
@ -306,7 +334,11 @@ class CustomStagingDirProfileModel(BaseSettingsModel):
custom_staging_dir_persistent: bool = SettingsField(
False, title="Custom Staging Folder Persistent"
)
template_name: str = SettingsField("", title="Template Name")
template_name: str = SettingsField(
"",
title="Template name",
enum_resolver=anatomy_template_items_enum(category="staging")
)
class PublishToolModel(BaseSettingsModel):
@ -314,7 +346,7 @@ class PublishToolModel(BaseSettingsModel):
default_factory=list,
title="Template name profiles"
)
- hero_template_name_profiles: list[PublishTemplateNameProfile] = (
hero_template_name_profiles: list[HeroTemplateNameProfile] = (
SettingsField(
default_factory=list,
title="Hero template name profiles"
@ -326,6 +358,14 @@ class PublishToolModel(BaseSettingsModel):
title="Custom Staging Dir Profiles"
)
)
comment_minimum_required_chars: int = SettingsField(
0,
title="Publish comment minimum required characters",
description=(
"Minimum number of characters required in the comment field "
"before the publisher UI is allowed to continue publishing"
)
)
class GlobalToolsModel(BaseSettingsModel):
@ -639,6 +679,7 @@ DEFAULT_TOOLS_VALUES = {
"task_names": [],
"template_name": "simpleUnrealTextureHero"
}
- ]
],
"comment_minimum_required_chars": 0,
}
}


@ -103,17 +103,18 @@ def test_image_sequence_with_embedded_tc_and_handles_out_of_range():
# 10 head black handles generated from gap (991-1000)
"/path/to/ffmpeg -t 0.4166666666666667 -r 24.0 -f lavfi -i "
"color=c=black:s=1280x720 -tune stillimage -start_number 991 "
"C:/result/output.%04d.jpg",
"-pix_fmt rgba C:/result/output.%04d.png",
# 10 tail black handles generated from gap (1102-1111)
"/path/to/ffmpeg -t 0.4166666666666667 -r 24.0 -f lavfi -i "
"color=c=black:s=1280x720 -tune stillimage -start_number 1102 "
"C:/result/output.%04d.jpg",
"-pix_fmt rgba C:/result/output.%04d.png",
# Report from source exr (1001-1101) with enforce framerate
"/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i "
f"C:\\exr_embedded_tc{os.sep}output.%04d.exr -start_number 1001 "
"C:/result/output.%04d.jpg"
f"C:\\exr_embedded_tc{os.sep}output.%04d.exr "
"-vf scale=1280:720:flags=lanczos -compression_level 5 "
"-start_number 1001 -pix_fmt rgba C:/result/output.%04d.png"
]
assert calls == expected
@ -130,20 +131,23 @@ def test_image_sequence_and_handles_out_of_range():
expected = [
# 5 head black frames generated from gap (991-995)
"/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
" -tune stillimage -start_number 991 C:/result/output.%04d.jpg",
"/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720 "
"-tune stillimage -start_number 991 -pix_fmt rgba "
"C:/result/output.%04d.png",
# 9 tail black frames generated from gap (1097-1105)
"/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
" -tune stillimage -start_number 1097 C:/result/output.%04d.jpg",
"/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720 "
"-tune stillimage -start_number 1097 -pix_fmt rgba "
"C:/result/output.%04d.png",
# Report from source tiff (996-1096)
# 996-1000 = additional 5 head frames
# 1001-1095 = source range conformed to 25fps
# 1096-1096 = additional 1 tail frames
"/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i "
f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996"
f" C:/result/output.%04d.jpg"
f"C:\\tif_seq{os.sep}output.%04d.tif "
"-vf scale=1280:720:flags=lanczos -compression_level 5 "
"-start_number 996 -pix_fmt rgba C:/result/output.%04d.png"
]
assert calls == expected
@ -163,8 +167,9 @@ def test_movie_with_embedded_tc_no_gap_handles():
# - first_frame = 14 src - 10 (head tail) = frame 4 = 0.1666s
# - duration = 68fr (source) + 20fr (handles) = 88frames = 3.666s
"/path/to/ffmpeg -ss 0.16666666666666666 -t 3.6666666666666665 "
"-i C:\\data\\qt_embedded_tc.mov -start_number 991 "
"C:/result/output.%04d.jpg"
"-i C:\\data\\qt_embedded_tc.mov -vf scale=1280:720:flags=lanczos "
"-compression_level 5 -start_number 991 -pix_fmt rgba "
"C:/result/output.%04d.png"
]
assert calls == expected
@ -181,12 +186,14 @@ def test_short_movie_head_gap_handles():
expected = [
# 10 head black frames generated from gap (991-1000)
"/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
" -tune stillimage -start_number 991 C:/result/output.%04d.jpg",
" -tune stillimage -start_number 991 -pix_fmt rgba "
"C:/result/output.%04d.png",
# source range + 10 tail frames
# duration = 50fr (source) + 10fr (tail handle) = 60 fr = 2.4s
"/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4"
" -start_number 1001 C:/result/output.%04d.jpg"
"/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4 -vf "
"scale=1280:720:flags=lanczos -compression_level 5 "
"-start_number 1001 -pix_fmt rgba C:/result/output.%04d.png"
]
assert calls == expected
@ -204,13 +211,14 @@ def test_short_movie_tail_gap_handles():
# 10 tail black frames generated from gap (1067-1076)
"/path/to/ffmpeg -t 0.4166666666666667 -r 24.0 -f lavfi -i "
"color=c=black:s=1280x720 -tune stillimage -start_number 1067 "
"C:/result/output.%04d.jpg",
"-pix_fmt rgba C:/result/output.%04d.png",
# 10 head frames + source range
# duration = 10fr (head handle) + 66fr (source) = 76fr = 3.16s
"/path/to/ffmpeg -ss 1.0416666666666667 -t 3.1666666666666665 -i "
"C:\\data\\qt_no_tc_24fps.mov -start_number 991"
" C:/result/output.%04d.jpg"
"C:\\data\\qt_no_tc_24fps.mov -vf scale=1280:720:flags=lanczos "
"-compression_level 5 -start_number 991 -pix_fmt rgba "
"C:/result/output.%04d.png"
]
assert calls == expected
@ -239,62 +247,75 @@ def test_multiple_review_clips_no_gap():
# 10 head black frames generated from gap (991-1000)
'/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi'
' -i color=c=black:s=1280x720 -tune '
- 'stillimage -start_number 991 C:/result/output.%04d.jpg',
'stillimage -start_number 991 -pix_fmt rgba C:/result/output.%04d.png',
# Alternance 25fps tiff sequence and 24fps exr sequence
# for 100 frames each
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 1001 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1001 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
f'C:\\with_tc{os.sep}output.%04d.exr '
- '-start_number 1102 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1102 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 1198 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1198 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
f'C:\\with_tc{os.sep}output.%04d.exr '
- '-start_number 1299 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1299 -pix_fmt rgba C:/result/output.%04d.png',
# Repeated 25fps tiff sequence multiple times till the end
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 1395 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1395 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 1496 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1496 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 1597 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1597 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 1698 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1698 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 1799 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1799 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 1900 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1900 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 2001 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 2001 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 2102 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 2102 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
f'C:\\no_tc{os.sep}output.%04d.tif '
- '-start_number 2203 C:/result/output.%04d.jpg'
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 2203 -pix_fmt rgba C:/result/output.%04d.png'
]
assert calls == expected
@ -323,15 +344,17 @@ def test_multiple_review_clips_with_gap():
# Gap on review track (12 frames)
'/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi'
' -i color=c=black:s=1280x720 -tune '
- 'stillimage -start_number 991 C:/result/output.%04d.jpg',
'stillimage -start_number 991 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
f'C:\\with_tc{os.sep}output.%04d.exr '
- '-start_number 1003 C:/result/output.%04d.jpg',
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1003 -pix_fmt rgba C:/result/output.%04d.png',
'/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
f'C:\\with_tc{os.sep}output.%04d.exr '
- '-start_number 1091 C:/result/output.%04d.jpg'
'-vf scale=1280:720:flags=lanczos -compression_level 5 '
'-start_number 1091 -pix_fmt rgba C:/result/output.%04d.png'
]
assert calls == expected