Merge remote-tracking branch 'origin/develop' into feature/911-new-traits-based-integrator

Ondřej Samohel 2025-05-12 17:15:32 +02:00
commit 371bd0eb2a
No known key found for this signature in database
GPG key ID: 02376E18990A97C6
24 changed files with 547 additions and 339 deletions


@@ -35,6 +35,17 @@ body:
       label: Version
       description: What version are you running? Look to AYON Tray
       options:
+        - 1.2.0
+        - 1.1.9
+        - 1.1.8
+        - 1.1.7
+        - 1.1.6
+        - 1.1.5
+        - 1.1.4
+        - 1.1.3
+        - 1.1.2
+        - 1.1.1
+        - 1.1.0
         - 1.0.14
         - 1.0.13
         - 1.0.12


@@ -1,10 +1,11 @@
 name: 🐞 Update Bug Report
 on:
+  workflow_run:
+    workflows: ["🚀 Release Trigger"]
+    types:
+      - completed
   workflow_dispatch:
-  release:
-    # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#release
-    types: [published]

 jobs:
   update-bug-report:


@@ -0,0 +1,30 @@
+"""Pre launch hook to remove launcher paths from the system."""
+import os
+
+from ayon_applications import PreLaunchHook
+
+
+class PreRemoveLauncherPaths(PreLaunchHook):
+    """Remove launcher paths from the system.
+
+    This hook removes launcher paths from the environment before an
+    application is launched, so the application starts with the correct
+    environment variables. This matters especially on Windows, where
+    entries in `PATH` are used to load DLLs, and leftover launcher paths
+    may conflict with DLLs shipped by other applications.
+    """
+    order = 1
+
+    def execute(self) -> None:
+        """Execute the hook."""
+        # Remove launcher paths from the system
+        ayon_root = os.path.normpath(os.environ["AYON_ROOT"])
+        paths = [
+            path
+            for path in self.launch_context.env.get(
+                "PATH", "").split(os.pathsep)
+            if not os.path.normpath(path).startswith(ayon_root)
+        ]
+        self.launch_context.env["PATH"] = os.pathsep.join(paths)
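The filtering keeps only `PATH` entries that do not live under the AYON launcher root. A standalone sketch of the same logic with hypothetical paths (the real `AYON_ROOT` and `PATH` come from the launch context):

import os

ayon_root = os.path.normpath("C:\\ayon")  # hypothetical launcher root
env_path = os.pathsep.join(["C:\\ayon\\dependencies", "C:\\Windows\\System32"])
kept = [
    path
    for path in env_path.split(os.pathsep)
    if not os.path.normpath(path).startswith(ayon_root)
]
print(os.pathsep.join(kept))  # only "C:\Windows\System32" survives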


@@ -98,7 +98,6 @@ from .profiles_filtering import (
 from .transcoding import (
     get_transcode_temp_directory,
     should_convert_for_ffmpeg,
-    convert_for_ffmpeg,
     convert_input_paths_for_ffmpeg,
     get_ffprobe_data,
     get_ffprobe_streams,
@@ -198,7 +197,6 @@ __all__ = [
     "get_transcode_temp_directory",
     "should_convert_for_ffmpeg",
-    "convert_for_ffmpeg",
     "convert_input_paths_for_ffmpeg",
     "get_ffprobe_data",
     "get_ffprobe_streams",


@@ -526,137 +526,6 @@ def should_convert_for_ffmpeg(src_filepath):
     return False


-# Deprecated since 2022 4 20
-# - Reason - Doesn't convert sequences right way: Can't handle gaps, reuse
-#       first frame for all frames and changes filenames when input
-#       is sequence.
-# - use 'convert_input_paths_for_ffmpeg' instead
-def convert_for_ffmpeg(
-    first_input_path,
-    output_dir,
-    input_frame_start=None,
-    input_frame_end=None,
-    logger=None
-):
-    """Convert source file to format supported in ffmpeg.
-
-    Currently can convert only exrs.
-
-    Args:
-        first_input_path (str): Path to first file of a sequence or a single
-            file path for non-sequential input.
-        output_dir (str): Path to directory where output will be rendered.
-            Must not be same as input's directory.
-        input_frame_start (int): Frame start of input.
-        input_frame_end (int): Frame end of input.
-        logger (logging.Logger): Logger used for logging.
-
-    Raises:
-        ValueError: If input filepath has extension not supported by function.
-            Currently is supported only ".exr" extension.
-    """
-    if logger is None:
-        logger = logging.getLogger(__name__)
-
-    logger.warning((
-        "DEPRECATED: 'ayon_core.lib.transcoding.convert_for_ffmpeg' is"
-        " deprecated function of conversion for FFMpeg. Please replace usage"
-        " with 'ayon_core.lib.transcoding.convert_input_paths_for_ffmpeg'"
-    ))
-
-    ext = os.path.splitext(first_input_path)[1].lower()
-    if ext != ".exr":
-        raise ValueError((
-            "Function 'convert_for_ffmpeg' currently support only"
-            " \".exr\" extension. Got \"{}\"."
-        ).format(ext))
-
-    is_sequence = False
-    if input_frame_start is not None and input_frame_end is not None:
-        is_sequence = int(input_frame_end) != int(input_frame_start)
-
-    input_info = get_oiio_info_for_input(first_input_path, logger=logger)
-
-    # Change compression only if source compression is "dwaa" or "dwab"
-    #   - they're not supported in ffmpeg
-    compression = input_info["attribs"].get("compression")
-    if compression in ("dwaa", "dwab"):
-        compression = "none"
-
-    # Prepare subprocess arguments
-    oiio_cmd = get_oiio_tool_args(
-        "oiiotool",
-        # Don't add any additional attributes
-        "--nosoftwareattrib",
-    )
-    # Add input compression if available
-    if compression:
-        oiio_cmd.extend(["--compression", compression])
-
-    # Collect channels to export
-    input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
-
-    oiio_cmd.extend([
-        input_arg, first_input_path,
-        # Tell oiiotool which channels should be put to top stack (and output)
-        "--ch", channels_arg,
-        # Use first subimage
-        "--subimage", "0"
-    ])
-
-    # Add frame definitions to arguments
-    if is_sequence:
-        oiio_cmd.extend([
-            "--frames", "{}-{}".format(input_frame_start, input_frame_end)
-        ])
-
-    for attr_name, attr_value in input_info["attribs"].items():
-        if not isinstance(attr_value, str):
-            continue
-
-        # Remove attributes that have string value longer than allowed length
-        #   for ffmpeg or when contain prohibited symbols
-        erase_reason = "Missing reason"
-        erase_attribute = False
-        if len(attr_value) > MAX_FFMPEG_STRING_LEN:
-            erase_reason = "has too long value ({} chars).".format(
-                len(attr_value)
-            )
-            erase_attribute = True
-
-        if not erase_attribute:
-            for char in NOT_ALLOWED_FFMPEG_CHARS:
-                if char in attr_value:
-                    erase_attribute = True
-                    erase_reason = (
-                        "contains unsupported character \"{}\"."
-                    ).format(char)
-                    break
-
-        if erase_attribute:
-            # Set attribute to empty string
-            logger.info((
-                "Removed attribute \"{}\" from metadata because {}."
-            ).format(attr_name, erase_reason))
-            oiio_cmd.extend(["--eraseattrib", attr_name])
-
-    # Add last argument - path to output
-    if is_sequence:
-        ext = os.path.splitext(first_input_path)[1]
-        base_filename = "tmp.%{:0>2}d{}".format(
-            len(str(input_frame_end)), ext
-        )
-    else:
-        base_filename = os.path.basename(first_input_path)
-    output_path = os.path.join(output_dir, base_filename)
-
-    oiio_cmd.extend([
-        "-o", output_path
-    ])
-
-    logger.debug("Conversion command: {}".format(" ".join(oiio_cmd)))
-    run_subprocess(oiio_cmd, logger=logger)
-
-
 def convert_input_paths_for_ffmpeg(
     input_paths,
     output_dir,
@@ -664,7 +533,7 @@ def convert_input_paths_for_ffmpeg(
 ):
     """Convert source file to format supported in ffmpeg.

-    Currently can convert only exrs. The input filepaths should be files
+    Can currently convert only EXRs. The input filepaths should be files
     with same type. Information about input is loaded only from first found
     file.
@@ -691,10 +560,10 @@ def convert_input_paths_for_ffmpeg(
     ext = os.path.splitext(first_input_path)[1].lower()
     if ext != ".exr":
-        raise ValueError((
-            "Function 'convert_for_ffmpeg' currently support only"
-            " \".exr\" extension. Got \"{}\"."
-        ).format(ext))
+        raise ValueError(
+            "Function 'convert_input_paths_for_ffmpeg' currently supports"
+            f" only \".exr\" extension. Got \"{ext}\"."
+        )

     input_info = get_oiio_info_for_input(first_input_path, logger=logger)


@@ -52,15 +52,15 @@ def get_product_name_template(
     # TODO remove formatting keys replacement
     template = (
         matching_profile["template"]
-        .replace("{task[name]}", "{task}")
-        .replace("{Task[name]}", "{Task}")
-        .replace("{TASK[NAME]}", "{TASK}")
-        .replace("{product[type]}", "{family}")
-        .replace("{Product[type]}", "{Family}")
-        .replace("{PRODUCT[TYPE]}", "{FAMILY}")
-        .replace("{folder[name]}", "{asset}")
-        .replace("{Folder[name]}", "{Asset}")
-        .replace("{FOLDER[NAME]}", "{ASSET}")
+        .replace("{task}", "{task[name]}")
+        .replace("{Task}", "{Task[name]}")
+        .replace("{TASK}", "{TASK[NAME]}")
+        .replace("{family}", "{product[type]}")
+        .replace("{Family}", "{Product[type]}")
+        .replace("{FAMILY}", "{PRODUCT[TYPE]}")
+        .replace("{asset}", "{folder[name]}")
+        .replace("{Asset}", "{Folder[name]}")
+        .replace("{ASSET}", "{FOLDER[NAME]}")
     )

     # Make sure template is set (matching may have empty string)
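The replacement direction is flipped here: legacy formatting keys found in a profile template are now upgraded to the current keys before the template is formatted. A minimal sketch with a hypothetical legacy template value:

legacy_template = "{family}{Task}{Asset}"
upgraded = (
    legacy_template
    .replace("{Task}", "{Task[name]}")
    .replace("{family}", "{product[type]}")
    .replace("{Asset}", "{Folder[name]}")
)
assert upgraded == "{product[type]}{Task[name]}{Folder[name]}"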


@@ -660,14 +660,6 @@ def _get_legacy_product_name_and_group(
     warnings.warn("Using legacy product name for renders",
                   DeprecationWarning)

-    if not source_product_name.startswith(product_type):
-        resulting_group_name = '{}{}{}{}{}'.format(
-            product_type,
-            task_name[0].upper(), task_name[1:],
-            source_product_name[0].upper(), source_product_name[1:])
-    else:
-        resulting_group_name = source_product_name
-
     # create product name `<product type><Task><Product name>`
     if not source_product_name.startswith(product_type):
         resulting_group_name = '{}{}{}{}{}'.format(


@@ -221,19 +221,6 @@ class LoaderPlugin(list):
         """
         return cls.options or []

-    @property
-    def fname(self):
-        """Backwards compatibility with deprecation warning"""
-        self.log.warning((
-            "DEPRECATION WARNING: Source - Loader plugin {}."
-            " The 'fname' property on the Loader plugin will be removed in"
-            " future versions of OpenPype. Planned version to drop the support"
-            " is 3.16.6 or 3.17.0."
-        ).format(self.__class__.__name__))
-        if hasattr(self, "_fname"):
-            return self._fname
-
     @classmethod
     def get_representation_name_aliases(cls, representation_name: str):
         """Return representation names to which switching is allowed from


@@ -316,12 +316,6 @@ def load_with_repre_context(
     )

     loader = Loader()

-    # Backwards compatibility: Originally the loader's __init__ required the
-    # representation context to set `fname` attribute to the filename to load
-    # Deprecated - to be removed in OpenPype 3.16.6 or 3.17.0.
-    loader._fname = get_representation_path_from_context(repre_context)
-
     return loader.load(repre_context, name, namespace, options)


@@ -0,0 +1,106 @@
+import pyblish.api
+
+from ayon_core.lib import EnumDef
+from ayon_core.pipeline import publish
+from ayon_core.pipeline.publish import PublishError
+
+
+class CollectExplicitResolution(
+    pyblish.api.InstancePlugin,
+    publish.AYONPyblishPluginMixin,
+):
+    """Collect explicit user defined resolution attributes for instances"""
+
+    label = "Choose Explicit Resolution"
+    order = pyblish.api.CollectorOrder - 0.091
+    settings_category = "core"
+    enabled = False
+
+    default_resolution_item = (None, "Don't override")
+
+    # Settings
+    product_types = []
+    options = []
+
+    # caching resolution items
+    resolution_items = None
+
+    def process(self, instance):
+        """Process the instance and collect explicit resolution attributes"""
+        # Get the values from the instance data
+        values = self.get_attr_values_from_data(instance.data)
+        resolution_value = values.get("explicit_resolution", None)
+        if resolution_value is None:
+            return
+
+        # Get the width, height and pixel_aspect from the resolution value
+        resolution_data = self._get_resolution_values(resolution_value)
+
+        # Set the values to the instance data
+        instance.data.update(resolution_data)
+
+    def _get_resolution_values(self, resolution_value):
+        """Return width, height and pixel_aspect from the resolution value
+
+        Arguments:
+            resolution_value (str): resolution value
+
+        Returns:
+            dict: dictionary with width, height and pixel_aspect
+        """
+        resolution_items = self._get_resolution_items()
+
+        # ensure resolution_value is part of expected items
+        item_values = resolution_items.get(resolution_value)
+
+        # if the item is in the cache, get the values from it
+        if item_values:
+            return {
+                "resolutionWidth": item_values["width"],
+                "resolutionHeight": item_values["height"],
+                "pixelAspect": item_values["pixel_aspect"],
+            }
+
+        raise PublishError(
+            f"Invalid resolution value: {resolution_value} "
+            f"expected choices: {resolution_items}"
+        )
+
+    @classmethod
+    def _get_resolution_items(cls):
+        if cls.resolution_items is None:
+            resolution_items = {}
+            for item in cls.options:
+                item_text = (
+                    f"{item['width']}x{item['height']} "
+                    f"({item['pixel_aspect']})"
+                )
+                resolution_items[item_text] = item
+            cls.resolution_items = resolution_items
+        return cls.resolution_items
+
+    @classmethod
+    def get_attr_defs_for_instance(
+        cls, create_context, instance,
+    ):
+        if instance.product_type not in cls.product_types:
+            return []
+
+        # Get the resolution items
+        resolution_items = cls._get_resolution_items()
+
+        items = [cls.default_resolution_item]
+        # Add all cached resolution items to the dropdown options
+        for item_text in resolution_items:
+            items.append((item_text, item_text))
+
+        return [
+            EnumDef(
+                "explicit_resolution",
+                items,
+                default="Don't override",
+                label="Force product resolution",
+            ),
+        ]
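A short sketch of how the cached items turn into dropdown labels, assuming a hypothetical `options` value coming from the addon settings:

# Hypothetical settings payload for `options`:
CollectExplicitResolution.options = [
    {"width": 1920, "height": 1080, "pixel_aspect": 1.0},
    {"width": 3840, "height": 2160, "pixel_aspect": 1.0},
]
# _get_resolution_items() then caches:
#   {"1920x1080 (1.0)": {...}, "3840x2160 (1.0)": {...}}
# and the EnumDef shows "Don't override" plus those two labels; picking one
# writes resolutionWidth / resolutionHeight / pixelAspect to the instance data.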


@@ -31,6 +31,9 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
     # Keep "filesequence" for backwards compatibility of older jobs
     targets = ["filesequence", "farm"]
     label = "Collect rendered frames"
+    settings_category = "core"
+
+    remove_files = False

     _context = None
@@ -120,7 +123,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
             self._fill_staging_dir(repre_data, anatomy)
             representations.append(repre_data)

-            if not staging_dir_persistent:
+            if self.remove_files and not staging_dir_persistent:
                 add_repre_files_for_cleanup(instance, repre_data)

         instance.data["representations"] = representations
@@ -170,7 +173,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
             os.environ.update(session_data)

             staging_dir_persistent = self._process_path(data, anatomy)
-            if not staging_dir_persistent:
+            if self.remove_files and not staging_dir_persistent:
                 context.data["cleanupFullPaths"].append(path)
                 context.data["cleanupEmptyDirs"].append(
                     os.path.dirname(path)


@@ -280,7 +280,7 @@ class ExtractOIIOTranscode(publish.Extractor):
         collection = collections[0]
         frames = list(collection.indexes)
-        if collection.holes():
+        if collection.holes().indexes:
             return files_to_convert

         frame_str = "{}-{}#".format(frames[0], frames[-1])
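The gap check relies on the `clique` sequence library used elsewhere in ayon-core: `Collection.holes()` returns another Collection, so its `indexes` set is what actually tells whether frames are missing. A minimal sketch of the fixed check, using hypothetical file names:

import clique

collections, _ = clique.assemble(
    ["render.1001.exr", "render.1002.exr", "render.1004.exr"]
)
collection = collections[0]
if collection.holes().indexes:
    # frame 1003 is missing, so the sequence is treated as having gaps
    print("gaps at:", sorted(collection.holes().indexes))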


@@ -54,7 +54,7 @@ class ExtractOTIOReview(
     # plugin default attributes
     to_width = 1280
     to_height = 720
-    output_ext = ".jpg"
+    output_ext = ".png"

     def process(self, instance):
         # Not all hosts can import these modules.
@@ -510,6 +510,12 @@ class ExtractOTIOReview(
                 "-tune", "stillimage"
             ])

+        if video or sequence:
+            command.extend([
+                "-vf", f"scale={self.to_width}:{self.to_height}:flags=lanczos",
+                "-compression_level", "5",
+            ])
+
         # add output attributes
         command.extend([
             "-start_number", str(out_frame_start)
@@ -520,9 +526,10 @@ class ExtractOTIOReview(
             input_extension
             and self.output_ext == input_extension
         ):
-            command.extend([
-                "-c", "copy"
-            ])
+            command.extend(["-c", "copy"])
+        else:
+            # For lossy formats, force re-encode
+            command.extend(["-pix_fmt", "rgba"])

         # add output path at the end
         command.append(output_path)


@@ -17,7 +17,7 @@ from ayon_core.lib import (
 )
 from ayon_core.lib.transcoding import convert_colorspace

-from ayon_core.lib.transcoding import VIDEO_EXTENSIONS
+from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS


 class ExtractThumbnail(pyblish.api.InstancePlugin):
@@ -336,7 +336,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
         return need_thumb_repres

     def _get_filtered_repres(self, instance):
-        filtered_repres = []
+        review_repres = []
+        other_repres = []
         src_repres = instance.data.get("representations") or []
         for repre in src_repres:
@@ -348,17 +349,36 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
                 # to be published locally
                 continue

-            if "review" not in tags:
-                continue
-
             if not repre.get("files"):
                 self.log.debug((
                     "Representation \"{}\" doesn't have files. Skipping"
                 ).format(repre["name"]))
                 continue

-            filtered_repres.append(repre)
-
-        return filtered_repres
+            if "review" in tags:
+                review_repres.append(repre)
+            elif self._is_valid_images_repre(repre):
+                other_repres.append(repre)
+
+        return review_repres + other_repres
+
+    def _is_valid_images_repre(self, repre):
+        """Check if representation contains valid image files
+
+        Args:
+            repre (dict): representation
+
+        Returns:
+            bool: whether the representation has the valid image content
+        """
+        # Get first file's extension
+        first_file = repre["files"]
+        if isinstance(first_file, (list, tuple)):
+            first_file = first_file[0]
+        ext = os.path.splitext(first_file)[1].lower()
+        return ext in IMAGE_EXTENSIONS or ext in VIDEO_EXTENSIONS

     def _create_thumbnail_oiio(
         self,


@@ -1,12 +0,0 @@
-import warnings
-
-from .broker import StdOutBroker
-
-warnings.warn(
-    (
-        "Import of 'StdOutBroker' from 'ayon_core.tools.stdout_broker.app'"
-        " is deprecated. Please use 'ayon_core.tools.stdout_broker' instead."
-    ),
-    DeprecationWarning
-)
-
-__all__ = ("StdOutBroker", )


@@ -350,21 +350,21 @@ class ProjectSortFilterProxy(QtCore.QSortFilterProxyModel):
         if project_name is None:
             return True

-        string_pattern = self.filterRegularExpression().pattern()
-        if string_pattern:
-            return string_pattern.lower() in project_name.lower()
-
-        # Current project keep always visible
-        default = super(ProjectSortFilterProxy, self).filterAcceptsRow(
-            source_row, source_parent
-        )
-        if not default:
-            return default
-
         # Make sure current project is visible
         if index.data(PROJECT_IS_CURRENT_ROLE):
             return True

+        default = super().filterAcceptsRow(source_row, source_parent)
+        if not default:
+            return default
+
+        string_pattern = self.filterRegularExpression().pattern()
+        if (
+            string_pattern
+            and string_pattern.lower() not in project_name.lower()
+        ):
+            return False
+
         if (
             self._filter_inactive
             and not index.data(PROJECT_IS_ACTIVE_ROLE)


@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 """Package declaring AYON addon 'core' version."""
-__version__ = "1.1.8+dev"
+__version__ = "1.2.0+dev"


@@ -1,12 +1,17 @@
 name = "core"
 title = "Core"
-version = "1.1.8+dev"
+version = "1.2.0+dev"

 client_dir = "ayon_core"

 plugin_for = ["ayon_server"]

-ayon_server_version = ">=1.0.3,<2.0.0"
+ayon_server_version = ">=1.7.6,<2.0.0"
 ayon_launcher_version = ">=1.0.2"
 ayon_required_addons = {}
-ayon_compatible_addons = {}
+ayon_compatible_addons = {
+    "ayon_ocio": ">=1.2.1",
+    "harmony": ">0.4.0",
+    "fusion": ">=0.3.3",
+    "openrv": ">=1.0.2",
+}


@@ -5,7 +5,7 @@
 [tool.poetry]
 name = "ayon-core"
-version = "1.1.8+dev"
+version = "1.2.0+dev"
 description = ""
 authors = ["Ynput Team <team@ynput.io>"]
 readme = "README.md"
@@ -20,7 +20,7 @@ pytest = "^8.0"
 pytest-print = "^1.0"
 ayon-python-api = "^1.0"
 # linting dependencies
-ruff = "^0.11.4"
+ruff = "^0.11.7"
 pre-commit = "^4"
 codespell = "^2.2.6"
 semver = "^3.0.2"
@@ -48,82 +48,6 @@ qtpy="^2.4.3"
 pyside6 = "^6.5.2"
 pytest-ayon = { git = "https://github.com/ynput/pytest-ayon.git", branch = "chore/align-dependencies" }

-[tool.ruff]
-# Exclude a variety of commonly ignored directories.
-exclude = [
-    ".bzr",
-    ".direnv",
-    ".eggs",
-    ".git",
-    ".git-rewrite",
-    ".hg",
-    ".ipynb_checkpoints",
-    ".mypy_cache",
-    ".nox",
-    ".pants.d",
-    ".pyenv",
-    ".pytest_cache",
-    ".pytype",
-    ".ruff_cache",
-    ".svn",
-    ".tox",
-    ".venv",
-    ".vscode",
-    "__pypackages__",
-    "_build",
-    "buck-out",
-    "build",
-    "dist",
-    "node_modules",
-    "site-packages",
-    "venv",
-    "vendor",
-    "generated",
-]
-
-# Same as Black.
-line-length = 79
-indent-width = 4
-
-# Assume Python 3.9
-target-version = "py39"
-
-[tool.ruff.lint]
-pydocstyle.convention = "google"
-# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
-select = ["E", "F", "W"]
-ignore = []
-
-# Allow fix for all enabled rules (when `--fix`) is provided.
-fixable = ["ALL"]
-unfixable = []
-
-# Allow unused variables when underscore-prefixed.
-dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
-exclude = [
-    "client/ayon_core/modules/click_wrap.py",
-    "client/ayon_core/scripts/slates/__init__.py"
-]
-
-[tool.ruff.lint.per-file-ignores]
-"client/ayon_core/lib/__init__.py" = ["E402"]
-"tests/*.py" = ["S101", "PLR2004"]  # allow asserts and magical values
-
-[tool.ruff.format]
-# Like Black, use double quotes for strings.
-quote-style = "double"
-
-# Like Black, indent with spaces, rather than tabs.
-indent-style = "space"
-
-# Like Black, respect magic trailing commas.
-skip-magic-trailing-comma = false
-
-# Like Black, automatically detect the appropriate line ending.
-line-ending = "auto"
-
 [tool.codespell]
 # Ignore words that are not in the dictionary.
 ignore-words-list = "ayon,ynput,parms,parm,hda,developpement"
@@ -132,7 +56,7 @@ ignore-words-list = "ayon,ynput,parms,parm,hda,developpement"
 # Remove with next codespell release (>2.2.6)
 ignore-regex = ".*codespell:ignore.*"
-skip = "./.*,./package/*,*/vendor/*,*/unreal/integration/*,*/aftereffects/api/extension/js/libs/*"
+skip = "./.*,./package/*,*/client/ayon_core/vendor/*"
 count = true
 quiet-level = 3
@@ -143,7 +67,6 @@ mypy_path = "$MYPY_CONFIG_FILE_DIR/client"
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"

 [tool.pytest.ini_options]
 log_cli = true
 log_cli_level = "INFO"

ruff.toml (new file)

@@ -0,0 +1,87 @@
+# Exclude a variety of commonly ignored directories.
+exclude = [
+    ".bzr",
+    ".direnv",
+    ".eggs",
+    ".git",
+    ".git-rewrite",
+    ".hg",
+    ".ipynb_checkpoints",
+    ".mypy_cache",
+    ".nox",
+    ".pants.d",
+    ".pyenv",
+    ".pytest_cache",
+    ".pytype",
+    ".ruff_cache",
+    ".svn",
+    ".tox",
+    ".venv",
+    ".vscode",
+    "__pypackages__",
+    "_build",
+    "buck-out",
+    "build",
+    "dist",
+    "node_modules",
+    "site-packages",
+    "venv",
+    "vendor",
+    "generated",
+]
+
+# Same as Black.
+line-length = 79
+indent-width = 4
+
+# Assume Python 3.9
+target-version = "py39"
+
+[lint]
+preview = true
+pydocstyle.convention = "google"
+# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
+select = ["E", "F", "W"]
+ignore = []
+
+# Allow fix for all enabled rules (when `--fix`) is provided.
+fixable = ["ALL"]
+unfixable = []
+
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+exclude = [
+    "client/ayon_core/scripts/slates/__init__.py"
+]
+
+[lint.per-file-ignores]
+"client/ayon_core/lib/__init__.py" = ["E402"]
+"tests/*.py" = ["S101", "PLR2004"]  # allow asserts and magical values
+
+[format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
+
+# Enable auto-formatting of code examples in docstrings. Markdown,
+# reStructuredText code/literal blocks and doctests are all supported.
+#
+# This is currently disabled by default, but it is planned for this
+# to be opt-out in the future.
+docstring-code-format = false
+
+# Set the line length limit used when formatting code snippets in
+# docstrings.
+#
+# This only has an effect when the `docstring-code-format` setting is
+# enabled.
+docstring-code-line-length = "dynamic"


@@ -71,6 +71,24 @@ def _fallback_ocio_config_profile_types():
 def _ocio_built_in_paths():
     return [
+        {
+            "value": "{BUILTIN_OCIO_ROOT}/aces_2.0/studio-config-v3.0.0_aces-v2.0_ocio-v2.4.ocio",  # noqa: E501
+            "label": "ACES 2.0 Studio (OCIO v2.4)",
+            "description": (
+                "Aces 2.0 Studio OCIO config file. Requires OCIO v2.4.")
+        },
+        {
+            "value": "{BUILTIN_OCIO_ROOT}/aces_1.3/studio-config-v1.0.0_aces-v1.3_ocio-v2.1.ocio",  # noqa: E501
+            "label": "ACES 1.3 Studio (OCIO v2.1)",
+            "description": (
+                "Aces 1.3 Studio OCIO config file. Requires OCIO v2.1.")
+        },
+        {
+            "value": "{BUILTIN_OCIO_ROOT}/aces_1.3/studio-config-v1.0.0_aces-v1.3_ocio-v2.0.ocio",  # noqa: E501
+            "label": "ACES 1.3 Studio (OCIO v2)",
+            "description": (
+                "Aces 1.3 Studio OCIO config file. Requires OCIO v2.")
+        },
         {
             "value": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
             "label": "ACES 1.2",


@@ -1,4 +1,5 @@
 from pydantic import validator
+from typing import Any

 from ayon_server.settings import (
     BaseSettingsModel,
@@ -7,8 +8,9 @@ from ayon_server.settings import (
     normalize_name,
     ensure_unique_names,
     task_types_enum,
+    anatomy_template_items_enum
 )
+from ayon_server.exceptions import BadRequestException
 from ayon_server.types import ColorRGBA_uint8
@@ -157,6 +159,78 @@ class CollectUSDLayerContributionsModel(BaseSettingsModel):
         return value


+class ResolutionOptionsModel(BaseSettingsModel):
+    _layout = "compact"
+    width: int = SettingsField(
+        1920,
+        ge=0,
+        le=100000,
+        title="Width",
+        description=(
+            "Width resolution number value"),
+        placeholder="Width"
+    )
+    height: int = SettingsField(
+        1080,
+        title="Height",
+        ge=0,
+        le=100000,
+        description=(
+            "Height resolution number value"),
+        placeholder="Height"
+    )
+    pixel_aspect: float = SettingsField(
+        1.0,
+        title="Pixel aspect",
+        ge=0.0,
+        le=100000.0,
+        description=(
+            "Pixel Aspect resolution decimal number value"),
+        placeholder="Pixel aspect"
+    )
+
+
+def ensure_unique_resolution_option(
+        objects: list[Any], field_name: str | None = None) -> None:  # noqa: C901
+    """Ensure a list of objects has unique option attributes.
+
+    This function checks if the list of objects has unique 'width',
+    'height' and 'pixel_aspect' properties.
+    """
+    options = set()
+    for obj in objects:
+        item_test_text = f"{obj.width}x{obj.height}x{obj.pixel_aspect}"
+        if item_test_text in options:
+            raise BadRequestException(
+                f"Duplicate option '{item_test_text}'")
+        options.add(item_test_text)
+
+
+class CollectExplicitResolutionModel(BaseSettingsModel):
+    enabled: bool = SettingsField(True, title="Enabled")
+    product_types: list[str] = SettingsField(
+        default_factory=list,
+        title="Product types",
+        description=(
+            "Only activate the attribute for following product types."
+        )
+    )
+    options: list[ResolutionOptionsModel] = SettingsField(
+        default_factory=list,
+        title="Resolution choices",
+        description=(
+            "Available resolution choices to be displayed in "
+            "the publishers attribute."
+        )
+    )
+
+    @validator("options")
+    def validate_unique_resolution_options(cls, value):
+        ensure_unique_resolution_option(value)
+        return value
+
+
 class AyonEntityURIModel(BaseSettingsModel):
     use_ayon_entity_uri: bool = SettingsField(
         title="Use AYON Entity URI",
@@ -889,7 +963,11 @@ class IntegrateANTemplateNameProfileModel(BaseSettingsModel):
         default_factory=list,
         title="Task names"
     )
-    template_name: str = SettingsField("", title="Template name")
+    template_name: str = SettingsField(
+        "",
+        title="Template name",
+        enum_resolver=anatomy_template_items_enum(category="publish")
+    )


 class IntegrateHeroTemplateNameProfileModel(BaseSettingsModel):
@@ -910,7 +988,11 @@ class IntegrateHeroTemplateNameProfileModel(BaseSettingsModel):
         default_factory=list,
         title="Task names"
     )
-    template_name: str = SettingsField("", title="Template name")
+    template_name: str = SettingsField(
+        "",
+        title="Template name",
+        enum_resolver=anatomy_template_items_enum(category="hero")
+    )


 class IntegrateHeroVersionModel(BaseSettingsModel):
@@ -929,6 +1011,20 @@ class IntegrateHeroVersionModel(BaseSettingsModel):
             "hero versions.")


+class CollectRenderedFilesModel(BaseSettingsModel):
+    remove_files: bool = SettingsField(
+        False,
+        title="Remove rendered files",
+        description=(
+            "Remove rendered files and metadata json on publish.\n\n"
+            "Note that when enabled, but the render is to a configured "
+            "persistent staging directory, the files will not be removed. "
+            "However, with this disabled the files will **not** be removed "
+            "in either case."
+        )
+    )
+
+
 class CleanUpModel(BaseSettingsModel):
     _isGroup = True
     paterns: list[str] = SettingsField(  # codespell:ignore paterns
@@ -974,6 +1070,10 @@ class PublishPuginsModel(BaseSettingsModel):
             title="Collect USD Layer Contributions",
         )
     )
+    CollectExplicitResolution: CollectExplicitResolutionModel = SettingsField(
+        default_factory=CollectExplicitResolutionModel,
+        title="Collect Explicit Resolution"
+    )
     ValidateEditorialAssetName: ValidateBaseModel = SettingsField(
         default_factory=ValidateBaseModel,
         title="Validate Editorial Asset Name"
@@ -1041,6 +1141,10 @@ class PublishPuginsModel(BaseSettingsModel):
             "published as a render/review product of its own."
         )
     )
+    CollectRenderedFiles: CollectRenderedFilesModel = SettingsField(
+        default_factory=CollectRenderedFilesModel,
+        title="Clean up farm rendered files"
+    )
     CleanUp: CleanUpModel = SettingsField(
         default_factory=CleanUpModel,
         title="Clean Up"
@@ -1144,6 +1248,13 @@ DEFAULT_PUBLISH_VALUES = {
            },
        ]
    },
+    "CollectExplicitResolution": {
+        "enabled": True,
+        "product_types": [
+            "shot"
+        ],
+        "options": []
+    },
    "ValidateEditorialAssetName": {
        "enabled": True,
        "optional": False,
@@ -1428,6 +1539,9 @@ DEFAULT_PUBLISH_VALUES = {
    "AttachReviewables": {
        "enabled": True,
    },
+    "CollectRenderedFiles": {
+        "remove_files": False
+    },
    "CleanUp": {
        "paterns": [],  # codespell:ignore paterns
        "remove_temp_renders": False


@@ -5,6 +5,7 @@ from ayon_server.settings import (
     normalize_name,
     ensure_unique_names,
     task_types_enum,
+    anatomy_template_items_enum
 )
@@ -283,7 +284,34 @@ class PublishTemplateNameProfile(BaseSettingsModel):
     task_names: list[str] = SettingsField(
         default_factory=list, title="Task names"
     )
-    template_name: str = SettingsField("", title="Template name")
+    template_name: str = SettingsField(
+        "",
+        title="Template name",
+        enum_resolver=anatomy_template_items_enum(category="publish")
+    )
+
+
+class HeroTemplateNameProfile(BaseSettingsModel):
+    _layout = "expanded"
+    product_types: list[str] = SettingsField(
+        default_factory=list,
+        title="Product types"
+    )
+    # TODO this should use hosts enum
+    hosts: list[str] = SettingsField(default_factory=list, title="Hosts")
+    task_types: list[str] = SettingsField(
+        default_factory=list,
+        title="Task types",
+        enum_resolver=task_types_enum
+    )
+    task_names: list[str] = SettingsField(
+        default_factory=list, title="Task names"
+    )
+    template_name: str = SettingsField(
+        "",
+        title="Template name",
+        enum_resolver=anatomy_template_items_enum(category="hero")
+    )


 class CustomStagingDirProfileModel(BaseSettingsModel):
@@ -306,7 +334,11 @@ class CustomStagingDirProfileModel(BaseSettingsModel):
     custom_staging_dir_persistent: bool = SettingsField(
         False, title="Custom Staging Folder Persistent"
     )
-    template_name: str = SettingsField("", title="Template Name")
+    template_name: str = SettingsField(
+        "",
+        title="Template name",
+        enum_resolver=anatomy_template_items_enum(category="staging")
+    )


 class PublishToolModel(BaseSettingsModel):
@@ -314,7 +346,7 @@ class PublishToolModel(BaseSettingsModel):
         default_factory=list,
         title="Template name profiles"
     )
-    hero_template_name_profiles: list[PublishTemplateNameProfile] = (
+    hero_template_name_profiles: list[HeroTemplateNameProfile] = (
         SettingsField(
             default_factory=list,
             title="Hero template name profiles"


@@ -103,17 +103,18 @@ def test_image_sequence_with_embedded_tc_and_handles_out_of_range():
        # 10 head black handles generated from gap (991-1000)
        "/path/to/ffmpeg -t 0.4166666666666667 -r 24.0 -f lavfi -i "
        "color=c=black:s=1280x720 -tune stillimage -start_number 991 "
-        "C:/result/output.%04d.jpg",
+        "-pix_fmt rgba C:/result/output.%04d.png",

        # 10 tail black handles generated from gap (1102-1111)
        "/path/to/ffmpeg -t 0.4166666666666667 -r 24.0 -f lavfi -i "
        "color=c=black:s=1280x720 -tune stillimage -start_number 1102 "
-        "C:/result/output.%04d.jpg",
+        "-pix_fmt rgba C:/result/output.%04d.png",

        # Report from source exr (1001-1101) with enforce framerate
        "/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i "
-        f"C:\\exr_embedded_tc{os.sep}output.%04d.exr -start_number 1001 "
-        "C:/result/output.%04d.jpg"
+        f"C:\\exr_embedded_tc{os.sep}output.%04d.exr "
+        "-vf scale=1280:720:flags=lanczos -compression_level 5 "
+        "-start_number 1001 -pix_fmt rgba C:/result/output.%04d.png"
    ]
    assert calls == expected
@@ -130,20 +131,23 @@ def test_image_sequence_and_handles_out_of_range():
    expected = [
        # 5 head black frames generated from gap (991-995)
-        "/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
-        " -tune stillimage -start_number 991 C:/result/output.%04d.jpg",
+        "/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720 "
+        "-tune stillimage -start_number 991 -pix_fmt rgba "
+        "C:/result/output.%04d.png",

        # 9 tail back frames generated from gap (1097-1105)
-        "/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
-        " -tune stillimage -start_number 1097 C:/result/output.%04d.jpg",
+        "/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720 "
+        "-tune stillimage -start_number 1097 -pix_fmt rgba "
+        "C:/result/output.%04d.png",

        # Report from source tiff (996-1096)
        # 996-1000 = additional 5 head frames
        # 1001-1095 = source range conformed to 25fps
        # 1096-1096 = additional 1 tail frames
        "/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i "
-        f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996"
-        f" C:/result/output.%04d.jpg"
+        f"C:\\tif_seq{os.sep}output.%04d.tif "
+        "-vf scale=1280:720:flags=lanczos -compression_level 5 "
+        "-start_number 996 -pix_fmt rgba C:/result/output.%04d.png"
    ]
    assert calls == expected
@@ -163,8 +167,9 @@ def test_movie_with_embedded_tc_no_gap_handles():
        # - first_frame = 14 src - 10 (head tail) = frame 4 = 0.1666s
        # - duration = 68fr (source) + 20fr (handles) = 88frames = 3.666s
        "/path/to/ffmpeg -ss 0.16666666666666666 -t 3.6666666666666665 "
-        "-i C:\\data\\qt_embedded_tc.mov -start_number 991 "
-        "C:/result/output.%04d.jpg"
+        "-i C:\\data\\qt_embedded_tc.mov -vf scale=1280:720:flags=lanczos "
+        "-compression_level 5 -start_number 991 -pix_fmt rgba "
+        "C:/result/output.%04d.png"
    ]
    assert calls == expected
@@ -181,12 +186,14 @@ def test_short_movie_head_gap_handles():
    expected = [
        # 10 head black frames generated from gap (991-1000)
        "/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
-        " -tune stillimage -start_number 991 C:/result/output.%04d.jpg",
+        " -tune stillimage -start_number 991 -pix_fmt rgba "
+        "C:/result/output.%04d.png",

        # source range + 10 tail frames
        # duration = 50fr (source) + 10fr (tail handle) = 60 fr = 2.4s
-        "/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4"
-        " -start_number 1001 C:/result/output.%04d.jpg"
+        "/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4 -vf "
+        "scale=1280:720:flags=lanczos -compression_level 5 "
+        "-start_number 1001 -pix_fmt rgba C:/result/output.%04d.png"
    ]
    assert calls == expected
@@ -204,13 +211,14 @@ def test_short_movie_tail_gap_handles():
        # 10 tail black frames generated from gap (1067-1076)
        "/path/to/ffmpeg -t 0.4166666666666667 -r 24.0 -f lavfi -i "
        "color=c=black:s=1280x720 -tune stillimage -start_number 1067 "
-        "C:/result/output.%04d.jpg",
+        "-pix_fmt rgba C:/result/output.%04d.png",

        # 10 head frames + source range
        # duration = 10fr (head handle) + 66fr (source) = 76fr = 3.16s
        "/path/to/ffmpeg -ss 1.0416666666666667 -t 3.1666666666666665 -i "
-        "C:\\data\\qt_no_tc_24fps.mov -start_number 991"
-        " C:/result/output.%04d.jpg"
+        "C:\\data\\qt_no_tc_24fps.mov -vf scale=1280:720:flags=lanczos "
+        "-compression_level 5 -start_number 991 -pix_fmt rgba "
+        "C:/result/output.%04d.png"
    ]
    assert calls == expected
@@ -239,62 +247,75 @@ def test_multiple_review_clips_no_gap():
        # 10 head black frames generated from gap (991-1000)
        '/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi'
        ' -i color=c=black:s=1280x720 -tune '
-        'stillimage -start_number 991 C:/result/output.%04d.jpg',
+        'stillimage -start_number 991 -pix_fmt rgba C:/result/output.%04d.png',

        # Alternance 25fps tiff sequence and 24fps exr sequence
        # for 100 frames each
        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 1001 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1001 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
        f'C:\\with_tc{os.sep}output.%04d.exr '
-        '-start_number 1102 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1102 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 1198 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1198 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
        f'C:\\with_tc{os.sep}output.%04d.exr '
-        '-start_number 1299 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1299 -pix_fmt rgba C:/result/output.%04d.png',

        # Repeated 25fps tiff sequence multiple times till the end
        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 1395 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1395 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 1496 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1496 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 1597 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1597 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 1698 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1698 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 1799 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1799 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 1900 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1900 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 2001 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 2001 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 2102 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 2102 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
-        '-start_number 2203 C:/result/output.%04d.jpg'
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 2203 -pix_fmt rgba C:/result/output.%04d.png'
    ]
    assert calls == expected
@@ -323,15 +344,17 @@ def test_multiple_review_clips_with_gap():
        # Gap on review track (12 frames)
        '/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi'
        ' -i color=c=black:s=1280x720 -tune '
-        'stillimage -start_number 991 C:/result/output.%04d.jpg',
+        'stillimage -start_number 991 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
        f'C:\\with_tc{os.sep}output.%04d.exr '
-        '-start_number 1003 C:/result/output.%04d.jpg',
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1003 -pix_fmt rgba C:/result/output.%04d.png',

        '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
        f'C:\\with_tc{os.sep}output.%04d.exr '
-        '-start_number 1091 C:/result/output.%04d.jpg'
+        '-vf scale=1280:720:flags=lanczos -compression_level 5 '
+        '-start_number 1091 -pix_fmt rgba C:/result/output.%04d.png'
    ]
    assert calls == expected