diff --git a/client/ayon_core/lib/transcoding.py b/client/ayon_core/lib/transcoding.py index f1c1cd7aa6..061a7bd398 100644 --- a/client/ayon_core/lib/transcoding.py +++ b/client/ayon_core/lib/transcoding.py @@ -1,3 +1,4 @@ +from __future__ import annotations import os import re import logging @@ -12,6 +13,8 @@ from typing import Optional import xml.etree.ElementTree +import clique + from .execute import run_subprocess from .vendor_bin_utils import ( get_ffmpeg_tool_args, @@ -634,6 +637,37 @@ def should_convert_for_ffmpeg(src_filepath): return False +def _get_attributes_to_erase( + input_info: dict, logger: logging.Logger +) -> list[str]: + """FFMPEG does not support some attributes in metadata.""" + erase_attrs: dict[str, str] = {} # Attr name to reason mapping + for attr_name, attr_value in input_info["attribs"].items(): + if not isinstance(attr_value, str): + continue + + # Remove attributes that have string value longer than allowed length + # for ffmpeg or when contain prohibited symbols + if len(attr_value) > MAX_FFMPEG_STRING_LEN: + reason = f"has too long value ({len(attr_value)} chars)." + erase_attrs[attr_name] = reason + continue + + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char not in attr_value: + continue + reason = f"contains unsupported character \"{char}\"." + erase_attrs[attr_name] = reason + break + + for attr_name, reason in erase_attrs.items(): + logger.info( + f"Removed attribute \"{attr_name}\" from metadata" + f" because {reason}." + ) + return list(erase_attrs.keys()) + + def convert_input_paths_for_ffmpeg( input_paths, output_dir, @@ -659,7 +693,7 @@ def convert_input_paths_for_ffmpeg( Raises: ValueError: If input filepath has extension not supported by function. - Currently is supported only ".exr" extension. + Currently, only ".exr" extension is supported. """ if logger is None: logger = logging.getLogger(__name__) @@ -684,7 +718,22 @@ def convert_input_paths_for_ffmpeg( # Collect channels to export input_arg, channels_arg = get_oiio_input_and_channel_args(input_info) - for input_path in input_paths: + # Find which attributes to strip + erase_attributes: list[str] = _get_attributes_to_erase( + input_info, logger=logger + ) + + # clique.PATTERNS["frames"] supports only `.1001.exr` not `_1001.exr` so + # we use a customized pattern. 
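+    # clique.assemble() matches the expression's named groups "index"
+    # (the frame number) and "padding" (its leading zeros), mirroring
+    # clique.PATTERNS["frames"] but accepting "_" as well as "." before
+    # the frame number.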
+    pattern = "[_.](?P<index>(?P<padding>0*)\\d+)\\.\\D+\\d?$"
+    input_collections, input_remainder = clique.assemble(
+        input_paths,
+        patterns=[pattern],
+        assume_padded_when_ambiguous=True,
+    )
+    input_items = list(input_collections)
+    input_items.extend(input_remainder)
+    for input_item in input_items:
         # Prepare subprocess arguments
         oiio_cmd = get_oiio_tool_args(
             "oiiotool",
@@ -695,8 +744,23 @@
         if compression:
             oiio_cmd.extend(["--compression", compression])
 
+        # Convert a sequence of files using a single oiiotool command
+        # using its sequence syntax
+        if isinstance(input_item, clique.Collection):
+            frames = input_item.format("{ranges}").replace(" ", "")
+            oiio_cmd.extend([
+                "--framepadding", str(input_item.padding),
+                "--frames", frames,
+                "--parallel-frames"
+            ])
+            input_item: str = input_item.format("{head}#{tail}")
+        elif not isinstance(input_item, str):
+            raise TypeError(
+                f"Input is not a string or Collection: {input_item}"
+            )
+
         oiio_cmd.extend([
-            input_arg, input_path,
+            input_arg, input_item,
             # Tell oiiotool which channels should be put to top stack
             # (and output)
             "--ch", channels_arg,
@@ -704,38 +768,11 @@
             "--subimage", "0"
         ])
-        for attr_name, attr_value in input_info["attribs"].items():
-            if not isinstance(attr_value, str):
-                continue
-
-            # Remove attributes that have string value longer than allowed
-            # length for ffmpeg or when containing prohibited symbols
-            erase_reason = "Missing reason"
-            erase_attribute = False
-            if len(attr_value) > MAX_FFMPEG_STRING_LEN:
-                erase_reason = "has too long value ({} chars).".format(
-                    len(attr_value)
-                )
-                erase_attribute = True
-
-            if not erase_attribute:
-                for char in NOT_ALLOWED_FFMPEG_CHARS:
-                    if char in attr_value:
-                        erase_attribute = True
-                        erase_reason = (
-                            "contains unsupported character \"{}\"."
-                        ).format(char)
-                        break
-
-            if erase_attribute:
-                # Set attribute to empty string
-                logger.info((
-                    "Removed attribute \"{}\" from metadata because {}."
-                ).format(attr_name, erase_reason))
-                oiio_cmd.extend(["--eraseattrib", attr_name])
+        for attr_name in erase_attributes:
+            oiio_cmd.extend(["--eraseattrib", attr_name])
 
         # Add last argument - path to output
-        base_filename = os.path.basename(input_path)
+        base_filename = os.path.basename(input_item)
         output_path = os.path.join(output_dir, base_filename)
         oiio_cmd.extend([
             "-o", output_path
         ])
@@ -1136,7 +1173,10 @@ def oiio_color_convert(
     target_display=None,
     target_view=None,
     additional_command_args=None,
-    logger=None,
+    frames: Optional[str] = None,
+    frame_padding: Optional[int] = None,
+    parallel_frames: bool = False,
+    logger: Optional[logging.Logger] = None,
 ):
     """Transcode source file to other with colormanagement.
 
@@ -1148,7 +1188,7 @@
         input_path (str): Path that should be converted. It is expected that
             contains single file or image sequence of same type (sequence in
             format 'file.FRAMESTART-FRAMEEND#.ext', see oiio docs,
-            eg `big.1-3#.tif`)
+            eg `big.1-3#.tif` or `big.1-3%d.ext` with `frames` argument)
         output_path (str): Path to output filename.
             (must follow format of 'input_path', eg. single file or sequence in
             'file.FRAMESTART-FRAMEEND#.ext', `output.1-3#.tif`)
@@ -1169,6 +1209,13 @@
             both 'view' and 'display' must be filled (if 'target_colorspace')
         additional_command_args (list): arguments for oiiotool
             (like binary depth for .dpx)
+        frames (Optional[str]): Complex frame range to process.
This requires + input path and output path to use frame token placeholder like + `#` or `%d`, e.g. file.#.exr + frame_padding (Optional[int]): Frame padding to use for the input and + output when using a sequence filepath. + parallel_frames (bool): If True, process frames in parallel inside + the `oiiotool` process. Only supported in OIIO 2.5.20.0+. logger (logging.Logger): Logger used for logging. Raises: @@ -1178,7 +1225,16 @@ def oiio_color_convert( if logger is None: logger = logging.getLogger(__name__) - input_info = get_oiio_info_for_input(input_path, logger=logger) + # Get oiioinfo only from first image, otherwise file can't be found + first_input_path = input_path + if frames: + frames: str + first_frame = int(re.split("[ x-]", frames, 1)[0]) + first_frame = str(first_frame).zfill(frame_padding or 0) + for token in ["#", "%d"]: + first_input_path = first_input_path.replace(token, first_frame) + + input_info = get_oiio_info_for_input(first_input_path, logger=logger) # Collect channels to export input_arg, channels_arg = get_oiio_input_and_channel_args(input_info) @@ -1191,6 +1247,22 @@ def oiio_color_convert( "--colorconfig", config_path ) + if frames: + # If `frames` is specified, then process the input and output + # as if it's a sequence of frames (must contain `%04d` as frame + # token placeholder in filepaths) + oiio_cmd.extend([ + "--frames", frames, + ]) + + if frame_padding: + oiio_cmd.extend([ + "--framepadding", str(frame_padding), + ]) + + if parallel_frames: + oiio_cmd.append("--parallel-frames") + oiio_cmd.extend([ input_arg, input_path, # Tell oiiotool which channels should be put to top stack diff --git a/client/ayon_core/pipeline/create/creator_plugins.py b/client/ayon_core/pipeline/create/creator_plugins.py index 4f0baa7beb..7b168984ef 100644 --- a/client/ayon_core/pipeline/create/creator_plugins.py +++ b/client/ayon_core/pipeline/create/creator_plugins.py @@ -146,7 +146,15 @@ class BaseCreator(ABC): project_settings (dict[str, Any]): Project settings. create_context (CreateContext): Context which initialized creator. headless (bool): Running in headless mode. + """ + # Attribute 'skip_discovery' is used during discovery phase to skip + # plugins, which can be used to mark base plugins that should not be + # considered as plugins "to use". The discovery logic does NOT use + # the attribute value from parent classes. Each base class has to define + # the attribute again. + skip_discovery = True + # Label shown in UI label = None group_label = None @@ -642,7 +650,7 @@ class Creator(BaseCreator): Creation requires prepared product name and instance data. """ - + skip_discovery = True # GUI Purposes # - default_variants may not be used if `get_default_variants` # is overridden @@ -931,6 +939,8 @@ class Creator(BaseCreator): class HiddenCreator(BaseCreator): + skip_discovery = True + @abstractmethod def create(self, instance_data, source_data): pass @@ -941,6 +951,7 @@ class AutoCreator(BaseCreator): Can be used e.g. for `workfile`. 
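+
+    Like the other base creator classes it defines ``skip_discovery = True``,
+    so plugin discovery does not register it directly; subclasses are still
+    discovered unless they re-define the attribute as ``True`` themselves.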
""" + skip_discovery = True def remove_instances(self, instances): """Skip removal.""" diff --git a/client/ayon_core/pipeline/load/plugins.py b/client/ayon_core/pipeline/load/plugins.py index ed963110c6..b8cca08802 100644 --- a/client/ayon_core/pipeline/load/plugins.py +++ b/client/ayon_core/pipeline/load/plugins.py @@ -21,6 +21,13 @@ from .utils import get_representation_path_from_context class LoaderPlugin(list): """Load representation into host application""" + # Attribute 'skip_discovery' is used during discovery phase to skip + # plugins, which can be used to mark base plugins that should not be + # considered as plugins "to use". The discovery logic does NOT use + # the attribute value from parent classes. Each base class has to define + # the attribute again. + skip_discovery = True + product_types: set[str] = set() product_base_types: Optional[set[str]] = None representations = set() diff --git a/client/ayon_core/pipeline/plugin_discover.py b/client/ayon_core/pipeline/plugin_discover.py index dddd6847ec..fd907eb22c 100644 --- a/client/ayon_core/pipeline/plugin_discover.py +++ b/client/ayon_core/pipeline/plugin_discover.py @@ -138,7 +138,14 @@ def discover_plugins( for item in modules: filepath, module = item result.add_module(module) - all_plugins.extend(classes_from_module(base_class, module)) + for cls in classes_from_module(base_class, module): + if cls is base_class: + continue + # Class has defined 'skip_discovery = True' + skip_discovery = cls.__dict__.get("skip_discovery") + if skip_discovery is True: + continue + all_plugins.append(cls) if base_class not in ignored_classes: ignored_classes.append(base_class) diff --git a/client/ayon_core/plugins/publish/collect_scene_loaded_versions.py b/client/ayon_core/plugins/publish/collect_scene_loaded_versions.py index f509ed807a..54eeefc60b 100644 --- a/client/ayon_core/plugins/publish/collect_scene_loaded_versions.py +++ b/client/ayon_core/plugins/publish/collect_scene_loaded_versions.py @@ -1,3 +1,5 @@ +from __future__ import annotations +from typing import Any import ayon_api import ayon_api.utils @@ -32,6 +34,8 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): self.log.debug("No loaded containers found in scene.") return + containers = self._filter_invalid_containers(containers) + repre_ids = { container["representation"] for container in containers @@ -78,3 +82,28 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): self.log.debug(f"Collected {len(loaded_versions)} loaded versions.") context.data["loadedVersions"] = loaded_versions + + def _filter_invalid_containers( + self, + containers: list[dict[str, Any]] + ) -> list[dict[str, Any]]: + """Filter out invalid containers lacking required keys. + + Skip any invalid containers that lack 'representation' or 'name' + keys to avoid KeyError. + """ + # Only filter by what's required for this plug-in instead of validating + # a full container schema. + required_keys = {"name", "representation"} + valid = [] + for container in containers: + missing = [key for key in required_keys if key not in container] + if missing: + self.log.warning( + "Skipping invalid container, missing required keys:" + " {}. 
{}".format(", ".join(missing), container) + ) + continue + valid.append(container) + + return valid diff --git a/client/ayon_core/plugins/publish/extract_color_transcode.py b/client/ayon_core/plugins/publish/extract_color_transcode.py index b293bd29c3..63a73e07fa 100644 --- a/client/ayon_core/plugins/publish/extract_color_transcode.py +++ b/client/ayon_core/plugins/publish/extract_color_transcode.py @@ -172,20 +172,33 @@ class ExtractOIIOTranscode(publish.Extractor): additional_command_args = (output_def["oiiotool_args"] ["additional_command_args"]) - sequence_files = self._translate_to_sequence(files_to_convert) + sequence_files = self._translate_to_sequence( + files_to_convert) self.log.debug("Files to convert: {}".format(sequence_files)) missing_rgba_review_channels = False for file_name in sequence_files: if isinstance(file_name, clique.Collection): - # Convert to filepath that can be directly converted - # by oiio like `frame.1001-1025%04d.exr` - file_name: str = file_name.format( - "{head}{range}{padding}{tail}" + # Support sequences with holes by supplying + # dedicated `--frames` argument to `oiiotool` + # Create `frames` string like "1001-1002,1004,1010-1012 + # Create `filename` string like "file.#.exr" + frames = file_name.format("{ranges}").replace(" ", "") + frame_padding = file_name.padding + file_name = file_name.format("{head}#{tail}") + parallel_frames = True + elif isinstance(file_name, str): + # Single file + frames = None + frame_padding = None + parallel_frames = False + else: + raise TypeError( + f"Unsupported file name type: {type(file_name)}." + " Expected str or clique.Collection." ) self.log.debug("Transcoding file: `{}`".format(file_name)) - input_path = os.path.join(original_staging_dir, - file_name) + input_path = os.path.join(original_staging_dir, file_name) output_path = self._get_output_file_path(input_path, new_staging_dir, output_extension) @@ -201,6 +214,9 @@ class ExtractOIIOTranscode(publish.Extractor): source_display=source_display, source_view=source_view, additional_command_args=additional_command_args, + frames=frames, + frame_padding=frame_padding, + parallel_frames=parallel_frames, logger=self.log ) except MissingRGBAChannelsError as exc: @@ -294,16 +310,18 @@ class ExtractOIIOTranscode(publish.Extractor): new_repre["files"] = renamed_files def _translate_to_sequence(self, files_to_convert): - """Returns original list or a clique.Collection of a sequence. + """Returns original individual filepaths or list of clique.Collection. - Uses clique to find frame sequence Collection. - If sequence not found, it returns original list. + Uses clique to find frame sequence, and return the collections instead. + If sequence not detected in input filenames, it returns original list. Args: - files_to_convert (list): list of file names + files_to_convert (list[str]): list of file names Returns: - list[str | clique.Collection]: List of filepaths or a list - of Collections (usually one, unless there are holes) + list[str | clique.Collection]: List of + filepaths ['fileA.exr', 'fileB.exr'] + or clique.Collection for a sequence. + """ pattern = [clique.PATTERNS["frames"]] collections, _ = clique.assemble( @@ -314,14 +332,7 @@ class ExtractOIIOTranscode(publish.Extractor): raise ValueError( "Too many collections {}".format(collections)) - collection = collections[0] - # TODO: Technically oiiotool supports holes in the sequence as well - # using the dedicated --frames argument to specify the frames. 
- # We may want to use that too so conversions of sequences with - # holes will perform faster as well. - # Separate the collection so that we have no holes/gaps per - # collection. - return collection.separate() + return collections return files_to_convert diff --git a/client/ayon_core/plugins/publish/extract_thumbnail.py b/client/ayon_core/plugins/publish/extract_thumbnail.py index 2a43c12af3..22f538966d 100644 --- a/client/ayon_core/plugins/publish/extract_thumbnail.py +++ b/client/ayon_core/plugins/publish/extract_thumbnail.py @@ -1,8 +1,9 @@ import copy +from dataclasses import dataclass, field, fields import os import subprocess import tempfile -import re +from typing import Dict, Any, List, Tuple, Optional import pyblish.api from ayon_core.lib import ( @@ -15,6 +16,7 @@ from ayon_core.lib import ( path_to_subprocess_arg, run_subprocess, + filter_profiles, ) from ayon_core.lib.transcoding import ( MissingRGBAChannelsError, @@ -26,6 +28,61 @@ from ayon_core.lib.transcoding import ( from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS +@dataclass +class ThumbnailDef: + """ + Data class representing the full configuration for selected profile + + Any change of controllable fields in Settings must propagate here! + """ + integrate_thumbnail: bool = False + + target_size: Dict[str, Any] = field( + default_factory=lambda: { + "type": "source", + "resize": {"width": 1920, "height": 1080}, + } + ) + + duration_split: float = 0.5 + + oiiotool_defaults: Dict[str, str] = field( + default_factory=lambda: { + "type": "colorspace", + "colorspace": "color_picking" + } + ) + + ffmpeg_args: Dict[str, List[Any]] = field( + default_factory=lambda: {"input": [], "output": []} + ) + + # Background color defined as (R, G, B, A) tuple. + # Note: Use float for alpha channel (0.0 to 1.0). + background_color: Tuple[int, int, int, float] = (0, 0, 0, 0.0) + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "ThumbnailDef": + """ + Creates a ThumbnailDef instance from a dictionary, safely ignoring + any keys in the dictionary that are not fields in the dataclass. + + Args: + data (Dict[str, Any]): The dictionary containing configuration data + + Returns: + MediaConfig: A new instance of the dataclass. 
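+
+        Example:
+            ``ThumbnailDef.from_dict({"duration_split": 0.25, "foo": 1})``
+            returns a ThumbnailDef with ``duration_split=0.25``; the unknown
+            "foo" key is dropped and every other field keeps its default.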
+ """ + # Get all field names defined in the dataclass + field_names = {f.name for f in fields(cls)} + + # Filter the input dictionary to include only keys matching field names + filtered_data = {k: v for k, v in data.items() if k in field_names} + + # Unpack the filtered dictionary into the constructor + return cls(**filtered_data) + + class ExtractThumbnail(pyblish.api.InstancePlugin): """Create jpg thumbnail from sequence using ffmpeg""" @@ -52,30 +109,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): settings_category = "core" enabled = False - integrate_thumbnail = False - target_size = { - "type": "source", - "resize": { - "width": 1920, - "height": 1080 - } - } - background_color = (0, 0, 0, 0.0) - duration_split = 0.5 - # attribute presets from settings - oiiotool_defaults = { - "type": "colorspace", - "colorspace": "color_picking", - "display_and_view": { - "display": "default", - "view": "sRGB" - } - } - ffmpeg_args = { - "input": [], - "output": [] - } - product_names = [] + profiles = [] def process(self, instance): # run main process @@ -98,6 +132,13 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): instance.data["representations"].remove(repre) def _main_process(self, instance): + if not self.profiles: + self.log.debug("No profiles present for extract review thumbnail.") + return + thumbnail_def = self._get_config_from_profile(instance) + if not thumbnail_def: + return + product_name = instance.data["productName"] instance_repres = instance.data.get("representations") if not instance_repres: @@ -130,24 +171,6 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): self.log.debug("Skipping crypto passes.") return - # We only want to process the produces needed from settings. - def validate_string_against_patterns(input_str, patterns): - for pattern in patterns: - if re.match(pattern, input_str): - return True - return False - - product_names = self.product_names - if product_names: - result = validate_string_against_patterns( - product_name, product_names - ) - if not result: - self.log.debug(( - "Product name \"{}\" did not match settings filters: {}" - ).format(product_name, product_names)) - return - # first check for any explicitly marked representations for thumbnail explicit_repres = self._get_explicit_repres_for_thumbnail(instance) if explicit_repres: @@ -192,7 +215,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ) file_path = self._create_frame_from_video( video_file_path, - dst_staging + dst_staging, + thumbnail_def ) if file_path: src_staging, input_file = os.path.split(file_path) @@ -205,7 +229,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): if "slate-frame" in repre.get("tags", []): repre_files_thumb = repre_files_thumb[1:] file_index = int( - float(len(repre_files_thumb)) * self.duration_split) + float(len(repre_files_thumb)) * thumbnail_def.duration_split # noqa: E501 + ) input_file = repre_files[file_index] full_input_path = os.path.join(src_staging, input_file) @@ -242,13 +267,13 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): # colorspace data if not repre_thumb_created: repre_thumb_created = self._create_thumbnail_ffmpeg( - full_input_path, full_output_path + full_input_path, full_output_path, thumbnail_def ) # Skip representation and try next one if wasn't created if not repre_thumb_created and oiio_supported: repre_thumb_created = self._create_thumbnail_oiio( - full_input_path, full_output_path + full_input_path, full_output_path, thumbnail_def ) if not repre_thumb_created: @@ -276,7 +301,7 @@ class 
ExtractThumbnail(pyblish.api.InstancePlugin): new_repre_tags = ["thumbnail"] # for workflows which needs to have thumbnails published as # separate representations `delete` tag should not be added - if not self.integrate_thumbnail: + if not thumbnail_def.integrate_thumbnail: new_repre_tags.append("delete") new_repre = { @@ -398,6 +423,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): src_path, dst_path, colorspace_data, + thumbnail_def ): """Create thumbnail using OIIO tool oiiotool @@ -415,7 +441,9 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): str: path to created thumbnail """ self.log.info("Extracting thumbnail {}".format(dst_path)) - resolution_arg = self._get_resolution_arg("oiiotool", src_path) + resolution_arg = self._get_resolution_args( + "oiiotool", src_path, thumbnail_def + ) repre_display = colorspace_data.get("display") repre_view = colorspace_data.get("view") @@ -434,12 +462,13 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ) # if representation doesn't have display and view then use # oiiotool_defaults - elif self.oiiotool_defaults: - oiio_default_type = self.oiiotool_defaults["type"] + elif thumbnail_def.oiiotool_defaults: + oiiotool_defaults = thumbnail_def.oiiotool_defaults + oiio_default_type = oiiotool_defaults["type"] if "colorspace" == oiio_default_type: - oiio_default_colorspace = self.oiiotool_defaults["colorspace"] + oiio_default_colorspace = oiiotool_defaults["colorspace"] else: - display_and_view = self.oiiotool_defaults["display_and_view"] + display_and_view = oiiotool_defaults["display_and_view"] oiio_default_display = display_and_view["display"] oiio_default_view = display_and_view["view"] @@ -466,11 +495,13 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): return True - def _create_thumbnail_oiio(self, src_path, dst_path): + def _create_thumbnail_oiio(self, src_path, dst_path, thumbnail_def): self.log.debug(f"Extracting thumbnail with OIIO: {dst_path}") try: - resolution_arg = self._get_resolution_arg("oiiotool", src_path) + resolution_arg = self._get_resolution_args( + "oiiotool", src_path, thumbnail_def + ) except RuntimeError: self.log.warning( "Failed to create thumbnail using oiio", exc_info=True @@ -510,9 +541,11 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ) return False - def _create_thumbnail_ffmpeg(self, src_path, dst_path): + def _create_thumbnail_ffmpeg(self, src_path, dst_path, thumbnail_def): try: - resolution_arg = self._get_resolution_arg("ffmpeg", src_path) + resolution_arg = self._get_resolution_args( + "ffmpeg", src_path, thumbnail_def + ) except RuntimeError: self.log.warning( "Failed to create thumbnail using ffmpeg", exc_info=True @@ -520,7 +553,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): return False ffmpeg_path_args = get_ffmpeg_tool_args("ffmpeg") - ffmpeg_args = self.ffmpeg_args or {} + ffmpeg_args = thumbnail_def.ffmpeg_args or {} jpeg_items = [ subprocess.list2cmdline(ffmpeg_path_args) @@ -560,7 +593,12 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ) return False - def _create_frame_from_video(self, video_file_path, output_dir): + def _create_frame_from_video( + self, + video_file_path, + output_dir, + thumbnail_def + ): """Convert video file to one frame image via ffmpeg""" # create output file path base_name = os.path.basename(video_file_path) @@ -585,7 +623,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): seek_position = 0.0 # Only use timestamp calculation for videos longer than 0.1 seconds if duration > 0.1: - seek_position = duration * self.duration_split + 
seek_position = duration * thumbnail_def.duration_split # Build command args cmd_args = [] @@ -659,16 +697,17 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ): os.remove(output_thumb_file_path) - def _get_resolution_arg( + def _get_resolution_args( self, application, input_path, + thumbnail_def ): # get settings - if self.target_size["type"] == "source": + if thumbnail_def.target_size["type"] == "source": return [] - resize = self.target_size["resize"] + resize = thumbnail_def.target_size["resize"] target_width = resize["width"] target_height = resize["height"] @@ -678,6 +717,43 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): input_path, target_width, target_height, - bg_color=self.background_color, + bg_color=thumbnail_def.background_color, log=self.log ) + + def _get_config_from_profile( + self, + instance: pyblish.api.Instance + ) -> Optional[ThumbnailDef]: + """Returns profile if and how repre should be color transcoded.""" + host_name = instance.context.data["hostName"] + product_type = instance.data["productType"] + product_name = instance.data["productName"] + task_data = instance.data["anatomyData"].get("task", {}) + task_name = task_data.get("name") + task_type = task_data.get("type") + filtering_criteria = { + "host_names": host_name, + "product_types": product_type, + "product_names": product_name, + "task_names": task_name, + "task_types": task_type, + } + profile = filter_profiles( + self.profiles, + filtering_criteria, + logger=self.log + ) + + if not profile: + self.log.debug( + "Skipped instance. None of profiles in presets are for" + f' Host: "{host_name}"' + f' | Product types: "{product_type}"' + f' | Product names: "{product_name}"' + f' | Task name "{task_name}"' + f' | Task type "{task_type}"' + ) + return None + + return ThumbnailDef.from_dict(profile) diff --git a/client/ayon_core/plugins/publish/integrate.py b/client/ayon_core/plugins/publish/integrate.py index 9f24b35754..5afdcbdf07 100644 --- a/client/ayon_core/plugins/publish/integrate.py +++ b/client/ayon_core/plugins/publish/integrate.py @@ -924,8 +924,12 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Include optional data if present in optionals = [ - "frameStart", "frameEnd", "step", - "handleEnd", "handleStart", "sourceHashes" + "frameStart", "frameEnd", + "handleEnd", "handleStart", + "step", + "resolutionWidth", "resolutionHeight", + "pixelAspect", + "sourceHashes" ] for key in optionals: if key in instance.data: diff --git a/package.py b/package.py index 003c41f0f5..857b3f6906 100644 --- a/package.py +++ b/package.py @@ -12,6 +12,7 @@ ayon_server_version = ">=1.8.4,<2.0.0" ayon_launcher_version = ">=1.0.2" ayon_required_addons = {} ayon_compatible_addons = { + "ayon_third_party": ">=1.3.0", "ayon_ocio": ">=1.2.1", "applications": ">=1.1.2", "harmony": ">0.4.0", diff --git a/pyproject.toml b/pyproject.toml index b06f812b27..562bb72035 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,7 @@ opentimelineio = "^0.17.0" speedcopy = "^2.1" qtpy="^2.4.3" pyside6 = "^6.5.2" -pytest-ayon = { git = "https://github.com/ynput/pytest-ayon.git", branch = "chore/align-dependencies" } +pytest-ayon = { git = "https://github.com/ynput/pytest-ayon.git", branch = "develop" } [tool.codespell] # Ignore words that are not in the dictionary. 
diff --git a/server/settings/conversion.py b/server/settings/conversion.py index 757818a9ff..9da765e366 100644 --- a/server/settings/conversion.py +++ b/server/settings/conversion.py @@ -158,6 +158,46 @@ def _convert_publish_plugins(overrides): _convert_oiio_transcode_0_4_5(overrides["publish"]) +def _convert_extract_thumbnail(overrides): + """ExtractThumbnail config settings did change to profiles.""" + extract_thumbnail_overrides = ( + overrides.get("publish", {}).get("ExtractThumbnail") + ) + if extract_thumbnail_overrides is None: + return + + base_value = { + "product_types": [], + "host_names": [], + "task_types": [], + "task_names": [], + "product_names": [], + "integrate_thumbnail": True, + "target_size": {"type": "source"}, + "duration_split": 0.5, + "oiiotool_defaults": { + "type": "colorspace", + "colorspace": "color_picking", + }, + "ffmpeg_args": {"input": ["-apply_trc gamma22"], "output": []}, + } + for key in ( + "product_names", + "integrate_thumbnail", + "target_size", + "duration_split", + "oiiotool_defaults", + "ffmpeg_args", + ): + if key in extract_thumbnail_overrides: + base_value[key] = extract_thumbnail_overrides.pop(key) + + extract_thumbnail_profiles = extract_thumbnail_overrides.setdefault( + "profiles", [] + ) + extract_thumbnail_profiles.append(base_value) + + def convert_settings_overrides( source_version: str, overrides: dict[str, Any], @@ -166,4 +206,5 @@ def convert_settings_overrides( _convert_imageio_configs_0_4_5(overrides) _convert_imageio_configs_1_6_5(overrides) _convert_publish_plugins(overrides) + _convert_extract_thumbnail(overrides) return overrides diff --git a/server/settings/publish_plugins.py b/server/settings/publish_plugins.py index ffd25079d1..da22e4206f 100644 --- a/server/settings/publish_plugins.py +++ b/server/settings/publish_plugins.py @@ -400,24 +400,30 @@ class ExtractThumbnailOIIODefaultsModel(BaseSettingsModel): ) -class ExtractThumbnailModel(BaseSettingsModel): - _isGroup = True - enabled: bool = SettingsField(True) +class ExtractThumbnailProfileModel(BaseSettingsModel): + product_types: list[str] = SettingsField( + default_factory=list, title="Product types" + ) + host_names: list[str] = SettingsField( + default_factory=list, title="Host names" + ) + task_types: list[str] = SettingsField( + default_factory=list, title="Task types", enum_resolver=task_types_enum + ) + task_names: list[str] = SettingsField( + default_factory=list, title="Task names" + ) product_names: list[str] = SettingsField( - default_factory=list, - title="Product names" + default_factory=list, title="Product names" ) integrate_thumbnail: bool = SettingsField( - True, - title="Integrate Thumbnail Representation" + True, title="Integrate Thumbnail Representation" ) target_size: ResizeModel = SettingsField( - default_factory=ResizeModel, - title="Target size" + default_factory=ResizeModel, title="Target size" ) background_color: ColorRGBA_uint8 = SettingsField( - (0, 0, 0, 0.0), - title="Background color" + (0, 0, 0, 0.0), title="Background color" ) duration_split: float = SettingsField( 0.5, @@ -434,6 +440,15 @@ class ExtractThumbnailModel(BaseSettingsModel): ) +class ExtractThumbnailModel(BaseSettingsModel): + _isGroup = True + enabled: bool = SettingsField(True) + + profiles: list[ExtractThumbnailProfileModel] = SettingsField( + default_factory=list, title="Profiles" + ) + + def _extract_oiio_transcoding_type(): return [ {"value": "colorspace", "label": "Use Colorspace"}, @@ -1480,22 +1495,30 @@ DEFAULT_PUBLISH_VALUES = { }, "ExtractThumbnail": { "enabled": 
True, - "product_names": [], - "integrate_thumbnail": True, - "target_size": { - "type": "source" - }, - "duration_split": 0.5, - "oiiotool_defaults": { - "type": "colorspace", - "colorspace": "color_picking" - }, - "ffmpeg_args": { - "input": [ - "-apply_trc gamma22" - ], - "output": [] - } + "profiles": [ + { + "product_types": [], + "host_names": [], + "task_types": [], + "task_names": [], + "product_names": [], + "integrate_thumbnail": True, + "target_size": { + "type": "source" + }, + "duration_split": 0.5, + "oiiotool_defaults": { + "type": "colorspace", + "colorspace": "color_picking" + }, + "ffmpeg_args": { + "input": [ + "-apply_trc gamma22" + ], + "output": [] + } + } + ] }, "ExtractThumbnailFromSource": { "enabled": True,