diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 646a2dd1ee..77e1e14479 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -35,6 +35,15 @@ body: label: Version description: What version are you running? Look to AYON Tray options: + - 1.7.0 + - 1.6.13 + - 1.6.12 + - 1.6.11 + - 1.6.10 + - 1.6.9 + - 1.6.8 + - 1.6.7 + - 1.6.6 - 1.6.5 - 1.6.4 - 1.6.3 diff --git a/client/ayon_core/addon/base.py b/client/ayon_core/addon/base.py index 9207bb74c0..a04aedb8cc 100644 --- a/client/ayon_core/addon/base.py +++ b/client/ayon_core/addon/base.py @@ -141,6 +141,9 @@ def _get_ayon_bundle_data() -> tuple[ ]: studio_bundle_name = os.environ.get("AYON_STUDIO_BUNDLE_NAME") project_bundle_name = os.getenv("AYON_BUNDLE_NAME") + # If AYON launcher <1.4.0 was used + if not studio_bundle_name: + studio_bundle_name = project_bundle_name bundles = ayon_api.get_bundles()["bundles"] studio_bundle = next( ( diff --git a/client/ayon_core/addon/interfaces.py b/client/ayon_core/addon/interfaces.py index bf08ccd48c..0a17ec9fb9 100644 --- a/client/ayon_core/addon/interfaces.py +++ b/client/ayon_core/addon/interfaces.py @@ -185,6 +185,20 @@ class IPluginPaths(AYONInterface): """ return self._get_plugin_paths_by_type("inventory") + def get_loader_action_plugin_paths( + self, host_name: Optional[str] + ) -> list[str]: + """Receive loader action plugin paths. + + Args: + host_name (Optional[str]): Current host name. + + Returns: + list[str]: Paths to loader action plugins. + + """ + return [] + class ITrayAddon(AYONInterface): """Addon has special procedures when used in Tray tool. diff --git a/client/ayon_core/cli.py b/client/ayon_core/cli.py index 85c254e7eb..4135aa2e31 100644 --- a/client/ayon_core/cli.py +++ b/client/ayon_core/cli.py @@ -6,7 +6,6 @@ import logging import code import traceback from pathlib import Path -import warnings import click @@ -90,54 +89,6 @@ def addon(ctx): pass -@main_cli.command() -@click.pass_context -@click.argument("output_json_path") -@click.option("--project", help="Project name", default=None) -@click.option("--asset", help="Folder path", default=None) -@click.option("--task", help="Task name", default=None) -@click.option("--app", help="Application name", default=None) -@click.option( - "--envgroup", help="Environment group (e.g. \"farm\")", default=None -) -def extractenvironments( - ctx, output_json_path, project, asset, task, app, envgroup -): - """Extract environment variables for entered context to a json file. - - Entered output filepath will be created if does not exists. - - All context options must be passed otherwise only AYON's global - environments will be extracted. - - Context options are "project", "asset", "task", "app" - - Deprecated: - This function is deprecated and will be removed in future. Please use - 'addon applications extractenvironments ...' instead. - """ - warnings.warn( - ( - "Command 'extractenvironments' is deprecated and will be" - " removed in future. Please use" - " 'addon applications extractenvironments ...' instead." - ), - DeprecationWarning - ) - - addons_manager = ctx.obj["addons_manager"] - applications_addon = addons_manager.get_enabled_addon("applications") - if applications_addon is None: - raise RuntimeError( - "Applications addon is not available or enabled." 
- ) - - # Please ignore the fact this is using private method - applications_addon._cli_extract_environments( - output_json_path, project, asset, task, app, envgroup - ) - - @main_cli.command() @click.pass_context @click.argument("path", required=True) diff --git a/client/ayon_core/host/constants.py b/client/ayon_core/host/constants.py index 2564c5d54d..1ca33728d8 100644 --- a/client/ayon_core/host/constants.py +++ b/client/ayon_core/host/constants.py @@ -1,11 +1,4 @@ -from enum import Enum - - -class StrEnum(str, Enum): - """A string-based Enum class that allows for string comparison.""" - - def __str__(self) -> str: - return self.value +from ayon_core.lib import StrEnum class ContextChangeReason(StrEnum): diff --git a/client/ayon_core/host/host.py b/client/ayon_core/host/host.py index 7d6d3ddbe4..b52506c0b8 100644 --- a/client/ayon_core/host/host.py +++ b/client/ayon_core/host/host.py @@ -137,7 +137,7 @@ class HostBase(AbstractHost): def get_current_folder_path(self) -> Optional[str]: """ Returns: - Optional[str]: Current asset name. + Optional[str]: Current folder path. """ return os.environ.get("AYON_FOLDER_PATH") diff --git a/client/ayon_core/lib/__init__.py b/client/ayon_core/lib/__init__.py index 5ccc8d03e5..7627c67f06 100644 --- a/client/ayon_core/lib/__init__.py +++ b/client/ayon_core/lib/__init__.py @@ -2,6 +2,7 @@ # flake8: noqa E402 """AYON lib functions.""" +from ._compatibility import StrEnum from .local_settings import ( IniSettingRegistry, JSONSettingRegistry, @@ -11,6 +12,7 @@ from .local_settings import ( get_launcher_storage_dir, get_addons_resources_dir, get_local_site_id, + get_ayon_user_entity, get_ayon_username, ) from .ayon_connection import initialize_ayon_connection @@ -73,6 +75,7 @@ from .log import ( ) from .path_templates import ( + DefaultKeysDict, TemplateUnsolved, StringTemplate, FormatObject, @@ -140,6 +143,8 @@ from .ayon_info import ( terminal = Terminal __all__ = [ + "StrEnum", + "IniSettingRegistry", "JSONSettingRegistry", "AYONSecureRegistry", @@ -148,6 +153,7 @@ __all__ = [ "get_launcher_storage_dir", "get_addons_resources_dir", "get_local_site_id", + "get_ayon_user_entity", "get_ayon_username", "initialize_ayon_connection", @@ -228,6 +234,7 @@ __all__ = [ "get_version_from_path", "get_last_version_from_path", + "DefaultKeysDict", "TemplateUnsolved", "StringTemplate", "FormatObject", diff --git a/client/ayon_core/lib/_compatibility.py b/client/ayon_core/lib/_compatibility.py new file mode 100644 index 0000000000..299ed5e233 --- /dev/null +++ b/client/ayon_core/lib/_compatibility.py @@ -0,0 +1,8 @@ +from enum import Enum + + +class StrEnum(str, Enum): + """A string-based Enum class that allows for string comparison.""" + + def __str__(self) -> str: + return self.value diff --git a/client/ayon_core/lib/attribute_definitions.py b/client/ayon_core/lib/attribute_definitions.py index cb74fea0f1..36c6429f5e 100644 --- a/client/ayon_core/lib/attribute_definitions.py +++ b/client/ayon_core/lib/attribute_definitions.py @@ -604,7 +604,11 @@ class EnumDef(AbstractAttrDef): if value is None: return copy.deepcopy(self.default) - return list(self._item_values.intersection(value)) + return [ + v + for v in value + if v in self._item_values + ] def is_value_valid(self, value: Any) -> bool: """Check if item is available in possible values.""" diff --git a/client/ayon_core/lib/local_settings.py b/client/ayon_core/lib/local_settings.py index 1edfc3c1b6..8a17b7af38 100644 --- a/client/ayon_core/lib/local_settings.py +++ b/client/ayon_core/lib/local_settings.py @@ -5,6 
+5,7 @@ import json import platform import configparser import warnings +import copy from datetime import datetime from abc import ABC, abstractmethod from functools import lru_cache @@ -13,6 +14,8 @@ from typing import Optional, Any import platformdirs import ayon_api +from .cache import NestedCacheItem, CacheItem + _PLACEHOLDER = object() @@ -23,6 +26,7 @@ class RegistryItemNotFound(ValueError): class _Cache: username = None + user_entities_by_name = NestedCacheItem() def _get_ayon_appdirs(*args: str) -> str: @@ -569,6 +573,68 @@ def get_local_site_id(): return site_id +def _get_ayon_service_username() -> Optional[str]: + # TODO @iLLiCiTiT - do not use private attribute of 'ServerAPI', rather + # use public method to get username from connection stack. + con = ayon_api.get_server_api_connection() + user_stack = getattr(con, "_as_user_stack", None) + if user_stack is None: + return None + return user_stack.username + + +def get_ayon_user_entity(username: Optional[str] = None) -> dict[str, Any]: + """AYON user entity used for templates and publishing. + + Note: + Usually only service and admin users can receive the full user entity. + + Args: + username (Optional[str]): Username of the user. If not passed, then + the current user in 'ayon_api' is used. + + Returns: + dict[str, Any]: User entity. + + """ + service_username = _get_ayon_service_username() + # Handle service user handling first + if service_username: + if username is None: + username = service_username + cache: CacheItem = _Cache.user_entities_by_name[username] + if not cache.is_valid: + if username == service_username: + user = ayon_api.get_user() + else: + user = ayon_api.get_user(username) + cache.update_data(user) + return copy.deepcopy(cache.get_data()) + + # Cache current user + current_user = None + if _Cache.username is None: + current_user = ayon_api.get_user() + _Cache.username = current_user["name"] + + if username is None: + username = _Cache.username + + cache: CacheItem = _Cache.user_entities_by_name[username] + if not cache.is_valid: + user = None + if username == _Cache.username: + if current_user is None: + current_user = ayon_api.get_user() + user = current_user + + if user is None: + user = ayon_api.get_user(username) + cache.update_data(user) + + return copy.deepcopy(cache.get_data()) + + def get_ayon_username(): """AYON username used for templates and publishing. @@ -578,20 +644,5 @@ def get_ayon_username(): str: Username. """ - # Look for username in the connection stack - # - this is used when service is working as other user - # (e.g. in background sync) - # TODO @iLLiCiTiT - do not use private attribute of 'ServerAPI', rather - # use public method to get username from connection stack. 
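For context, a minimal usage sketch of the get_ayon_user_entity helper added above (editor's illustration, not part of the diff). The username and the "attrib" keys below are assumptions; the exact entity fields depend on the AYON server and on the caller's permissions:

    from ayon_core.lib import get_ayon_user_entity, get_ayon_username

    # Current user entity (cached after the first call)
    user = get_ayon_user_entity()
    print(user["name"])          # same value that get_ayon_username() returns

    # Entity of another user - full data is usually available only to
    # service/admin users, as noted in the docstring above.
    other = get_ayon_user_entity("artist01")  # "artist01" is hypothetical
    print(other.get("attrib", {}).get("fullName"))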
- con = ayon_api.get_server_api_connection() - user_stack = getattr(con, "_as_user_stack", None) - if user_stack is not None: - username = user_stack.username - if username is not None: - return username - - # Cache the username to avoid multiple API calls - # - it is not expected that user would change - if _Cache.username is None: - _Cache.username = ayon_api.get_user()["name"] - return _Cache.username + user = get_ayon_user_entity() + return user["name"] diff --git a/client/ayon_core/lib/path_templates.py b/client/ayon_core/lib/path_templates.py index c6e9e14eac..aba2f296e3 100644 --- a/client/ayon_core/lib/path_templates.py +++ b/client/ayon_core/lib/path_templates.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import re import copy @@ -5,11 +7,7 @@ import numbers import warnings import platform from string import Formatter -import typing -from typing import List, Dict, Any, Set - -if typing.TYPE_CHECKING: - from typing import Union +from typing import Any, Union, Iterable SUB_DICT_PATTERN = re.compile(r"([^\[\]]+)") OPTIONAL_PATTERN = re.compile(r"(<.*?[^{0]*>)[^0-9]*?") @@ -44,6 +42,54 @@ class TemplateUnsolved(Exception): ) +class DefaultKeysDict(dict): + """Dictionary that supports the default key to use for str conversion. + + Is helpful for changes of a key in a template from string to dictionary + for example '{folder}' -> '{folder[name]}'. + >>> data = DefaultKeysDict( + >>> "name", + >>> {"folder": {"name": "FolderName"}} + >>> ) + >>> print("{folder[name]}".format_map(data)) + FolderName + >>> print("{folder}".format_map(data)) + FolderName + + Args: + default_key (Union[str, Iterable[str]]): Default key to use for str + conversion. Can also expect multiple keys for more nested + dictionary. + + """ + def __init__( + self, default_keys: Union[str, Iterable[str]], *args, **kwargs + ) -> None: + if isinstance(default_keys, str): + default_keys = [default_keys] + else: + default_keys = list(default_keys) + if not default_keys: + raise ValueError( + "Default key must be set. Got empty default keys." + ) + + self._default_keys = default_keys + super().__init__(*args, **kwargs) + + def __str__(self) -> str: + return str(self.get_default_value()) + + def get_default_keys(self) -> list[str]: + return list(self._default_keys) + + def get_default_value(self) -> Any: + value = self + for key in self._default_keys: + value = value[key] + return value + + class StringTemplate: """String that can be formatted.""" def __init__(self, template: str): @@ -84,7 +130,7 @@ class StringTemplate: if substr: new_parts.append(substr) - self._parts: List["Union[str, OptionalPart, FormattingPart]"] = ( + self._parts: list[Union[str, OptionalPart, FormattingPart]] = ( self.find_optional_parts(new_parts) ) @@ -105,7 +151,7 @@ class StringTemplate: def template(self) -> str: return self._template - def format(self, data: Dict[str, Any]) -> "TemplateResult": + def format(self, data: dict[str, Any]) -> "TemplateResult": """ Figure out with whole formatting. 
Separate advanced keys (*Like '{project[name]}') from string which must @@ -145,29 +191,29 @@ class StringTemplate: invalid_types ) - def format_strict(self, data: Dict[str, Any]) -> "TemplateResult": + def format_strict(self, data: dict[str, Any]) -> "TemplateResult": result = self.format(data) result.validate() return result @classmethod def format_template( - cls, template: str, data: Dict[str, Any] + cls, template: str, data: dict[str, Any] ) -> "TemplateResult": objected_template = cls(template) return objected_template.format(data) @classmethod def format_strict_template( - cls, template: str, data: Dict[str, Any] + cls, template: str, data: dict[str, Any] ) -> "TemplateResult": objected_template = cls(template) return objected_template.format_strict(data) @staticmethod def find_optional_parts( - parts: List["Union[str, FormattingPart]"] - ) -> List["Union[str, OptionalPart, FormattingPart]"]: + parts: list[Union[str, FormattingPart]] + ) -> list[Union[str, OptionalPart, FormattingPart]]: new_parts = [] tmp_parts = {} counted_symb = -1 @@ -192,7 +238,7 @@ class StringTemplate: len(parts) == 1 and isinstance(parts[0], str) ): - value = "<{}>".format(parts[0]) + value = f"<{parts[0]}>" else: value = OptionalPart(parts) @@ -223,7 +269,7 @@ class TemplateResult(str): only used keys. solved (bool): For check if all required keys were filled. template (str): Original template. - missing_keys (Iterable[str]): Missing keys that were not in the data. + missing_keys (list[str]): Missing keys that were not in the data. Include missing optional keys. invalid_types (dict): When key was found in data, but value had not allowed DataType. Allowed data types are `numbers`, @@ -232,11 +278,11 @@ class TemplateResult(str): of number. """ - used_values: Dict[str, Any] = None + used_values: dict[str, Any] = None solved: bool = None template: str = None - missing_keys: List[str] = None - invalid_types: Dict[str, Any] = None + missing_keys: list[str] = None + invalid_types: dict[str, Any] = None def __new__( cls, filled_template, template, solved, @@ -296,21 +342,21 @@ class TemplatePartResult: """Result to store result of template parts.""" def __init__(self, optional: bool = False): # Missing keys or invalid value types of required keys - self._missing_keys: Set[str] = set() - self._invalid_types: Dict[str, Any] = {} + self._missing_keys: set[str] = set() + self._invalid_types: dict[str, Any] = {} # Missing keys or invalid value types of optional keys - self._missing_optional_keys: Set[str] = set() - self._invalid_optional_types: Dict[str, Any] = {} + self._missing_optional_keys: set[str] = set() + self._invalid_optional_types: dict[str, Any] = {} # Used values stored by key with origin type # - key without any padding or key modifiers # - value from filling data # Example: {"version": 1} - self._used_values: Dict[str, Any] = {} + self._used_values: dict[str, Any] = {} # Used values stored by key with all modifirs # - value is already formatted string # Example: {"version:0>3": "001"} - self._really_used_values: Dict[str, Any] = {} + self._really_used_values: dict[str, Any] = {} # Concatenated string output after formatting self._output: str = "" # Is this result from optional part @@ -336,8 +382,9 @@ class TemplatePartResult: self._really_used_values.update(other.really_used_values) else: - raise TypeError("Cannot add data from \"{}\" to \"{}\"".format( - str(type(other)), self.__class__.__name__) + raise TypeError( + f"Cannot add data from \"{type(other)}\"" + f" to \"{self.__class__.__name__}\"" ) 
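A brief sketch of how the result objects described above behave (editor's illustration, not part of the diff); the template and data are made up:

    from ayon_core.lib import StringTemplate, TemplateUnsolved

    template = StringTemplate("{project[name]}/<v{version:0>3}>")
    result = template.format({"project": {"name": "demo"}})
    print(result)         # "demo/" - the optional part is dropped
    print(result.solved)  # True - only an optional key was missing

    try:
        StringTemplate.format_strict_template("{folder[name]}", {})
    except TemplateUnsolved as exc:
        print(exc)        # reports the missing 'folder[name]' key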
@property @@ -362,40 +409,41 @@ class TemplatePartResult: return self._output @property - def missing_keys(self) -> Set[str]: + def missing_keys(self) -> set[str]: return self._missing_keys @property - def missing_optional_keys(self) -> Set[str]: + def missing_optional_keys(self) -> set[str]: return self._missing_optional_keys @property - def invalid_types(self) -> Dict[str, Any]: + def invalid_types(self) -> dict[str, Any]: return self._invalid_types @property - def invalid_optional_types(self) -> Dict[str, Any]: + def invalid_optional_types(self) -> dict[str, Any]: return self._invalid_optional_types @property - def really_used_values(self) -> Dict[str, Any]: + def really_used_values(self) -> dict[str, Any]: return self._really_used_values @property - def realy_used_values(self) -> Dict[str, Any]: + def realy_used_values(self) -> dict[str, Any]: warnings.warn( "Property 'realy_used_values' is deprecated." " Use 'really_used_values' instead.", - DeprecationWarning + DeprecationWarning, + stacklevel=2, ) return self._really_used_values @property - def used_values(self) -> Dict[str, Any]: + def used_values(self) -> dict[str, Any]: return self._used_values @staticmethod - def split_keys_to_subdicts(values: Dict[str, Any]) -> Dict[str, Any]: + def split_keys_to_subdicts(values: dict[str, Any]) -> dict[str, Any]: output = {} formatter = Formatter() for key, value in values.items(): @@ -410,7 +458,7 @@ class TemplatePartResult: data[last_key] = value return output - def get_clean_used_values(self) -> Dict[str, Any]: + def get_clean_used_values(self) -> dict[str, Any]: new_used_values = {} for key, value in self.used_values.items(): if isinstance(value, FormatObject): @@ -426,7 +474,8 @@ class TemplatePartResult: warnings.warn( "Method 'add_realy_used_value' is deprecated." " Use 'add_really_used_value' instead.", - DeprecationWarning + DeprecationWarning, + stacklevel=2, ) self.add_really_used_value(key, value) @@ -479,7 +528,7 @@ class FormattingPart: self, field_name: str, format_spec: str, - conversion: "Union[str, None]", + conversion: Union[str, None], ): format_spec_v = "" if format_spec: @@ -546,7 +595,7 @@ class FormattingPart: return not queue @staticmethod - def keys_to_template_base(keys: List[str]): + def keys_to_template_base(keys: list[str]): if not keys: return None # Create copy of keys @@ -556,7 +605,7 @@ class FormattingPart: return f"{template_base}{joined_keys}" def format( - self, data: Dict[str, Any], result: TemplatePartResult + self, data: dict[str, Any], result: TemplatePartResult ) -> TemplatePartResult: """Format the formattings string. 
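To illustrate the intent of the DefaultKeysDict class added earlier in this file (editor's illustration, not part of the diff): both the old '{folder}' and the new '{folder[name]}' template styles resolve to the same value. The folder data is made up:

    from ayon_core.lib import DefaultKeysDict, StringTemplate

    data = {
        "folder": DefaultKeysDict("name", {"name": "sh010", "type": "Shot"}),
    }

    # New-style template accessing the nested key explicitly
    print(StringTemplate.format_template("{folder[name]}", data))  # sh010
    # Old-style template still resolves through the default key
    print(StringTemplate.format_template("{folder}", data))        # sh010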
@@ -635,6 +684,12 @@ class FormattingPart: result.add_output(self.template) return result + if isinstance(value, DefaultKeysDict): + try: + value = value.get_default_value() + except KeyError: + pass + if not self.validate_value_type(value): result.add_invalid_type(key, value) result.add_output(self.template) @@ -687,23 +742,25 @@ class OptionalPart: def __init__( self, - parts: List["Union[str, OptionalPart, FormattingPart]"] + parts: list[Union[str, OptionalPart, FormattingPart]] ): - self._parts: List["Union[str, OptionalPart, FormattingPart]"] = parts + self._parts: list[Union[str, OptionalPart, FormattingPart]] = parts @property - def parts(self) -> List["Union[str, OptionalPart, FormattingPart]"]: + def parts(self) -> list[Union[str, OptionalPart, FormattingPart]]: return self._parts def __str__(self) -> str: - return "<{}>".format("".join([str(p) for p in self._parts])) + joined_parts = "".join([str(p) for p in self._parts]) + return f"<{joined_parts}>" def __repr__(self) -> str: - return "".format("".join([str(p) for p in self._parts])) + joined_parts = "".join([str(p) for p in self._parts]) + return f"" def format( self, - data: Dict[str, Any], + data: dict[str, Any], result: TemplatePartResult, ) -> TemplatePartResult: new_result = TemplatePartResult(True) diff --git a/client/ayon_core/lib/transcoding.py b/client/ayon_core/lib/transcoding.py index 127bd3bac4..8e9ed90d1a 100644 --- a/client/ayon_core/lib/transcoding.py +++ b/client/ayon_core/lib/transcoding.py @@ -1,3 +1,4 @@ +from __future__ import annotations import os import re import logging @@ -12,6 +13,8 @@ from typing import Optional import xml.etree.ElementTree +import clique + from .execute import run_subprocess from .vendor_bin_utils import ( get_ffmpeg_tool_args, @@ -110,6 +113,15 @@ def deprecated(new_destination): return _decorator(func) +class MissingRGBAChannelsError(ValueError): + """Raised when we can't find channels to use as RGBA for conversion in + input media. + + This may be other channels than solely RGBA, like Z-channel. The error is + raised when no matching 'reviewable' channel was found. + """ + + def get_transcode_temp_directory(): """Creates temporary folder for transcoding. @@ -122,16 +134,29 @@ def get_transcode_temp_directory(): ) -def get_oiio_info_for_input(filepath, logger=None, subimages=False): +def get_oiio_info_for_input( + filepath: str, + *, + subimages: bool = False, + verbose: bool = True, + logger: logging.Logger = None, +): """Call oiiotool to get information about input and return stdout. + Args: + filepath (str): Path to file. + subimages (bool): include info about subimages in the output. + verbose (bool): get the full metadata about each input image. + logger (logging.Logger): Logger used for logging. + Stdout should contain xml format string. """ args = get_oiio_tool_args( "oiiotool", "--info", - "-v" ) + if verbose: + args.append("-v") if subimages: args.append("-a") @@ -388,6 +413,10 @@ def get_review_info_by_layer_name(channel_names): ... ] + This tries to find suitable outputs good for review purposes, by + searching for channel names like RGBA, but also XYZ, Z, N, AR, AG, AB + channels. + Args: channel_names (list[str]): List of channel names. @@ -396,7 +425,6 @@ def get_review_info_by_layer_name(channel_names): """ layer_names_order = [] - rgba_by_layer_name = collections.defaultdict(dict) channels_by_layer_name = collections.defaultdict(dict) for channel_name in channel_names: @@ -405,45 +433,95 @@ def get_review_info_by_layer_name(channel_names): if "." 
in channel_name: layer_name, last_part = channel_name.rsplit(".", 1) - channels_by_layer_name[layer_name][channel_name] = last_part - if last_part.lower() not in { - "r", "red", - "g", "green", - "b", "blue", - "a", "alpha" + # R, G, B, A or X, Y, Z, N, AR, AG, AB, RED, GREEN, BLUE, ALPHA + channel = last_part.upper() + if channel not in { + # Detect RGBA channels + "R", "G", "B", "A", + # Support fully written out rgba channel names + "RED", "GREEN", "BLUE", "ALPHA", + # Allow detecting of x, y and z channels, and normal channels + "X", "Y", "Z", "N", + # red, green and blue alpha/opacity, for colored mattes + "AR", "AG", "AB" }: continue if layer_name not in layer_names_order: layer_names_order.append(layer_name) - # R, G, B or A - channel = last_part[0].upper() - rgba_by_layer_name[layer_name][channel] = channel_name + + channels_by_layer_name[layer_name][channel] = channel_name # Put empty layer or 'rgba' to the beginning of the list # - if input has R, G, B, A channels they should be used for review - # NOTE They are iterated in reversed order because they're inserted to - # the beginning of 'layer_names_order' -> last added will be first. - for name in reversed(["", "rgba"]): - if name in layer_names_order: - layer_names_order.remove(name) - layer_names_order.insert(0, name) + def _sort(_layer_name: str) -> int: + # Prioritize "" layer name + # Prioritize layers with RGB channels + if _layer_name == "rgba": + return 0 + + if _layer_name == "": + return 1 + + channels = channels_by_layer_name[_layer_name] + if all(channel in channels for channel in "RGB"): + return 2 + return 10 + layer_names_order.sort(key=_sort) output = [] for layer_name in layer_names_order: - rgba_layer_info = rgba_by_layer_name[layer_name] - red = rgba_layer_info.get("R") - green = rgba_layer_info.get("G") - blue = rgba_layer_info.get("B") - if not red or not green or not blue: + channel_info = channels_by_layer_name[layer_name] + + alpha = channel_info.get("A") + + # RGB channels + if all(channel in channel_info for channel in "RGB"): + rgb = "R", "G", "B" + + # RGB channels using fully written out channel names + elif all( + channel in channel_info + for channel in ("RED", "GREEN", "BLUE") + ): + rgb = "RED", "GREEN", "BLUE" + alpha = channel_info.get("ALPHA") + + # XYZ channels (position pass) + elif all(channel in channel_info for channel in "XYZ"): + rgb = "X", "Y", "Z" + + # Colored mattes (as defined in OpenEXR Channel Name standards) + elif all(channel in channel_info for channel in ("AR", "AG", "AB")): + rgb = "AR", "AG", "AB" + + # Luminance channel (as defined in OpenEXR Channel Name standards) + elif "Y" in channel_info: + rgb = "Y", "Y", "Y" + + # Has only Z channel (Z-depth layer) + elif "Z" in channel_info: + rgb = "Z", "Z", "Z" + + # Has only A channel (Alpha layer) + elif "A" in channel_info: + rgb = "A", "A", "A" + alpha = None + + else: + # No reviewable channels found continue + + red = channel_info[rgb[0]] + green = channel_info[rgb[1]] + blue = channel_info[rgb[2]] output.append({ "name": layer_name, "review_channels": { "R": red, "G": green, "B": blue, - "A": rgba_layer_info.get("A"), + "A": alpha, } }) return output @@ -508,7 +586,10 @@ def get_review_layer_name(src_filepath): return None # Load info about file from oiio tool - input_info = get_oiio_info_for_input(src_filepath) + input_info = get_oiio_info_for_input( + src_filepath, + verbose=False, + ) if not input_info: return None @@ -572,6 +653,37 @@ def should_convert_for_ffmpeg(src_filepath): return False +def 
_get_attributes_to_erase( + input_info: dict, logger: logging.Logger +) -> list[str]: + """FFMPEG does not support some attributes in metadata.""" + erase_attrs: dict[str, str] = {} # Attr name to reason mapping + for attr_name, attr_value in input_info["attribs"].items(): + if not isinstance(attr_value, str): + continue + + # Remove attributes that have string value longer than allowed length + # for ffmpeg or when contain prohibited symbols + if len(attr_value) > MAX_FFMPEG_STRING_LEN: + reason = f"has too long value ({len(attr_value)} chars)." + erase_attrs[attr_name] = reason + continue + + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char not in attr_value: + continue + reason = f"contains unsupported character \"{char}\"." + erase_attrs[attr_name] = reason + break + + for attr_name, reason in erase_attrs.items(): + logger.info( + f"Removed attribute \"{attr_name}\" from metadata" + f" because {reason}." + ) + return list(erase_attrs.keys()) + + def convert_input_paths_for_ffmpeg( input_paths, output_dir, @@ -597,7 +709,7 @@ def convert_input_paths_for_ffmpeg( Raises: ValueError: If input filepath has extension not supported by function. - Currently is supported only ".exr" extension. + Currently, only ".exr" extension is supported. """ if logger is None: logger = logging.getLogger(__name__) @@ -622,7 +734,22 @@ def convert_input_paths_for_ffmpeg( # Collect channels to export input_arg, channels_arg = get_oiio_input_and_channel_args(input_info) - for input_path in input_paths: + # Find which attributes to strip + erase_attributes: list[str] = _get_attributes_to_erase( + input_info, logger=logger + ) + + # clique.PATTERNS["frames"] supports only `.1001.exr` not `_1001.exr` so + # we use a customized pattern. + pattern = "[_.](?P(?P0*)\\d+)\\.\\D+\\d?$" + input_collections, input_remainder = clique.assemble( + input_paths, + patterns=[pattern], + assume_padded_when_ambiguous=True, + ) + input_items = list(input_collections) + input_items.extend(input_remainder) + for input_item in input_items: # Prepare subprocess arguments oiio_cmd = get_oiio_tool_args( "oiiotool", @@ -633,8 +760,23 @@ def convert_input_paths_for_ffmpeg( if compression: oiio_cmd.extend(["--compression", compression]) + # Convert a sequence of files using a single oiiotool command + # using its sequence syntax + if isinstance(input_item, clique.Collection): + frames = input_item.format("{head}#{tail}").replace(" ", "") + oiio_cmd.extend([ + "--framepadding", input_item.padding, + "--frames", frames, + "--parallel-frames" + ]) + input_item: str = input_item.format("{head}#{tail}") + elif not isinstance(input_item, str): + raise TypeError( + f"Input is not a string or Collection: {input_item}" + ) + oiio_cmd.extend([ - input_arg, input_path, + input_arg, input_item, # Tell oiiotool which channels should be put to top stack # (and output) "--ch", channels_arg, @@ -642,38 +784,11 @@ def convert_input_paths_for_ffmpeg( "--subimage", "0" ]) - for attr_name, attr_value in input_info["attribs"].items(): - if not isinstance(attr_value, str): - continue - - # Remove attributes that have string value longer than allowed - # length for ffmpeg or when containing prohibited symbols - erase_reason = "Missing reason" - erase_attribute = False - if len(attr_value) > MAX_FFMPEG_STRING_LEN: - erase_reason = "has too long value ({} chars).".format( - len(attr_value) - ) - erase_attribute = True - - if not erase_attribute: - for char in NOT_ALLOWED_FFMPEG_CHARS: - if char in attr_value: - erase_attribute = True - erase_reason = ( - "contains 
unsupported character \"{}\"." - ).format(char) - break - - if erase_attribute: - # Set attribute to empty string - logger.info(( - "Removed attribute \"{}\" from metadata because {}." - ).format(attr_name, erase_reason)) - oiio_cmd.extend(["--eraseattrib", attr_name]) + for attr_name in erase_attributes: + oiio_cmd.extend(["--eraseattrib", attr_name]) # Add last argument - path to output - base_filename = os.path.basename(input_path) + base_filename = os.path.basename(input_item) output_path = os.path.join(output_dir, base_filename) oiio_cmd.extend([ "-o", output_path @@ -1074,7 +1189,10 @@ def oiio_color_convert( target_display=None, target_view=None, additional_command_args=None, - logger=None, + frames: Optional[str] = None, + frame_padding: Optional[int] = None, + parallel_frames: bool = False, + logger: Optional[logging.Logger] = None, ): """Transcode source file to other with colormanagement. @@ -1086,7 +1204,7 @@ def oiio_color_convert( input_path (str): Path that should be converted. It is expected that contains single file or image sequence of same type (sequence in format 'file.FRAMESTART-FRAMEEND#.ext', see oiio docs, - eg `big.1-3#.tif`) + eg `big.1-3#.tif` or `big.1-3%d.ext` with `frames` argument) output_path (str): Path to output filename. (must follow format of 'input_path', eg. single file or sequence in 'file.FRAMESTART-FRAMEEND#.ext', `output.1-3#.tif`) @@ -1107,6 +1225,13 @@ def oiio_color_convert( both 'view' and 'display' must be filled (if 'target_colorspace') additional_command_args (list): arguments for oiiotool (like binary depth for .dpx) + frames (Optional[str]): Complex frame range to process. This requires + input path and output path to use frame token placeholder like + `#` or `%d`, e.g. file.#.exr + frame_padding (Optional[int]): Frame padding to use for the input and + output when using a sequence filepath. + parallel_frames (bool): If True, process frames in parallel inside + the `oiiotool` process. Only supported in OIIO 2.5.20.0+. logger (logging.Logger): Logger used for logging. 
Raises: @@ -1116,7 +1241,20 @@ def oiio_color_convert( if logger is None: logger = logging.getLogger(__name__) - input_info = get_oiio_info_for_input(input_path, logger=logger) + # Get oiioinfo only from first image, otherwise file can't be found + first_input_path = input_path + if frames: + frames: str + first_frame = int(re.split("[ x-]", frames, 1)[0]) + first_frame = str(first_frame).zfill(frame_padding or 0) + for token in ["#", "%d"]: + first_input_path = first_input_path.replace(token, first_frame) + + input_info = get_oiio_info_for_input( + first_input_path, + verbose=False, + logger=logger, + ) # Collect channels to export input_arg, channels_arg = get_oiio_input_and_channel_args(input_info) @@ -1129,6 +1267,22 @@ def oiio_color_convert( "--colorconfig", config_path ) + if frames: + # If `frames` is specified, then process the input and output + # as if it's a sequence of frames (must contain `%04d` as frame + # token placeholder in filepaths) + oiio_cmd.extend([ + "--frames", frames, + ]) + + if frame_padding: + oiio_cmd.extend([ + "--framepadding", str(frame_padding), + ]) + + if parallel_frames: + oiio_cmd.append("--parallel-frames") + oiio_cmd.extend([ input_arg, input_path, # Tell oiiotool which channels should be put to top stack @@ -1170,31 +1324,45 @@ def oiio_color_convert( # Handle the different conversion cases # Source view and display are known if source_view and source_display: + color_convert_args = None + ocio_display_args = None if target_colorspace: # This is a two-step conversion process since there's no direct # display/view to colorspace command # This could be a config parameter or determined from OCIO config - # Use temporarty role space 'scene_linear' + # Use temporary role space 'scene_linear' color_convert_args = ("scene_linear", target_colorspace) elif source_display != target_display or source_view != target_view: # Complete display/view pair conversion # - go through a reference space - color_convert_args = (target_display, target_view) + ocio_display_args = (target_display, target_view) else: - color_convert_args = None logger.debug( "Source and target display/view pairs are identical." " No color conversion needed." ) - if color_convert_args: + if color_convert_args or ocio_display_args: + # Invert source display/view so that we can go from there to the + # target colorspace or display/view oiio_cmd.extend([ "--ociodisplay:inverse=1:subimages=0", source_display, source_view, + ]) + + if color_convert_args: + # Use colorconvert for colorspace target + oiio_cmd.extend([ "--colorconvert:subimages=0", *color_convert_args ]) + elif ocio_display_args: + # Use ociodisplay for display/view target + oiio_cmd.extend([ + "--ociodisplay:subimages=0", + *ocio_display_args + ]) elif target_colorspace: # Standard color space to color space conversion @@ -1219,24 +1387,6 @@ def oiio_color_convert( run_subprocess(oiio_cmd, logger=logger) -def split_cmd_args(in_args): - """Makes sure all entered arguments are separated in individual items. - - Split each argument string with " -" to identify if string contains - one or more arguments. 
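An illustrative call of oiio_color_convert with the new sequence arguments documented above (editor's illustration, not part of the diff). The paths, frame range and colorspace names are hypothetical, the pre-existing colorspace parameters are assumed unchanged by this diff, and '--parallel-frames' only takes effect on OIIO 2.5.20.0+ as noted in the docstring:

    from ayon_core.lib.transcoding import oiio_color_convert

    # Convert a hypothetical EXR sequence render.1001.exr - render.1050.exr
    # with a single oiiotool invocation.
    oiio_color_convert(
        input_path="/renders/render.#.exr",
        output_path="/renders/review/render.#.exr",
        config_path="/pipeline/ocio/config.ocio",
        source_colorspace="ACEScg",
        target_colorspace="sRGB",
        frames="1001-1050",
        frame_padding=4,
        parallel_frames=True,
    )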
- Args: - in_args (list): of arguments ['-n', '-d uint10'] - Returns - (list): ['-n', '-d', 'unint10'] - """ - splitted_args = [] - for arg in in_args: - if not arg.strip(): - continue - splitted_args.extend(arg.split(" ")) - return splitted_args - - def get_rescaled_command_arguments( application, input_path, @@ -1318,7 +1468,11 @@ def get_rescaled_command_arguments( command_args.extend(["-vf", "{0},{1}".format(scale, pad)]) elif application == "oiiotool": - input_info = get_oiio_info_for_input(input_path, logger=log) + input_info = get_oiio_info_for_input( + input_path, + verbose=False, + logger=log, + ) # Collect channels to export _, channels_arg = get_oiio_input_and_channel_args( input_info, alpha_default=1.0) @@ -1409,7 +1563,11 @@ def _get_image_dimensions(application, input_path, log): # fallback for weird files with width=0, height=0 if (input_width == 0 or input_height == 0) and application == "oiiotool": # Load info about file from oiio tool - input_info = get_oiio_info_for_input(input_path, logger=log) + input_info = get_oiio_info_for_input( + input_path, + verbose=False, + logger=log, + ) if input_info: input_width = int(input_info["width"]) input_height = int(input_info["height"]) @@ -1458,17 +1616,21 @@ def get_oiio_input_and_channel_args(oiio_input_info, alpha_default=None): """Get input and channel arguments for oiiotool. Args: oiio_input_info (dict): Information about input from oiio tool. - Should be output of function `get_oiio_info_for_input`. + Should be output of function 'get_oiio_info_for_input' (can be + called with 'verbose=False'). alpha_default (float, optional): Default value for alpha channel. + Returns: tuple[str, str]: Tuple of input and channel arguments. + """ channel_names = oiio_input_info["channelnames"] review_channels = get_convert_rgb_channels(channel_names) if review_channels is None: - raise ValueError( - "Couldn't find channels that can be used for conversion." + raise MissingRGBAChannelsError( + "Couldn't find channels that can be used for conversion " + f"among channels: {channel_names}." 
) red, green, blue, alpha = review_channels @@ -1482,7 +1644,8 @@ def get_oiio_input_and_channel_args(oiio_input_info, alpha_default=None): channels_arg += ",A={}".format(float(alpha_default)) input_channels.append("A") - input_channels_str = ",".join(input_channels) + # Make sure channels are unique, but preserve order to avoid oiiotool crash + input_channels_str = ",".join(list(dict.fromkeys(input_channels))) subimages = oiio_input_info.get("subimages") input_arg = "-i" diff --git a/client/ayon_core/pipeline/actions/__init__.py b/client/ayon_core/pipeline/actions/__init__.py new file mode 100644 index 0000000000..7af3ac1130 --- /dev/null +++ b/client/ayon_core/pipeline/actions/__init__.py @@ -0,0 +1,62 @@ +from .structures import ( + ActionForm, +) +from .utils import ( + webaction_fields_to_attribute_defs, +) +from .loader import ( + LoaderSelectedType, + LoaderActionResult, + LoaderActionItem, + LoaderActionPlugin, + LoaderActionSelection, + LoaderActionsContext, + SelectionEntitiesCache, + LoaderSimpleActionPlugin, +) + +from .launcher import ( + LauncherAction, + LauncherActionSelection, + discover_launcher_actions, + register_launcher_action, + register_launcher_action_path, +) + +from .inventory import ( + InventoryAction, + discover_inventory_actions, + register_inventory_action, + register_inventory_action_path, + + deregister_inventory_action, + deregister_inventory_action_path, +) + + +__all__ = ( + "ActionForm", + "webaction_fields_to_attribute_defs", + + "LoaderSelectedType", + "LoaderActionResult", + "LoaderActionItem", + "LoaderActionPlugin", + "LoaderActionSelection", + "LoaderActionsContext", + "SelectionEntitiesCache", + "LoaderSimpleActionPlugin", + + "LauncherAction", + "LauncherActionSelection", + "discover_launcher_actions", + "register_launcher_action", + "register_launcher_action_path", + + "InventoryAction", + "discover_inventory_actions", + "register_inventory_action", + "register_inventory_action_path", + "deregister_inventory_action", + "deregister_inventory_action_path", +) diff --git a/client/ayon_core/pipeline/actions/inventory.py b/client/ayon_core/pipeline/actions/inventory.py new file mode 100644 index 0000000000..2300119336 --- /dev/null +++ b/client/ayon_core/pipeline/actions/inventory.py @@ -0,0 +1,108 @@ +import logging + +from ayon_core.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) +from ayon_core.pipeline.load.utils import get_representation_path_from_context + + +class InventoryAction: + """A custom action for the scene inventory tool + + If registered the action will be visible in the Right Mouse Button menu + under the submenu "Actions". + + """ + + label = None + icon = None + color = None + order = 0 + + log = logging.getLogger("InventoryAction") + log.propagate = True + + @staticmethod + def is_compatible(container): + """Override function in a custom class + + This method is specifically used to ensure the action can operate on + the container. + + Args: + container(dict): the data of a loaded asset, see host.ls() + + Returns: + bool + """ + return bool(container.get("objectName")) + + def process(self, containers): + """Override function in a custom class + + This method will receive all containers even those which are + incompatible. 
It is advised to create a small filter along the lines + of this example: + + valid_containers = filter(self.is_compatible(c) for c in containers) + + The return value will need to be a True-ish value to trigger + the data_changed signal in order to refresh the view. + + You can return a list of container names to trigger GUI to select + treeview items. + + You can return a dict to carry extra GUI options. For example: + { + "objectNames": [container names...], + "options": {"mode": "toggle", + "clear": False} + } + Currently workable GUI options are: + - clear (bool): Clear current selection before selecting by action. + Default `True`. + - mode (str): selection mode, use one of these: + "select", "deselect", "toggle". Default is "select". + + Args: + containers (list): list of dictionaries + + Return: + bool, list or dict + + """ + return True + + @classmethod + def filepath_from_context(cls, context): + return get_representation_path_from_context(context) + + +def discover_inventory_actions(): + actions = discover(InventoryAction) + filtered_actions = [] + for action in actions: + if action is not InventoryAction: + filtered_actions.append(action) + + return filtered_actions + + +def register_inventory_action(plugin): + return register_plugin(InventoryAction, plugin) + + +def deregister_inventory_action(plugin): + deregister_plugin(InventoryAction, plugin) + + +def register_inventory_action_path(path): + return register_plugin_path(InventoryAction, path) + + +def deregister_inventory_action_path(path): + return deregister_plugin_path(InventoryAction, path) diff --git a/client/ayon_core/pipeline/actions.py b/client/ayon_core/pipeline/actions/launcher.py similarity index 78% rename from client/ayon_core/pipeline/actions.py rename to client/ayon_core/pipeline/actions/launcher.py index 6892af4252..8d4b514393 100644 --- a/client/ayon_core/pipeline/actions.py +++ b/client/ayon_core/pipeline/actions/launcher.py @@ -8,12 +8,8 @@ from ayon_core.pipeline.plugin_discover import ( discover, register_plugin, register_plugin_path, - deregister_plugin, - deregister_plugin_path ) -from .load.utils import get_representation_path_from_context - class LauncherActionSelection: """Object helper to pass selection to actions. @@ -390,79 +386,6 @@ class LauncherAction(object): pass -class InventoryAction(object): - """A custom action for the scene inventory tool - - If registered the action will be visible in the Right Mouse Button menu - under the submenu "Actions". - - """ - - label = None - icon = None - color = None - order = 0 - - log = logging.getLogger("InventoryAction") - log.propagate = True - - @staticmethod - def is_compatible(container): - """Override function in a custom class - - This method is specifically used to ensure the action can operate on - the container. - - Args: - container(dict): the data of a loaded asset, see host.ls() - - Returns: - bool - """ - return bool(container.get("objectName")) - - def process(self, containers): - """Override function in a custom class - - This method will receive all containers even those which are - incompatible. It is advised to create a small filter along the lines - of this example: - - valid_containers = filter(self.is_compatible(c) for c in containers) - - The return value will need to be a True-ish value to trigger - the data_changed signal in order to refresh the view. - - You can return a list of container names to trigger GUI to select - treeview items. - - You can return a dict to carry extra GUI options. 
For example: - { - "objectNames": [container names...], - "options": {"mode": "toggle", - "clear": False} - } - Currently workable GUI options are: - - clear (bool): Clear current selection before selecting by action. - Default `True`. - - mode (str): selection mode, use one of these: - "select", "deselect", "toggle". Default is "select". - - Args: - containers (list): list of dictionaries - - Return: - bool, list or dict - - """ - return True - - @classmethod - def filepath_from_context(cls, context): - return get_representation_path_from_context(context) - - -# Launcher action def discover_launcher_actions(): return discover(LauncherAction) @@ -473,30 +396,3 @@ def register_launcher_action(plugin): def register_launcher_action_path(path): return register_plugin_path(LauncherAction, path) - - -# Inventory action -def discover_inventory_actions(): - actions = discover(InventoryAction) - filtered_actions = [] - for action in actions: - if action is not InventoryAction: - filtered_actions.append(action) - - return filtered_actions - - -def register_inventory_action(plugin): - return register_plugin(InventoryAction, plugin) - - -def deregister_inventory_action(plugin): - deregister_plugin(InventoryAction, plugin) - - -def register_inventory_action_path(path): - return register_plugin_path(InventoryAction, path) - - -def deregister_inventory_action_path(path): - return deregister_plugin_path(InventoryAction, path) diff --git a/client/ayon_core/pipeline/actions/loader.py b/client/ayon_core/pipeline/actions/loader.py new file mode 100644 index 0000000000..53cc52d39f --- /dev/null +++ b/client/ayon_core/pipeline/actions/loader.py @@ -0,0 +1,882 @@ +"""API for actions for loader tool. + +Even though the api is meant for the loader tool, the api should be possible + to use in a standalone way out of the loader tool. + +To use add actions, make sure your addon does inherit from + 'IPluginPaths' and implements 'get_loader_action_plugin_paths' which + returns paths to python files with loader actions. + +The plugin is used to collect available actions for the given context and to + execute them. Selection is defined with 'LoaderActionSelection' object + that also contains a cache of entities and project anatomy. + +Implementing 'get_action_items' allows the plugin to define what actions + are shown and available for the selection. Because for a single selection + can be shown multiple actions with the same action identifier, the action + items also have 'data' attribute which can be used to store additional + data for the action (they have to be json-serializable). + +The action is triggered by calling the 'execute_action' method. Which takes + the action identifier, the selection, the additional data from the action + item and form values from the form if any. + +Using 'LoaderActionResult' as the output of 'execute_action' can trigger to + show a message in UI or to show an additional form ('ActionForm') + which would retrigger the action with the values from the form on + submitting. That allows handling of multistep actions. + +It is also recommended that the plugin does override the 'identifier' + attribute. The identifier has to be unique across all plugins. + Class name is used by default. + +The selection wrapper currently supports the following types of entity types: + - version + - representation +It is planned to add 'folder' and 'task' selection in the future. + +NOTE: It is possible to trigger 'execute_action' without ever calling + 'get_action_items', that can be handy in automations. 
+ +The whole logic is wrapped into 'LoaderActionsContext'. It takes care of + the discovery of plugins and wraps the collection and execution of + action items. Method 'execute_action' on context also requires plugin + identifier. + +The flow of the logic is (in the loader tool): + 1. User selects entities in the UI. + 2. Right-click the selected entities. + 3. Use 'LoaderActionsContext' to collect items using 'get_action_items'. + 4. Show a menu (with submenus) in the UI. + 5. If a user selects an action, the action is triggered using + 'execute_action'. + 5a. If the action returns 'LoaderActionResult', show a 'message' if it is + filled and show a form dialog if 'form' is filled. + 5b. If the user submitted the form, trigger the action again with the + values from the form and repeat from 5a. + +""" +from __future__ import annotations + +import os +import collections +import copy +import logging +from abc import ABC, abstractmethod +import typing +from typing import Optional, Any, Callable +from dataclasses import dataclass + +import ayon_api + +from ayon_core import AYON_CORE_ROOT +from ayon_core.lib import StrEnum, Logger, is_func_signature_supported +from ayon_core.host import AbstractHost +from ayon_core.addon import AddonsManager, IPluginPaths +from ayon_core.settings import get_studio_settings, get_project_settings +from ayon_core.pipeline import Anatomy +from ayon_core.pipeline.plugin_discover import discover_plugins + +from .structures import ActionForm + +if typing.TYPE_CHECKING: + from typing import Union + + DataBaseType = Union[str, int, float, bool] + DataType = dict[str, Union[DataBaseType, list[DataBaseType]]] + +_PLACEHOLDER = object() + + +class LoaderSelectedType(StrEnum): + """Selected entity type.""" + # folder = "folder" + # task = "task" + version = "version" + representation = "representation" + + +class SelectionEntitiesCache: + """Cache of entities used as helper in the selection wrapper. + + It is possible to get entities based on ids with helper methods to get + entities, their parents or their children's entities. + + The goal is to avoid multiple API calls for the same entity in multiple + action plugins. + + The cache is based on the selected project. Entities are fetched + if are not in cache yet. 
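A small sketch of how the cache described above can be queried (editor's illustration, not part of the diff); the project name and id are hypothetical:

    from ayon_core.pipeline.actions import SelectionEntitiesCache

    cache = SelectionEntitiesCache("my_project")
    version_ids = {"0123456789abcdef0123456789abcdef"}

    # Each call only queries the server for ids that are not cached yet.
    versions = cache.get_versions(version_ids)
    representations = cache.get_versions_representations(version_ids)
    products = cache.get_versions_products(version_ids)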
+ """ + def __init__( + self, + project_name: str, + project_entity: Optional[dict[str, Any]] = None, + folders_by_id: Optional[dict[str, dict[str, Any]]] = None, + tasks_by_id: Optional[dict[str, dict[str, Any]]] = None, + products_by_id: Optional[dict[str, dict[str, Any]]] = None, + versions_by_id: Optional[dict[str, dict[str, Any]]] = None, + representations_by_id: Optional[dict[str, dict[str, Any]]] = None, + task_ids_by_folder_id: Optional[dict[str, set[str]]] = None, + product_ids_by_folder_id: Optional[dict[str, set[str]]] = None, + version_ids_by_product_id: Optional[dict[str, set[str]]] = None, + representation_ids_by_version_id: Optional[dict[str, set[str]]] = None, + ): + self._project_name = project_name + self._project_entity = project_entity + self._folders_by_id = folders_by_id or {} + self._tasks_by_id = tasks_by_id or {} + self._products_by_id = products_by_id or {} + self._versions_by_id = versions_by_id or {} + self._representations_by_id = representations_by_id or {} + + self._task_ids_by_folder_id = task_ids_by_folder_id or {} + self._product_ids_by_folder_id = product_ids_by_folder_id or {} + self._version_ids_by_product_id = version_ids_by_product_id or {} + self._representation_ids_by_version_id = ( + representation_ids_by_version_id or {} + ) + + def get_project(self) -> dict[str, Any]: + """Get project entity""" + if self._project_entity is None: + self._project_entity = ayon_api.get_project(self._project_name) + return copy.deepcopy(self._project_entity) + + def get_folders( + self, folder_ids: set[str] + ) -> list[dict[str, Any]]: + return self._get_entities( + folder_ids, + self._folders_by_id, + "folder_ids", + ayon_api.get_folders, + ) + + def get_tasks( + self, task_ids: set[str] + ) -> list[dict[str, Any]]: + return self._get_entities( + task_ids, + self._tasks_by_id, + "task_ids", + ayon_api.get_tasks, + ) + + def get_products( + self, product_ids: set[str] + ) -> list[dict[str, Any]]: + return self._get_entities( + product_ids, + self._products_by_id, + "product_ids", + ayon_api.get_products, + ) + + def get_versions( + self, version_ids: set[str] + ) -> list[dict[str, Any]]: + return self._get_entities( + version_ids, + self._versions_by_id, + "version_ids", + ayon_api.get_versions, + ) + + def get_representations( + self, representation_ids: set[str] + ) -> list[dict[str, Any]]: + return self._get_entities( + representation_ids, + self._representations_by_id, + "representation_ids", + ayon_api.get_representations, + ) + + def get_folders_tasks( + self, folder_ids: set[str] + ) -> list[dict[str, Any]]: + task_ids = self._fill_parent_children_ids( + folder_ids, + "folderId", + "folder_ids", + self._task_ids_by_folder_id, + ayon_api.get_tasks, + ) + return self.get_tasks(task_ids) + + def get_folders_products( + self, folder_ids: set[str] + ) -> list[dict[str, Any]]: + product_ids = self._get_folders_products_ids(folder_ids) + return self.get_products(product_ids) + + def get_tasks_versions( + self, task_ids: set[str] + ) -> list[dict[str, Any]]: + folder_ids = { + task["folderId"] + for task in self.get_tasks(task_ids) + } + product_ids = self._get_folders_products_ids(folder_ids) + output = [] + for version in self.get_products_versions(product_ids): + task_id = version["taskId"] + if task_id in task_ids: + output.append(version) + return output + + def get_products_versions( + self, product_ids: set[str] + ) -> list[dict[str, Any]]: + version_ids = self._fill_parent_children_ids( + product_ids, + "productId", + "product_ids", + 
self._version_ids_by_product_id, + ayon_api.get_versions, + ) + return self.get_versions(version_ids) + + def get_versions_representations( + self, version_ids: set[str] + ) -> list[dict[str, Any]]: + repre_ids = self._fill_parent_children_ids( + version_ids, + "versionId", + "version_ids", + self._representation_ids_by_version_id, + ayon_api.get_representations, + ) + return self.get_representations(repre_ids) + + def get_tasks_folders(self, task_ids: set[str]) -> list[dict[str, Any]]: + folder_ids = { + task["folderId"] + for task in self.get_tasks(task_ids) + } + return self.get_folders(folder_ids) + + def get_products_folders( + self, product_ids: set[str] + ) -> list[dict[str, Any]]: + folder_ids = { + product["folderId"] + for product in self.get_products(product_ids) + } + return self.get_folders(folder_ids) + + def get_versions_products( + self, version_ids: set[str] + ) -> list[dict[str, Any]]: + product_ids = { + version["productId"] + for version in self.get_versions(version_ids) + } + return self.get_products(product_ids) + + def get_versions_tasks( + self, version_ids: set[str] + ) -> list[dict[str, Any]]: + task_ids = { + version["taskId"] + for version in self.get_versions(version_ids) + if version["taskId"] + } + return self.get_tasks(task_ids) + + def get_representations_versions( + self, representation_ids: set[str] + ) -> list[dict[str, Any]]: + version_ids = { + repre["versionId"] + for repre in self.get_representations(representation_ids) + } + return self.get_versions(version_ids) + + def _get_folders_products_ids(self, folder_ids: set[str]) -> set[str]: + return self._fill_parent_children_ids( + folder_ids, + "folderId", + "folder_ids", + self._product_ids_by_folder_id, + ayon_api.get_products, + ) + + def _fill_parent_children_ids( + self, + entity_ids: set[str], + parent_key: str, + filter_attr: str, + parent_mapping: dict[str, set[str]], + getter: Callable, + ) -> set[str]: + if not entity_ids: + return set() + children_ids = set() + missing_ids = set() + for entity_id in entity_ids: + _children_ids = parent_mapping.get(entity_id) + if _children_ids is None: + missing_ids.add(entity_id) + else: + children_ids.update(_children_ids) + if missing_ids: + entities_by_parent_id = collections.defaultdict(set) + for entity in getter( + self._project_name, + fields={"id", parent_key}, + **{filter_attr: missing_ids}, + ): + child_id = entity["id"] + children_ids.add(child_id) + entities_by_parent_id[entity[parent_key]].add(child_id) + + for entity_id in missing_ids: + parent_mapping[entity_id] = entities_by_parent_id[entity_id] + + return children_ids + + def _get_entities( + self, + entity_ids: set[str], + cache_var: dict[str, Any], + filter_arg: str, + getter: Callable, + ) -> list[dict[str, Any]]: + if not entity_ids: + return [] + + output = [] + missing_ids: set[str] = set() + for entity_id in entity_ids: + entity = cache_var.get(entity_id) + if entity_id not in cache_var: + missing_ids.add(entity_id) + cache_var[entity_id] = None + elif entity: + output.append(entity) + + if missing_ids: + for entity in getter( + self._project_name, + **{filter_arg: missing_ids} + ): + output.append(entity) + cache_var[entity["id"]] = entity + return output + + +class LoaderActionSelection: + """Selection of entities for loader actions. + + Selection tells action plugins what exactly is selected in the tool and + which ids. + + Contains entity cache which can be used to get entities by their ids. Or + to get project settings and anatomy. 
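An illustrative way to build a selection by hand, e.g. for automation outside the loader tool (editor's illustration, not part of the diff); the project and id are made up:

    from ayon_core.pipeline.actions import (
        LoaderActionSelection,
        LoaderSelectedType,
    )

    selection = LoaderActionSelection(
        project_name="my_project",
        selected_ids={"11111111222222223333333344444444"},
        selected_type=LoaderSelectedType.version,
    )

    if selection.versions_selected():
        for version in selection.get_selected_version_entities():
            print(version["version"], version["productId"])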
+ + """ + def __init__( + self, + project_name: str, + selected_ids: set[str], + selected_type: LoaderSelectedType, + *, + project_anatomy: Optional[Anatomy] = None, + project_settings: Optional[dict[str, Any]] = None, + entities_cache: Optional[SelectionEntitiesCache] = None, + ): + self._project_name = project_name + self._selected_ids = selected_ids + self._selected_type = selected_type + + self._project_anatomy = project_anatomy + self._project_settings = project_settings + + if entities_cache is None: + entities_cache = SelectionEntitiesCache(project_name) + self._entities_cache = entities_cache + + def get_entities_cache(self) -> SelectionEntitiesCache: + return self._entities_cache + + def get_project_name(self) -> str: + return self._project_name + + def get_selected_ids(self) -> set[str]: + return set(self._selected_ids) + + def get_selected_type(self) -> str: + return self._selected_type + + def get_project_settings(self) -> dict[str, Any]: + if self._project_settings is None: + self._project_settings = get_project_settings(self._project_name) + return copy.deepcopy(self._project_settings) + + def get_project_anatomy(self) -> Anatomy: + if self._project_anatomy is None: + self._project_anatomy = Anatomy( + self._project_name, + project_entity=self.get_entities_cache().get_project(), + ) + return self._project_anatomy + + project_name = property(get_project_name) + selected_ids = property(get_selected_ids) + selected_type = property(get_selected_type) + project_settings = property(get_project_settings) + project_anatomy = property(get_project_anatomy) + entities = property(get_entities_cache) + + # --- Helper methods --- + def versions_selected(self) -> bool: + """Selected entity type is version. + + Returns: + bool: True if selected entity type is version. + + """ + return self._selected_type == LoaderSelectedType.version + + def representations_selected(self) -> bool: + """Selected entity type is representation. + + Returns: + bool: True if selected entity type is representation. + + """ + return self._selected_type == LoaderSelectedType.representation + + def get_selected_version_entities(self) -> list[dict[str, Any]]: + """Retrieve selected version entities. + + An empty list is returned if 'version' is not the selected + entity type. + + Returns: + list[dict[str, Any]]: List of selected version entities. + + """ + if self.versions_selected(): + return self.entities.get_versions(self.selected_ids) + return [] + + def get_selected_representation_entities(self) -> list[dict[str, Any]]: + """Retrieve selected representation entities. + + An empty list is returned if 'representation' is not the selected + entity type. + + Returns: + list[dict[str, Any]]: List of selected representation entities. + + """ + if self.representations_selected(): + return self.entities.get_representations(self.selected_ids) + return [] + + +@dataclass +class LoaderActionItem: + """Item of loader action. + + Action plugins return these items as possible actions to run for a given + context. + + Because the action item can be related to a specific entity + and not the whole selection, they also have to define the entity type + and ids to be executed on. + + Attributes: + label (str): Text shown in UI. + order (int): Order of the action in UI. + group_label (Optional[str]): Label of the group to which the action + belongs. + icon (Optional[dict[str, Any]): Icon definition. + data (Optional[DataType]): Action item data. + identifier (Optional[str]): Identifier of the plugin which + created the action item. 
Is filled automatically. Is not changed + if is filled -> can lead to different plugin. + + """ + label: str + order: int = 0 + group_label: Optional[str] = None + icon: Optional[dict[str, Any]] = None + data: Optional[DataType] = None + # Is filled automatically + identifier: str = None + + +@dataclass +class LoaderActionResult: + """Result of loader action execution. + + Attributes: + message (Optional[str]): Message to show in UI. + success (bool): If the action was successful. Affects color of + the message. + form (Optional[ActionForm]): Form to show in UI. + form_values (Optional[dict[str, Any]]): Values for the form. Can be + used if the same form is re-shown e.g. because a user forgot to + fill a required field. + + """ + message: Optional[str] = None + success: bool = True + form: Optional[ActionForm] = None + form_values: Optional[dict[str, Any]] = None + + def to_json_data(self) -> dict[str, Any]: + form = self.form + if form is not None: + form = form.to_json_data() + return { + "message": self.message, + "success": self.success, + "form": form, + "form_values": self.form_values, + } + + @classmethod + def from_json_data(cls, data: dict[str, Any]) -> "LoaderActionResult": + form = data["form"] + if form is not None: + data["form"] = ActionForm.from_json_data(form) + return LoaderActionResult(**data) + + +class LoaderActionPlugin(ABC): + """Plugin for loader actions. + + Plugin is responsible for getting action items and executing actions. + + """ + _log: Optional[logging.Logger] = None + enabled: bool = True + + def __init__(self, context: "LoaderActionsContext") -> None: + self._context = context + self.apply_settings(context.get_studio_settings()) + + def apply_settings(self, studio_settings: dict[str, Any]) -> None: + """Apply studio settings to the plugin. + + Args: + studio_settings (dict[str, Any]): Studio settings. + + """ + pass + + @property + def log(self) -> logging.Logger: + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + @property + def identifier(self) -> str: + """Identifier of the plugin. + + Returns: + str: Plugin identifier. + + """ + return self.__class__.__name__ + + @property + def host_name(self) -> Optional[str]: + """Name of the current host.""" + return self._context.get_host_name() + + @abstractmethod + def get_action_items( + self, selection: LoaderActionSelection + ) -> list[LoaderActionItem]: + """Action items for the selection. + + Args: + selection (LoaderActionSelection): Selection. + + Returns: + list[LoaderActionItem]: Action items. + + """ + pass + + @abstractmethod + def execute_action( + self, + selection: LoaderActionSelection, + data: Optional[DataType], + form_values: dict[str, Any], + ) -> Optional[LoaderActionResult]: + """Execute an action. + + Args: + selection (LoaderActionSelection): Selection wrapper. Can be used + to get entities or get context of original selection. + data (Optional[DataType]): Additional action item data. + form_values (dict[str, Any]): Attribute values. + + Returns: + Optional[LoaderActionResult]: Result of the action execution. + + """ + pass + + +class LoaderActionsContext: + """Wrapper for loader actions and their logic. + + Takes care about the public api of loader actions and internal logic like + discovery and initialization of plugins. 
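The plugin interface above is small enough to show end to end. Below is a minimal, illustrative sketch of a concrete loader action plugin; the class name, label and log text are invented for the example, and the loader action classes are assumed to be importable from the module added in this diff.

class LogRepresentationIdsAction(LoaderActionPlugin):
    """Illustrative plugin offering one action for representation selections."""

    def get_action_items(self, selection):
        # Only offer the action when representations are selected.
        if not selection.representations_selected():
            return []
        return [
            LoaderActionItem(label="Log representation ids", order=100)
        ]

    def execute_action(self, selection, data, form_values):
        repres = selection.get_selected_representation_entities()
        for repre in repres:
            self.log.info("Selected representation: %s", repre["id"])
        return LoaderActionResult(
            message=f"Logged {len(repres)} representation id(s)."
        )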
+ + """ + def __init__( + self, + studio_settings: Optional[dict[str, Any]] = None, + addons_manager: Optional[AddonsManager] = None, + host: Optional[AbstractHost] = _PLACEHOLDER, + ) -> None: + self._log = Logger.get_logger(self.__class__.__name__) + + self._addons_manager = addons_manager + self._host = host + + # Attributes that are re-cached on reset + self._studio_settings = studio_settings + self._plugins = None + + def reset( + self, studio_settings: Optional[dict[str, Any]] = None + ) -> None: + """Reset context cache. + + Reset plugins and studio settings to reload them. + + Notes: + Does not reset the cache of AddonsManger because there should not + be a reason to do so. + + """ + self._studio_settings = studio_settings + self._plugins = None + + def get_addons_manager(self) -> AddonsManager: + if self._addons_manager is None: + self._addons_manager = AddonsManager( + settings=self.get_studio_settings() + ) + return self._addons_manager + + def get_host(self) -> Optional[AbstractHost]: + """Get current host integration. + + Returns: + Optional[AbstractHost]: Host integration. Can be None if host + integration is not registered -> probably not used in the + host integration process. + + """ + if self._host is _PLACEHOLDER: + from ayon_core.pipeline import registered_host + + self._host = registered_host() + return self._host + + def get_host_name(self) -> Optional[str]: + host = self.get_host() + if host is None: + return None + return host.name + + def get_studio_settings(self) -> dict[str, Any]: + if self._studio_settings is None: + self._studio_settings = get_studio_settings() + return copy.deepcopy(self._studio_settings) + + def get_action_items( + self, selection: LoaderActionSelection + ) -> list[LoaderActionItem]: + """Collect action items from all plugins for given selection. + + Args: + selection (LoaderActionSelection): Selection wrapper. + + """ + output = [] + for plugin_id, plugin in self._get_plugins().items(): + try: + for action_item in plugin.get_action_items(selection): + if action_item.identifier is None: + action_item.identifier = plugin_id + output.append(action_item) + + except Exception: + self._log.warning( + "Failed to get action items for" + f" plugin '{plugin.identifier}'", + exc_info=True, + ) + return output + + def execute_action( + self, + identifier: str, + selection: LoaderActionSelection, + data: Optional[DataType], + form_values: dict[str, Any], + ) -> Optional[LoaderActionResult]: + """Trigger action execution. + + Args: + identifier (str): Identifier of the plugin. + selection (LoaderActionSelection): Selection wrapper. Can be used + to get what is selected in UI and to get access to entity + cache. + data (Optional[DataType]): Additional action item data. + form_values (dict[str, Any]): Form values related to action. + Usually filled if action returned response with form. 
+ + """ + plugins_by_id = self._get_plugins() + plugin = plugins_by_id[identifier] + return plugin.execute_action( + selection, + data, + form_values, + ) + + def _get_plugins(self) -> dict[str, LoaderActionPlugin]: + if self._plugins is None: + host_name = self.get_host_name() + addons_manager = self.get_addons_manager() + all_paths = [ + os.path.join(AYON_CORE_ROOT, "plugins", "loader") + ] + for addon in addons_manager.addons: + if not isinstance(addon, IPluginPaths): + continue + + try: + if is_func_signature_supported( + addon.get_loader_action_plugin_paths, + host_name + ): + paths = addon.get_loader_action_plugin_paths( + host_name + ) + else: + paths = addon.get_loader_action_plugin_paths() + except Exception: + self._log.warning( + "Failed to get plugin paths for addon", + exc_info=True + ) + continue + + if paths: + all_paths.extend(paths) + + result = discover_plugins(LoaderActionPlugin, all_paths) + result.log_report() + plugins = {} + for cls in result.plugins: + try: + plugin = cls(self) + if not plugin.enabled: + continue + + plugin_id = plugin.identifier + if plugin_id not in plugins: + plugins[plugin_id] = plugin + continue + + self._log.warning( + f"Duplicated plugins identifier found '{plugin_id}'." + ) + + except Exception: + self._log.warning( + f"Failed to initialize plugin '{cls.__name__}'", + exc_info=True, + ) + self._plugins = plugins + return self._plugins + + +class LoaderSimpleActionPlugin(LoaderActionPlugin): + """Simple action plugin. + + This action will show exactly one action item defined by attributes + on the class. + + Attributes: + label: Label of the action item. + order: Order of the action item. + group_label: Label of the group to which the action belongs. + icon: Icon definition shown next to label. + + """ + + label: Optional[str] = None + order: int = 0 + group_label: Optional[str] = None + icon: Optional[dict[str, Any]] = None + + @abstractmethod + def is_compatible(self, selection: LoaderActionSelection) -> bool: + """Check if plugin is compatible with selection. + + Args: + selection (LoaderActionSelection): Selection information. + + Returns: + bool: True if plugin is compatible with selection. + + """ + pass + + @abstractmethod + def execute_simple_action( + self, + selection: LoaderActionSelection, + form_values: dict[str, Any], + ) -> Optional[LoaderActionResult]: + """Process action based on selection. + + Args: + selection (LoaderActionSelection): Selection information. + form_values (dict[str, Any]): Values from a form if there are any. + + Returns: + Optional[LoaderActionResult]: Result of the action. 
+ + """ + pass + + def get_action_items( + self, selection: LoaderActionSelection + ) -> list[LoaderActionItem]: + if self.is_compatible(selection): + label = self.label or self.__class__.__name__ + return [ + LoaderActionItem( + label=label, + order=self.order, + group_label=self.group_label, + icon=self.icon, + ) + ] + return [] + + def execute_action( + self, + selection: LoaderActionSelection, + data: Optional[DataType], + form_values: dict[str, Any], + ) -> Optional[LoaderActionResult]: + return self.execute_simple_action(selection, form_values) diff --git a/client/ayon_core/pipeline/actions/structures.py b/client/ayon_core/pipeline/actions/structures.py new file mode 100644 index 0000000000..0283a7a272 --- /dev/null +++ b/client/ayon_core/pipeline/actions/structures.py @@ -0,0 +1,60 @@ +from dataclasses import dataclass +from typing import Optional, Any + +from ayon_core.lib.attribute_definitions import ( + AbstractAttrDef, + serialize_attr_defs, + deserialize_attr_defs, +) + + +@dataclass +class ActionForm: + """Form for loader action. + + If an action needs to collect information from a user before or during of + the action execution, it can return a response with a form. When the + form is submitted, a new execution of the action is triggered. + + It is also possible to just show a label message without the submit + button to make sure the user has seen the message. + + Attributes: + title (str): Title of the form -> title of the window. + fields (list[AbstractAttrDef]): Fields of the form. + submit_label (Optional[str]): Label of the submit button. Is hidden + if is set to None. + submit_icon (Optional[dict[str, Any]]): Icon definition of the submit + button. + cancel_label (Optional[str]): Label of the cancel button. Is hidden + if is set to None. User can still close the window tho. + cancel_icon (Optional[dict[str, Any]]): Icon definition of the cancel + button. + + """ + title: str + fields: list[AbstractAttrDef] + submit_label: Optional[str] = "Submit" + submit_icon: Optional[dict[str, Any]] = None + cancel_label: Optional[str] = "Cancel" + cancel_icon: Optional[dict[str, Any]] = None + + def to_json_data(self) -> dict[str, Any]: + fields = self.fields + if fields is not None: + fields = serialize_attr_defs(fields) + return { + "title": self.title, + "fields": fields, + "submit_label": self.submit_label, + "submit_icon": self.submit_icon, + "cancel_label": self.cancel_label, + "cancel_icon": self.cancel_icon, + } + + @classmethod + def from_json_data(cls, data: dict[str, Any]) -> "ActionForm": + fields = data["fields"] + if fields is not None: + data["fields"] = deserialize_attr_defs(fields) + return cls(**data) diff --git a/client/ayon_core/pipeline/actions/utils.py b/client/ayon_core/pipeline/actions/utils.py new file mode 100644 index 0000000000..3502300ead --- /dev/null +++ b/client/ayon_core/pipeline/actions/utils.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import uuid +from typing import Any + +from ayon_core.lib.attribute_definitions import ( + AbstractAttrDef, + UILabelDef, + BoolDef, + TextDef, + NumberDef, + EnumDef, + HiddenDef, +) + + +def webaction_fields_to_attribute_defs( + fields: list[dict[str, Any]] +) -> list[AbstractAttrDef]: + """Helper function to convert fields definition from webactions form. + + Convert form fields to attribute definitions to be able to display them + using attribute definitions. + + Args: + fields (list[dict[str, Any]]): Fields from webaction form. 
+ + Returns: + list[AbstractAttrDef]: Converted attribute definitions. + + """ + attr_defs = [] + for field in fields: + field_type = field["type"] + attr_def = None + if field_type == "label": + label = field.get("value") + if label is None: + label = field.get("text") + attr_def = UILabelDef( + label, key=uuid.uuid4().hex + ) + elif field_type == "boolean": + value = field["value"] + if isinstance(value, str): + value = value.lower() == "true" + + attr_def = BoolDef( + field["name"], + default=value, + label=field.get("label"), + ) + elif field_type == "text": + attr_def = TextDef( + field["name"], + default=field.get("value"), + label=field.get("label"), + placeholder=field.get("placeholder"), + multiline=field.get("multiline", False), + regex=field.get("regex"), + # syntax=field["syntax"], + ) + elif field_type in ("integer", "float"): + value = field.get("value") + if isinstance(value, str): + if field_type == "integer": + value = int(value) + else: + value = float(value) + attr_def = NumberDef( + field["name"], + default=value, + label=field.get("label"), + decimals=0 if field_type == "integer" else 5, + # placeholder=field.get("placeholder"), + minimum=field.get("min"), + maximum=field.get("max"), + ) + elif field_type in ("select", "multiselect"): + attr_def = EnumDef( + field["name"], + items=field["options"], + default=field.get("value"), + label=field.get("label"), + multiselection=field_type == "multiselect", + ) + elif field_type == "hidden": + attr_def = HiddenDef( + field["name"], + default=field.get("value"), + ) + + if attr_def is None: + print(f"Unknown config field type: {field_type}") + attr_def = UILabelDef( + f"Unknown field type '{field_type}", + key=uuid.uuid4().hex + ) + attr_defs.append(attr_def) + return attr_defs diff --git a/client/ayon_core/pipeline/colorspace.py b/client/ayon_core/pipeline/colorspace.py index 41241e17ca..7a4d9dda50 100644 --- a/client/ayon_core/pipeline/colorspace.py +++ b/client/ayon_core/pipeline/colorspace.py @@ -7,6 +7,7 @@ import platform import tempfile import warnings from copy import deepcopy +from dataclasses import dataclass import ayon_api @@ -26,6 +27,18 @@ from ayon_core.pipeline.load import get_representation_path_with_anatomy log = Logger.get_logger(__name__) +@dataclass +class ConfigData: + """OCIO Config to use in a certain context. + + When enabled and no path/template are set, it will be considered invalid + and will error on OCIO path not found. Enabled must be False to explicitly + allow OCIO to be disabled.""" + path: str = "" + template: str = "" + enabled: bool = True + + class CachedData: remapping = {} has_compatible_ocio_package = None @@ -710,7 +723,7 @@ def _get_config_path_from_profile_data( template_data (dict[str, Any]): Template data. Returns: - dict[str, str]: Config data with path and template. + ConfigData: Config data with path and template. """ template = profile[profile_type] result = StringTemplate.format_strict_template( @@ -719,12 +732,12 @@ def _get_config_path_from_profile_data( normalized_path = str(result.normalized()) if not os.path.exists(normalized_path): log.warning(f"Path was not found '{normalized_path}'.") - return None + return ConfigData() # Return invalid config data - return { - "path": normalized_path, - "template": template - } + return ConfigData( + path=normalized_path, + template=template + ) def _get_global_config_data( @@ -735,7 +748,7 @@ def _get_global_config_data( imageio_global, folder_id, log, -): +) -> ConfigData: """Get global config data. 
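Usage of the helper is straightforward; the fields below mirror a few of the webaction field types handled above and are purely illustrative. The import path is assumed from the file location in this diff.

from ayon_core.pipeline.actions.utils import webaction_fields_to_attribute_defs

fields = [
    {"type": "label", "value": "Choose what to do"},
    {
        "type": "boolean",
        "name": "overwrite",
        "value": "true",
        "label": "Overwrite",
    },
    {
        "type": "select",
        "name": "mode",
        "options": ["copy", "move"],
        "value": "copy",
        "label": "Mode",
    },
]
attr_defs = webaction_fields_to_attribute_defs(fields)
# 'attr_defs' can now back an ActionForm or any other attribute
# definition based UI.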
Global config from core settings is using profiles that are based on @@ -759,8 +772,7 @@ def _get_global_config_data( log (logging.Logger): Logger object. Returns: - Union[dict[str, str], None]: Config data with path and template - or None. + ConfigData: Config data with path and template. """ task_name = task_type = None @@ -779,12 +791,14 @@ def _get_global_config_data( ) if profile is None: log.info(f"No config profile matched filters {str(filter_values)}") - return None + return ConfigData(enabled=False) profile_type = profile["type"] - if profile_type in ("builtin_path", "custom_path"): + if profile_type in {"builtin_path", "custom_path"}: return _get_config_path_from_profile_data( profile, profile_type, template_data) + elif profile_type == "disabled": + return ConfigData(enabled=False) # TODO decide if this is the right name for representation repre_name = "ocioconfig" @@ -798,7 +812,7 @@ def _get_global_config_data( "Colorspace OCIO config path cannot be set. " "Profile is set to published product but `Product name` is empty." ) - return None + return ConfigData() folder_info = template_data.get("folder") if not folder_info: @@ -819,7 +833,7 @@ def _get_global_config_data( ) if not folder_entity: log.warning(f"Folder entity '{folder_path}' was not found..") - return None + return ConfigData() folder_id = folder_entity["id"] product_entities_by_name = { @@ -855,7 +869,7 @@ def _get_global_config_data( log.info( f"Product '{product_name}' does not have available any versions." ) - return None + return ConfigData() # Find 'ocioconfig' representation entity repre_entity = ayon_api.get_representation_by_name( @@ -868,15 +882,15 @@ def _get_global_config_data( f"Representation '{repre_name}'" f" not found on product '{product_name}'." ) - return None + return ConfigData() path = get_representation_path_with_anatomy(repre_entity, anatomy) template = repre_entity["attrib"]["template"] - return { - "path": path, - "template": template, - } + return ConfigData( + path=path, + template=template + ) def get_imageio_config_preset( @@ -1015,13 +1029,19 @@ def get_imageio_config_preset( host_ocio_config["filepath"], template_data ) - if not config_data: + if not config_data.enabled: + return {} # OCIO management disabled + + if not config_data.path: raise FileExistsError( "No OCIO config found in settings. It is" " either missing or there is typo in path inputs" ) - return config_data + return { + "path": config_data.path, + "template": config_data.template, + } def _get_host_config_data(templates, template_data): diff --git a/client/ayon_core/pipeline/compatibility.py b/client/ayon_core/pipeline/compatibility.py index f7d48526b7..78ba5ad71e 100644 --- a/client/ayon_core/pipeline/compatibility.py +++ b/client/ayon_core/pipeline/compatibility.py @@ -1,4 +1,5 @@ """Package to handle compatibility checks for pipeline components.""" +import ayon_api def is_product_base_type_supported() -> bool: @@ -13,4 +14,7 @@ def is_product_base_type_supported() -> bool: bool: True if product base types are supported, False otherwise. 
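To summarize the colorspace change above: 'ConfigData' replaces the previous dict-or-None return value and can express three distinct states. A small sketch with illustrative paths:

from ayon_core.pipeline.colorspace import ConfigData

disabled = ConfigData(enabled=False)   # OCIO management explicitly disabled
invalid = ConfigData()                 # enabled, but no config path resolved
valid = ConfigData(
    path="/configs/studio_config.ocio",
    template="{root[work]}/configs/studio_config.ocio",
)

# 'get_imageio_config_preset' maps these states to its public result:
#   disabled -> {} (empty dict, colorspace management off)
#   invalid  -> raises FileExistsError
#   valid    -> {"path": ..., "template": ...}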
""" - return False + + if not hasattr(ayon_api, "is_product_base_type_supported"): + return False + return ayon_api.is_product_base_type_supported() diff --git a/client/ayon_core/pipeline/create/context.py b/client/ayon_core/pipeline/create/context.py index c9b3178fe4..d8cb9d1b9e 100644 --- a/client/ayon_core/pipeline/create/context.py +++ b/client/ayon_core/pipeline/create/context.py @@ -15,6 +15,7 @@ from typing import ( Any, Callable, ) +from warnings import warn import pyblish.logic import pyblish.api @@ -752,13 +753,13 @@ class CreateContext: manual_creators = {} report = discover_creator_plugins(return_report=True) self.creator_discover_result = report - for creator_class in report.plugins: - if inspect.isabstract(creator_class): - self.log.debug( - "Skipping abstract Creator {}".format(str(creator_class)) - ) - continue + for creator_class in report.abstract_plugins: + self.log.debug( + "Skipping abstract Creator '%s'", + str(creator_class) + ) + for creator_class in report.plugins: creator_identifier = creator_class.identifier if creator_identifier in creators: self.log.warning( @@ -772,19 +773,17 @@ class CreateContext: creator_class.host_name and creator_class.host_name != self.host_name ): - self.log.info(( - "Creator's host name \"{}\"" - " is not supported for current host \"{}\"" - ).format(creator_class.host_name, self.host_name)) + self.log.info( + ( + 'Creator\'s host name "{}"' + ' is not supported for current host "{}"' + ).format(creator_class.host_name, self.host_name) + ) continue # TODO report initialization error try: - creator = creator_class( - project_settings, - self, - self.headless - ) + creator = creator_class(project_settings, self, self.headless) except Exception: self.log.error( f"Failed to initialize plugin: {creator_class}", @@ -792,6 +791,19 @@ class CreateContext: ) continue + if not creator.product_base_type: + message = ( + f"Provided creator {creator!r} doesn't have " + "product base type attribute defined. This will be " + "required in future." + ) + warn( + message, + DeprecationWarning, + stacklevel=2 + ) + self.log.warning(message) + if not creator.enabled: disabled_creators[creator_identifier] = creator continue @@ -1289,8 +1301,12 @@ class CreateContext: "folderPath": folder_entity["path"], "task": task_entity["name"] if task_entity else None, "productType": creator.product_type, + # Add product base type if supported. 
Fallback to product type + "productBaseType": ( + creator.product_base_type or creator.product_type), "variant": variant } + if active is not None: if not isinstance(active, bool): self.log.warning( diff --git a/client/ayon_core/pipeline/create/creator_plugins.py b/client/ayon_core/pipeline/create/creator_plugins.py index 7573589b82..7b168984ef 100644 --- a/client/ayon_core/pipeline/create/creator_plugins.py +++ b/client/ayon_core/pipeline/create/creator_plugins.py @@ -1,20 +1,21 @@ -# -*- coding: utf-8 -*- -import os -import copy -import collections -from typing import TYPE_CHECKING, Optional, Dict, Any +"""Creator plugins for the create process.""" +from __future__ import annotations +import collections +import copy +import os from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, Dict, Optional from ayon_core.lib import Logger, get_version_from_path from ayon_core.pipeline.plugin_discover import ( + deregister_plugin, + deregister_plugin_path, discover, register_plugin, register_plugin_path, - deregister_plugin, - deregister_plugin_path ) -from ayon_core.pipeline.staging_dir import get_staging_dir_info, StagingDir +from ayon_core.pipeline.staging_dir import StagingDir, get_staging_dir_info from .constants import DEFAULT_VARIANT_VALUE from .product_name import get_product_name @@ -23,6 +24,7 @@ from .structures import CreatedInstance if TYPE_CHECKING: from ayon_core.lib import AbstractAttrDef + # Avoid cyclic imports from .context import CreateContext, UpdateData # noqa: F401 @@ -66,7 +68,6 @@ class ProductConvertorPlugin(ABC): Returns: logging.Logger: Logger with name of the plugin. """ - if self._log is None: self._log = Logger.get_logger(self.__class__.__name__) return self._log @@ -82,9 +83,8 @@ class ProductConvertorPlugin(ABC): Returns: str: Converted identifier unique for all converters in host. - """ - pass + """ @abstractmethod def find_instances(self): @@ -94,14 +94,10 @@ class ProductConvertorPlugin(ABC): convert. """ - pass - @abstractmethod def convert(self): """Conversion code.""" - pass - @property def create_context(self): """Quick access to create context. @@ -109,7 +105,6 @@ class ProductConvertorPlugin(ABC): Returns: CreateContext: Context which initialized the plugin. """ - return self._create_context @property @@ -122,7 +117,6 @@ class ProductConvertorPlugin(ABC): Raises: UnavailableSharedData: When called out of collection phase. """ - return self._create_context.collection_shared_data def add_convertor_item(self, label): @@ -131,12 +125,10 @@ class ProductConvertorPlugin(ABC): Args: label (str): Label of item which will show in UI. """ - self._create_context.add_convertor_item(self.identifier, label) def remove_convertor_item(self): """Remove legacy item from create context when conversion finished.""" - self._create_context.remove_convertor_item(self.identifier) @@ -154,7 +146,14 @@ class BaseCreator(ABC): project_settings (dict[str, Any]): Project settings. create_context (CreateContext): Context which initialized creator. headless (bool): Running in headless mode. + """ + # Attribute 'skip_discovery' is used during discovery phase to skip + # plugins, which can be used to mark base plugins that should not be + # considered as plugins "to use". The discovery logic does NOT use + # the attribute value from parent classes. Each base class has to define + # the attribute again. + skip_discovery = True # Label shown in UI label = None @@ -219,7 +218,6 @@ class BaseCreator(ABC): Returns: Optional[dict[str, Any]]: Settings values or None. 
""" - settings = project_settings.get(category_name) if not settings: return None @@ -265,7 +263,6 @@ class BaseCreator(ABC): Args: project_settings (dict[str, Any]): Project settings. """ - settings_category = self.settings_category if not settings_category: return @@ -277,18 +274,17 @@ class BaseCreator(ABC): project_settings, settings_category, settings_name ) if settings is None: - self.log.debug("No settings found for {}".format(cls_name)) + self.log.debug(f"No settings found for {cls_name}") return for key, value in settings.items(): # Log out attributes that are not defined on plugin object # - those may be potential dangerous typos in settings if not hasattr(self, key): - self.log.debug(( - "Applying settings to unknown attribute '{}' on '{}'." - ).format( + self.log.debug( + "Applying settings to unknown attribute '%s' on '%s'.", key, cls_name - )) + ) setattr(self, key, value) def register_callbacks(self): @@ -297,23 +293,39 @@ class BaseCreator(ABC): Default implementation does nothing. It can be overridden to register callbacks for creator. """ - pass @property def identifier(self): """Identifier of creator (must be unique). - Default implementation returns plugin's product type. - """ + Default implementation returns plugin's product base type, + or falls back to product type if product base type is not set. - return self.product_type + """ + identifier = self.product_base_type + if not identifier: + identifier = self.product_type + return identifier @property @abstractmethod def product_type(self): """Family that plugin represents.""" - pass + @property + def product_base_type(self) -> Optional[str]: + """Base product type that plugin represents. + + Todo (antirotor): This should be required in future - it + should be made abstract then. + + Returns: + Optional[str]: Base product type that plugin represents. + If not set, it is assumed that the creator plugin is obsolete + and does not support product base type. + + """ + return None @property def project_name(self): @@ -322,7 +334,6 @@ class BaseCreator(ABC): Returns: str: Name of a project. """ - return self.create_context.project_name @property @@ -332,7 +343,6 @@ class BaseCreator(ABC): Returns: Anatomy: Project anatomy object. """ - return self.create_context.project_anatomy @property @@ -344,13 +354,14 @@ class BaseCreator(ABC): Default implementation use attributes in this order: - 'group_label' -> 'label' -> 'identifier' - Keep in mind that 'identifier' use 'product_type' by default. + + Keep in mind that 'identifier' uses 'product_base_type' by default. Returns: str: Group label that can be used for grouping of instances in UI. - Group label can be overridden by instance itself. - """ + Group label can be overridden by the instance itself. + """ if self._cached_group_label is None: label = self.identifier if self.group_label: @@ -367,7 +378,6 @@ class BaseCreator(ABC): Returns: logging.Logger: Logger with name of the plugin. """ - if self._log is None: self._log = Logger.get_logger(self.__class__.__name__) return self._log @@ -376,7 +386,8 @@ class BaseCreator(ABC): self, product_name: str, data: Dict[str, Any], - product_type: Optional[str] = None + product_type: Optional[str] = None, + product_base_type: Optional[str] = None ) -> CreatedInstance: """Create instance and add instance to context. @@ -385,6 +396,8 @@ class BaseCreator(ABC): data (Dict[str, Any]): Instance data. product_type (Optional[str]): Product type, object attribute 'product_type' is used if not passed. 
+ product_base_type (Optional[str]): Product base type, object + attribute 'product_base_type' is used if not passed. Returns: CreatedInstance: Created instance. @@ -392,11 +405,16 @@ class BaseCreator(ABC): """ if product_type is None: product_type = self.product_type + + if not product_base_type and not self.product_base_type: + product_base_type = product_type + instance = CreatedInstance( - product_type, - product_name, - data, + product_type=product_type, + product_name=product_name, + data=data, creator=self, + product_base_type=product_base_type, ) self._add_instance_to_context(instance) return instance @@ -412,7 +430,6 @@ class BaseCreator(ABC): Args: instance (CreatedInstance): New created instance. """ - self.create_context.creator_adds_instance(instance) def _remove_instance_from_context(self, instance): @@ -425,7 +442,6 @@ class BaseCreator(ABC): Args: instance (CreatedInstance): Instance which should be removed. """ - self.create_context.creator_removed_instance(instance) @abstractmethod @@ -437,8 +453,6 @@ class BaseCreator(ABC): implementation """ - pass - @abstractmethod def collect_instances(self): """Collect existing instances related to this creator plugin. @@ -464,8 +478,6 @@ class BaseCreator(ABC): ``` """ - pass - @abstractmethod def update_instances(self, update_list): """Store changes of existing instances so they can be recollected. @@ -475,8 +487,6 @@ class BaseCreator(ABC): contain changed instance and it's changes. """ - pass - @abstractmethod def remove_instances(self, instances): """Method called on instance removal. @@ -489,14 +499,11 @@ class BaseCreator(ABC): removed. """ - pass - def get_icon(self): """Icon of creator (product type). Can return path to image file or awesome icon name. """ - return self.icon def get_dynamic_data( @@ -512,19 +519,18 @@ class BaseCreator(ABC): These may be dynamically created based on current context of workfile. """ - return {} def get_product_name( self, - project_name, - folder_entity, - task_entity, - variant, - host_name=None, - instance=None, - project_entity=None, - ): + project_name: str, + folder_entity: dict[str, Any], + task_entity: Optional[dict[str, Any]], + variant: str, + host_name: Optional[str] = None, + instance: Optional[CreatedInstance] = None, + project_entity: Optional[dict[str, Any]] = None, + ) -> str: """Return product name for passed context. Method is also called on product name update. In that case origin @@ -546,11 +552,6 @@ class BaseCreator(ABC): if host_name is None: host_name = self.create_context.host_name - task_name = task_type = None - if task_entity: - task_name = task_entity["name"] - task_type = task_entity["taskType"] - dynamic_data = self.get_dynamic_data( project_name, folder_entity, @@ -566,11 +567,12 @@ class BaseCreator(ABC): return get_product_name( project_name, - task_name, - task_type, - host_name, - self.product_type, - variant, + folder_entity=folder_entity, + task_entity=task_entity, + product_base_type=self.product_base_type, + product_type=self.product_type, + host_name=host_name, + variant=variant, dynamic_data=dynamic_data, project_settings=self.project_settings, project_entity=project_entity, @@ -583,15 +585,15 @@ class BaseCreator(ABC): and values are stored to metadata for future usage and for publishing purposes. - NOTE: - Convert method should be implemented which should care about updating - keys/values when plugin attributes change. + Note: + Convert method should be implemented which should care about + updating keys/values when plugin attributes change. 
Returns: list[AbstractAttrDef]: Attribute definitions that can be tweaked for created instance. - """ + """ return self.instance_attr_defs def get_attr_defs_for_instance(self, instance): @@ -614,12 +616,10 @@ class BaseCreator(ABC): Raises: UnavailableSharedData: When called out of collection phase. """ - return self.create_context.collection_shared_data def set_instance_thumbnail_path(self, instance_id, thumbnail_path=None): """Set path to thumbnail for instance.""" - self.create_context.thumbnail_paths_by_instance_id[instance_id] = ( thumbnail_path ) @@ -640,7 +640,6 @@ class BaseCreator(ABC): Returns: dict[str, int]: Next versions by instance id. """ - return get_next_versions_for_instances( self.create_context.project_name, instances ) @@ -651,7 +650,7 @@ class Creator(BaseCreator): Creation requires prepared product name and instance data. """ - + skip_discovery = True # GUI Purposes # - default_variants may not be used if `get_default_variants` # is overridden @@ -707,7 +706,6 @@ class Creator(BaseCreator): int: Order in which is creator shown (less == earlier). By default is using Creator's 'order' or processing. """ - return self.order @abstractmethod @@ -722,11 +720,9 @@ class Creator(BaseCreator): pre_create_data(dict): Data based on pre creation attributes. Those may affect how creator works. """ - # instance = CreatedInstance( # self.product_type, product_name, instance_data # ) - pass def get_description(self): """Short description of product type and plugin. @@ -734,7 +730,6 @@ class Creator(BaseCreator): Returns: str: Short description of product type. """ - return self.description def get_detail_description(self): @@ -745,7 +740,6 @@ class Creator(BaseCreator): Returns: str: Detailed description of product type for artist. """ - return self.detailed_description def get_default_variants(self): @@ -759,7 +753,6 @@ class Creator(BaseCreator): Returns: list[str]: Whisper variants for user input. """ - return copy.deepcopy(self.default_variants) def get_default_variant(self, only_explicit=False): @@ -779,7 +772,6 @@ class Creator(BaseCreator): Returns: str: Variant value. """ - if only_explicit or self._default_variant: return self._default_variant @@ -800,7 +792,6 @@ class Creator(BaseCreator): Returns: str: Variant value. """ - return self.get_default_variant() def _set_default_variant_wrap(self, variant): @@ -812,7 +803,6 @@ class Creator(BaseCreator): Args: variant (str): New default variant value. """ - self._default_variant = variant default_variant = property( @@ -949,6 +939,8 @@ class Creator(BaseCreator): class HiddenCreator(BaseCreator): + skip_discovery = True + @abstractmethod def create(self, instance_data, source_data): pass @@ -959,10 +951,10 @@ class AutoCreator(BaseCreator): Can be used e.g. for `workfile`. """ + skip_discovery = True def remove_instances(self, instances): """Skip removal.""" - pass def discover_creator_plugins(*args, **kwargs): @@ -1020,7 +1012,6 @@ def cache_and_get_instances(creator, shared_key, list_instances_func): dict[str, dict[str, Any]]: Cached instances by creator identifier from result of passed function. 
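Putting the creator changes together, a creator that opts in to product base types could look roughly like this; the class, product type names and scene logic are illustrative and not part of this change set.

from ayon_core.pipeline.create import CreatedInstance, Creator


class CreateReviewMedia(Creator):
    """Illustrative creator declaring a product base type."""

    label = "Review Media"
    product_type = "reviewMedia"
    # Declaring the base type keeps the creator forward compatible;
    # a DeprecationWarning is raised during discovery when it is missing.
    product_base_type = "review"

    def create(self, product_name, instance_data, pre_create_data):
        # Host specific scene setup would happen here.
        instance = CreatedInstance(
            product_type=self.product_type,
            product_name=product_name,
            data=instance_data,
            creator=self,
            product_base_type=self.product_base_type,
        )
        self._add_instance_to_context(instance)

    def collect_instances(self):
        pass

    def update_instances(self, update_list):
        pass

    def remove_instances(self, instances):
        pass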
""" - if shared_key not in creator.collection_shared_data: value = collections.defaultdict(list) for instance in list_instances_func(): diff --git a/client/ayon_core/pipeline/create/product_name.py b/client/ayon_core/pipeline/create/product_name.py index ecffa4a340..9a50e18afd 100644 --- a/client/ayon_core/pipeline/create/product_name.py +++ b/client/ayon_core/pipeline/create/product_name.py @@ -1,24 +1,38 @@ +"""Functions for handling product names.""" +from __future__ import annotations + +import warnings +from functools import wraps +from typing import Any, Optional, Union, overload +from warnings import warn + import ayon_api from ayon_core.lib import ( StringTemplate, filter_profiles, prepare_template_data, + Logger, + is_func_signature_supported, ) +from ayon_core.lib.path_templates import TemplateResult from ayon_core.settings import get_project_settings from .constants import DEFAULT_PRODUCT_TEMPLATE from .exceptions import TaskNotSetError, TemplateFillError +log = Logger.get_logger(__name__) + def get_product_name_template( - project_name, - product_type, - task_name, - task_type, - host_name, - default_template=None, - project_settings=None -): + project_name: str, + product_type: str, + task_name: Optional[str], + task_type: Optional[str], + host_name: str, + default_template: Optional[str] = None, + project_settings: Optional[dict[str, Any]] = None, + product_base_type: Optional[str] = None +) -> str: """Get product name template based on passed context. Args: @@ -26,26 +40,32 @@ def get_product_name_template( product_type (str): Product type for which the product name is calculated. host_name (str): Name of host in which the product name is calculated. - task_name (str): Name of task in which context the product is created. - task_type (str): Type of task in which context the product is created. - default_template (Union[str, None]): Default template which is used if + task_name (Optional[str]): Name of task in which context the + product is created. + task_type (Optional[str]): Type of task in which context the + product is created. + default_template (Optional[str]): Default template which is used if settings won't find any matching possibility. Constant 'DEFAULT_PRODUCT_TEMPLATE' is used if not defined. - project_settings (Union[Dict[str, Any], None]): Prepared settings for + project_settings (Optional[dict[str, Any]]): Prepared settings for project. Settings are queried if not passed. - """ + product_base_type (Optional[str]): Base type of product. + Returns: + str: Product name template. 
+ + """ if project_settings is None: project_settings = get_project_settings(project_name) tools_settings = project_settings["core"]["tools"] profiles = tools_settings["creator"]["product_name_profiles"] filtering_criteria = { + "product_base_types": product_base_type or product_type, "product_types": product_type, - "hosts": host_name, - "tasks": task_name, - "task_types": task_type + "host_names": host_name, + "task_names": task_name, + "task_types": task_type, } - matching_profile = filter_profiles(profiles, filtering_criteria) template = None if matching_profile: @@ -69,6 +89,214 @@ def get_product_name_template( return template +def _get_product_name_old( + project_name: str, + task_name: Optional[str], + task_type: Optional[str], + host_name: str, + product_type: str, + variant: str, + default_template: Optional[str] = None, + dynamic_data: Optional[dict[str, Any]] = None, + project_settings: Optional[dict[str, Any]] = None, + product_type_filter: Optional[str] = None, + project_entity: Optional[dict[str, Any]] = None, + product_base_type: Optional[str] = None, +) -> TemplateResult: + warnings.warn( + "Used deprecated 'task_name' and 'task_type' arguments." + " Please use new signature with 'folder_entity' and 'task_entity'.", + DeprecationWarning, + stacklevel=2 + ) + if not product_type: + return StringTemplate("").format({}) + + template = get_product_name_template( + project_name=project_name, + product_type=product_type_filter or product_type, + task_name=task_name, + task_type=task_type, + host_name=host_name, + default_template=default_template, + project_settings=project_settings, + product_base_type=product_base_type, + ) + + template_low = template.lower() + # Simple check of task name existence for template with {task[name]} in + if not task_name and "{task" in template_low: + raise TaskNotSetError() + + task_value = { + "name": task_name, + "type": task_type, + } + if "{task}" in template_low: + task_value = task_name + # NOTE this is message for TDs and Admins -> not really for users + # TODO validate this in settings and not allow it + log.warning( + "Found deprecated task key '{task}' in product name template." + " Please use '{task[name]}' instead." + ) + + elif "{task[short]}" in template_low: + if project_entity is None: + project_entity = ayon_api.get_project(project_name) + task_types_by_name = { + task["name"]: task for task in + project_entity["taskTypes"] + } + task_short = task_types_by_name.get(task_type, {}).get("shortName") + task_value["short"] = task_short + + if not product_base_type and "{product[basetype]}" in template.lower(): + warn( + "You have Product base type in product name template, " + "but it is not provided by the creator, please update your " + "creation code to include it. It will be required in " + "the future.", + DeprecationWarning, + stacklevel=2) + + fill_pairs: dict[str, Union[str, dict[str, str]]] = { + "variant": variant, + "family": product_type, + "task": task_value, + "product": { + "type": product_type, + "basetype": product_base_type or product_type, + } + } + + if dynamic_data: + # Dynamic data may override default values + for key, value in dynamic_data.items(): + fill_pairs[key] = value + + try: + return StringTemplate.format_strict_template( + template=template, + data=prepare_template_data(fill_pairs) + ) + except KeyError as exp: + msg = ( + f"Value for {exp} key is missing in template '{template}'." 
+ f" Available values are {fill_pairs}" + ) + raise TemplateFillError(msg) from exp + + +def _backwards_compatibility_product_name(func): + """Helper to decide which variant of 'get_product_name' to use. + + The old version expected 'task_name' and 'task_type' arguments. The new + version expects 'folder_entity' and 'task_entity' arguments instead. + + The function is also marked with an attribute 'version' so other addons + can check if the function is using the new signature or is using + the old signature. That should allow addons to adapt to new signature. + >>> if getattr(get_product_name, "use_entities", None): + >>> # New signature is used + >>> path = get_product_name(project_name, folder_entity, ...) + >>> else: + >>> # Old signature is used + >>> path = get_product_name(project_name, taks_name, ...) + """ + # Add attribute to function to identify it as the new function + # so other addons can easily identify it. + # >>> geattr(get_product_name, "use_entities", False) + setattr(func, "use_entities", True) + + @wraps(func) + def inner(*args, **kwargs): + # --- + # Decide which variant of the function is used based on + # passed arguments. + # --- + + # Entities in key-word arguments mean that the new function is used + if "folder_entity" in kwargs or "task_entity" in kwargs: + return func(*args, **kwargs) + + # Using more than 7 positional arguments is not allowed + # in the new function + if len(args) > 7: + return _get_product_name_old(*args, **kwargs) + + if len(args) > 1: + arg_2 = args[1] + # The second argument is a string -> task name + if isinstance(arg_2, str): + return _get_product_name_old(*args, **kwargs) + + if is_func_signature_supported(func, *args, **kwargs): + return func(*args, **kwargs) + return _get_product_name_old(*args, **kwargs) + + return inner + + +@overload +def get_product_name( + project_name: str, + folder_entity: dict[str, Any], + task_entity: Optional[dict[str, Any]], + product_base_type: str, + product_type: str, + host_name: str, + variant: str, + *, + dynamic_data: Optional[dict[str, Any]] = None, + project_settings: Optional[dict[str, Any]] = None, + project_entity: Optional[dict[str, Any]] = None, + default_template: Optional[str] = None, + product_base_type_filter: Optional[str] = None, +) -> TemplateResult: + """Calculate product name based on passed context and AYON settings. + + Subst name templates are defined in `project_settings/global/tools/creator + /product_name_profiles` where are profiles with host name, product type, + task name and task type filters. If context does not match any profile + then `DEFAULT_PRODUCT_TEMPLATE` is used as default template. + + That's main reason why so many arguments are required to calculate product + name. + + Args: + project_name (str): Project name. + folder_entity (Optional[dict[str, Any]]): Folder entity. + task_entity (Optional[dict[str, Any]]): Task entity. + host_name (str): Host name. + product_base_type (str): Product base type. + product_type (str): Product type. + variant (str): In most of the cases it is user input during creation. + dynamic_data (Optional[dict[str, Any]]): Dynamic data specific for + a creator which creates instance. + project_settings (Optional[dict[str, Any]]): Prepared settings + for project. Settings are queried if not passed. + project_entity (Optional[dict[str, Any]]): Project entity used when + task short name is required by template. + default_template (Optional[str]): Default template if any profile does + not match passed context. 
Constant 'DEFAULT_PRODUCT_TEMPLATE' + is used if is not passed. + product_base_type_filter (Optional[str]): Use different product base + type for product template filtering. Value of + `product_base_type_filter` is used when not passed. + + Returns: + TemplateResult: Product name. + + Raises: + TaskNotSetError: If template requires task which is not provided. + TemplateFillError: If filled template contains placeholder key which + is not collected. + + """ + + +@overload def get_product_name( project_name, task_name, @@ -81,25 +309,25 @@ def get_product_name( project_settings=None, product_type_filter=None, project_entity=None, -): +) -> TemplateResult: """Calculate product name based on passed context and AYON settings. - Subst name templates are defined in `project_settings/global/tools/creator - /product_name_profiles` where are profiles with host name, product type, - task name and task type filters. If context does not match any profile - then `DEFAULT_PRODUCT_TEMPLATE` is used as default template. + Product name templates are defined in `project_settings/global/tools + /creator/product_name_profiles` where are profiles with host name, + product type, task name and task type filters. If context does not match + any profile then `DEFAULT_PRODUCT_TEMPLATE` is used as default template. That's main reason why so many arguments are required to calculate product name. - Todos: - Find better filtering options to avoid requirement of - argument 'family_filter'. + Deprecated: + This function is using deprecated signature that does not support + folder entity data to be used. Args: project_name (str): Project name. - task_name (Union[str, None]): Task name. - task_type (Union[str, None]): Task type. + task_name (Optional[str]): Task name. + task_type (Optional[str]): Task type. host_name (str): Host name. product_type (str): Product type. variant (str): In most of the cases it is user input during creation. @@ -117,7 +345,63 @@ def get_product_name( task short name is required by template. Returns: - str: Product name. + TemplateResult: Product name. + + """ + pass + + +@_backwards_compatibility_product_name +def get_product_name( + project_name: str, + folder_entity: dict[str, Any], + task_entity: Optional[dict[str, Any]], + product_base_type: str, + product_type: str, + host_name: str, + variant: str, + *, + dynamic_data: Optional[dict[str, Any]] = None, + project_settings: Optional[dict[str, Any]] = None, + project_entity: Optional[dict[str, Any]] = None, + default_template: Optional[str] = None, + product_base_type_filter: Optional[str] = None, +) -> TemplateResult: + """Calculate product name based on passed context and AYON settings. + + Product name templates are defined in `project_settings/global/tools + /creator/product_name_profiles` where are profiles with host name, + product base type, product type, task name and task type filters. + + If context does not match any profile then `DEFAULT_PRODUCT_TEMPLATE` + is used as default template. + + That's main reason why so many arguments are required to calculate product + name. + + Args: + project_name (str): Project name. + folder_entity (Optional[dict[str, Any]]): Folder entity. + task_entity (Optional[dict[str, Any]]): Task entity. + host_name (str): Host name. + product_base_type (str): Product base type. + product_type (str): Product type. + variant (str): In most of the cases it is user input during creation. + dynamic_data (Optional[dict[str, Any]]): Dynamic data specific for + a creator which creates instance. 
+ project_settings (Optional[dict[str, Any]]): Prepared settings + for project. Settings are queried if not passed. + project_entity (Optional[dict[str, Any]]): Project entity used when + task short name is required by template. + default_template (Optional[str]): Default template if any profile does + not match passed context. Constant 'DEFAULT_PRODUCT_TEMPLATE' + is used if is not passed. + product_base_type_filter (Optional[str]): Use different product base + type for product template filtering. Value of + `product_base_type_filter` is used when not passed. + + Returns: + TemplateResult: Product name. Raises: TaskNotSetError: If template requires task which is not provided. @@ -126,47 +410,68 @@ def get_product_name( """ if not product_type: - return "" + return StringTemplate("").format({}) + + task_name = task_type = None + if task_entity: + task_name = task_entity["name"] + task_type = task_entity["taskType"] template = get_product_name_template( - project_name, - product_type_filter or product_type, - task_name, - task_type, - host_name, + project_name=project_name, + product_base_type=product_base_type_filter or product_base_type, + product_type=product_type, + task_name=task_name, + task_type=task_type, + host_name=host_name, default_template=default_template, - project_settings=project_settings + project_settings=project_settings, ) - # Simple check of task name existence for template with {task} in - # - missing task should be possible only in Standalone publisher - if not task_name and "{task" in template.lower(): + + template_low = template.lower() + # Simple check of task name existence for template with {task[name]} in + if not task_name and "{task" in template_low: raise TaskNotSetError() task_value = { "name": task_name, "type": task_type, } - if "{task}" in template.lower(): + if "{task}" in template_low: task_value = task_name + # NOTE this is message for TDs and Admins -> not really for users + # TODO validate this in settings and not allow it + log.warning( + "Found deprecated task key '{task}' in product name template." + " Please use '{task[name]}' instead." + ) - elif "{task[short]}" in template.lower(): + elif "{task[short]}" in template_low: if project_entity is None: project_entity = ayon_api.get_project(project_name) task_types_by_name = { - task["name"]: task for task in - project_entity["taskTypes"] + task["name"]: task + for task in project_entity["taskTypes"] } task_short = task_types_by_name.get(task_type, {}).get("shortName") task_value["short"] = task_short fill_pairs = { "variant": variant, + # TODO We should stop support 'family' key. "family": product_type, "task": task_value, "product": { - "type": product_type + "type": product_type, + "basetype": product_base_type, } } + if folder_entity: + fill_pairs["folder"] = { + "name": folder_entity["name"], + "type": folder_entity["folderType"], + } + if dynamic_data: # Dynamic data may override default values for key, value in dynamic_data.items(): @@ -178,7 +483,8 @@ def get_product_name( data=prepare_template_data(fill_pairs) ) except KeyError as exp: - raise TemplateFillError( - "Value for {} key is missing in template '{}'." - " Available values are {}".format(str(exp), template, fill_pairs) + msg = ( + f"Value for {exp} key is missing in template '{template}'." 
+ f" Available values are {fill_pairs}" ) + raise TemplateFillError(msg) diff --git a/client/ayon_core/pipeline/create/structures.py b/client/ayon_core/pipeline/create/structures.py index b2be377b42..6f53a61b25 100644 --- a/client/ayon_core/pipeline/create/structures.py +++ b/client/ayon_core/pipeline/create/structures.py @@ -11,6 +11,8 @@ from ayon_core.lib.attribute_definitions import ( serialize_attr_defs, deserialize_attr_defs, ) + + from ayon_core.pipeline import ( AYON_INSTANCE_ID, AVALON_INSTANCE_ID, @@ -137,6 +139,7 @@ class AttributeValues: if value is None: continue converted_value = attr_def.convert_value(value) + # QUESTION Could we just use converted value all the time? if converted_value == value: self._data[attr_def.key] = value @@ -245,11 +248,11 @@ class AttributeValues: def _update(self, value): changes = {} - for key, value in dict(value).items(): - if key in self._data and self._data.get(key) == value: + for key, key_value in dict(value).items(): + if key in self._data and self._data.get(key) == key_value: continue - self._data[key] = value - changes[key] = value + self._data[key] = key_value + changes[key] = key_value return changes def _pop(self, key, default): @@ -479,6 +482,10 @@ class CreatedInstance: data (Dict[str, Any]): Data used for filling product name or override data from already existing instance. creator (BaseCreator): Creator responsible for instance. + product_base_type (Optional[str]): Product base type that will be + created. If not provided then product base type is taken from + creator plugin. If creator does not have product base type then + deprecation warning is raised. """ # Keys that can't be changed or removed from data after loading using @@ -489,6 +496,7 @@ class CreatedInstance: "id", "instance_id", "productType", + "productBaseType", "creator_identifier", "creator_attributes", "publish_attributes" @@ -508,7 +516,13 @@ class CreatedInstance: data: Dict[str, Any], creator: "BaseCreator", transient_data: Optional[Dict[str, Any]] = None, + product_base_type: Optional[str] = None ): + """Initialize CreatedInstance.""" + # fallback to product type for backward compatibility + if not product_base_type: + product_base_type = creator.product_base_type or product_type + self._creator = creator creator_identifier = creator.identifier group_label = creator.get_group_label() @@ -561,6 +575,9 @@ class CreatedInstance: self._data["id"] = item_id self._data["productType"] = product_type self._data["productName"] = product_name + + self._data["productBaseType"] = product_base_type + self._data["active"] = data.get("active", True) self._data["creator_identifier"] = creator_identifier diff --git a/client/ayon_core/pipeline/editorial.py b/client/ayon_core/pipeline/editorial.py index b553fae3fb..21468e6ddd 100644 --- a/client/ayon_core/pipeline/editorial.py +++ b/client/ayon_core/pipeline/editorial.py @@ -202,7 +202,8 @@ def is_clip_from_media_sequence(otio_clip): def remap_range_on_file_sequence(otio_clip, otio_range): - """ + """ Remap the provided range on a file sequence clip. + Args: otio_clip (otio.schema.Clip): The OTIO clip to check. otio_range (otio.schema.TimeRange): The trim range to apply. 
@@ -249,7 +250,11 @@ def remap_range_on_file_sequence(otio_clip, otio_range): if ( is_clip_from_media_sequence(otio_clip) and available_range_start_frame == media_ref.start_frame - and conformed_src_in.to_frames() < media_ref.start_frame + + # source range should be included in available range from media + # using round instead of conformed_src_in.to_frames() to avoid + # any precision issue with frame rate. + and round(conformed_src_in.value) < media_ref.start_frame ): media_in = otio.opentime.RationalTime( 0, rate=available_range_rate diff --git a/client/ayon_core/pipeline/farm/pyblish_functions.py b/client/ayon_core/pipeline/farm/pyblish_functions.py index a5053844b9..a58cecf68c 100644 --- a/client/ayon_core/pipeline/farm/pyblish_functions.py +++ b/client/ayon_core/pipeline/farm/pyblish_functions.py @@ -253,6 +253,19 @@ def create_skeleton_instance( "reuseLastVersion": data.get("reuseLastVersion", False), } + # Pass on the OCIO metadata of what the source display and view are + # so that the farm can correctly set up color management. + if "sceneDisplay" in data and "sceneView" in data: + instance_skeleton_data["sceneDisplay"] = data["sceneDisplay"] + instance_skeleton_data["sceneView"] = data["sceneView"] + elif "colorspaceDisplay" in data and "colorspaceView" in data: + # Backwards compatibility for sceneDisplay and sceneView + instance_skeleton_data["colorspaceDisplay"] = data["colorspaceDisplay"] + instance_skeleton_data["colorspaceView"] = data["colorspaceView"] + if "sourceDisplay" in data and "sourceView" in data: + instance_skeleton_data["sourceDisplay"] = data["sourceDisplay"] + instance_skeleton_data["sourceView"] = data["sourceView"] + if data.get("renderlayer"): instance_skeleton_data["renderlayer"] = data["renderlayer"] @@ -589,24 +602,7 @@ def create_instances_for_aov( """ # we cannot attach AOVs to other products as we consider every # AOV product of its own. - log = Logger.get_logger("farm_publishing") - additional_color_data = { - "renderProducts": instance.data["renderProducts"], - "colorspaceConfig": instance.data["colorspaceConfig"], - "display": instance.data["colorspaceDisplay"], - "view": instance.data["colorspaceView"] - } - - # Get templated path from absolute config path. - anatomy = instance.context.data["anatomy"] - colorspace_template = instance.data["colorspaceConfig"] - try: - additional_color_data["colorspaceTemplate"] = remap_source( - colorspace_template, anatomy) - except ValueError as e: - log.warning(e) - additional_color_data["colorspaceTemplate"] = colorspace_template # if there are product to attach to and more than one AOV, # we cannot proceed. @@ -618,6 +614,29 @@ def create_instances_for_aov( "attaching multiple AOVs or renderable cameras to " "product is not supported yet.") + additional_data = { + "renderProducts": instance.data["renderProducts"], + } + + # Collect color management data if present + colorspace_config = instance.data.get("colorspaceConfig") + if colorspace_config: + additional_data.update({ + "colorspaceConfig": colorspace_config, + # Display/View are optional + "display": instance.data.get("sourceDisplay"), + "view": instance.data.get("sourceView") + }) + + # Get templated path from absolute config path. + anatomy = instance.context.data["anatomy"] + try: + additional_data["colorspaceTemplate"] = remap_source( + colorspace_config, anatomy) + except ValueError as e: + log.warning(e) + additional_data["colorspaceTemplate"] = colorspace_config + # create instances for every AOV we found in expected files. 
# NOTE: this is done for every AOV and every render camera (if # there are multiple renderable cameras in scene) @@ -625,7 +644,7 @@ def create_instances_for_aov( instance, skeleton, aov_filter, - additional_color_data, + additional_data, skip_integration_repre_list, do_not_add_review, frames_to_render @@ -936,16 +955,28 @@ def _create_instances_for_aov( "stagingDir": staging_dir, "fps": new_instance.get("fps"), "tags": ["review"] if preview else [], - "colorspaceData": { + } + + if colorspace and additional_data["colorspaceConfig"]: + # Only apply colorspace data if the image has a colorspace + colorspace_data: dict = { "colorspace": colorspace, "config": { "path": additional_data["colorspaceConfig"], "template": additional_data["colorspaceTemplate"] }, - "display": additional_data["display"], - "view": additional_data["view"] } - } + # Display/View are optional + display = additional_data.get("display") + if display: + colorspace_data["display"] = display + view = additional_data.get("view") + if view: + colorspace_data["view"] = view + + rep["colorspaceData"] = colorspace_data + else: + log.debug("No colorspace data for representation: {}".format(rep)) # support conversion from tiled to scanline if instance.data.get("convertToScanline"): @@ -1045,7 +1076,9 @@ def get_resources(project_name, version_entity, extension=None): filtered.append(repre_entity) representation = filtered[0] - directory = get_representation_path(representation) + directory = get_representation_path( + project_name, representation + ) print("Source: ", directory) resources = sorted( [ diff --git a/client/ayon_core/pipeline/load/__init__.py b/client/ayon_core/pipeline/load/__init__.py index 2a33fa119b..b5b09a5dc9 100644 --- a/client/ayon_core/pipeline/load/__init__.py +++ b/client/ayon_core/pipeline/load/__init__.py @@ -25,8 +25,8 @@ from .utils import ( get_loader_identifier, get_loaders_by_name, - get_representation_path_from_context, get_representation_path, + get_representation_path_from_context, get_representation_path_with_anatomy, is_compatible_loader, @@ -85,8 +85,8 @@ __all__ = ( "get_loader_identifier", "get_loaders_by_name", - "get_representation_path_from_context", "get_representation_path", + "get_representation_path_from_context", "get_representation_path_with_anatomy", "is_compatible_loader", diff --git a/client/ayon_core/pipeline/load/plugins.py b/client/ayon_core/pipeline/load/plugins.py index ed963110c6..b8cca08802 100644 --- a/client/ayon_core/pipeline/load/plugins.py +++ b/client/ayon_core/pipeline/load/plugins.py @@ -21,6 +21,13 @@ from .utils import get_representation_path_from_context class LoaderPlugin(list): """Load representation into host application""" + # Attribute 'skip_discovery' is used during discovery phase to skip + # plugins, which can be used to mark base plugins that should not be + # considered as plugins "to use". The discovery logic does NOT use + # the attribute value from parent classes. Each base class has to define + # the attribute again. 
+ skip_discovery = True + product_types: set[str] = set() product_base_types: Optional[set[str]] = None representations = set() diff --git a/client/ayon_core/pipeline/load/utils.py b/client/ayon_core/pipeline/load/utils.py index d1731d4cf9..a02a4b30e0 100644 --- a/client/ayon_core/pipeline/load/utils.py +++ b/client/ayon_core/pipeline/load/utils.py @@ -1,11 +1,15 @@ +from __future__ import annotations + import os import uuid -import platform +import warnings import logging import inspect import collections import numbers -from typing import Optional, Union, Any +import copy +from functools import wraps +from typing import Optional, Union, Any, overload import ayon_api @@ -14,9 +18,8 @@ from ayon_core.lib import ( StringTemplate, TemplateUnsolved, ) -from ayon_core.pipeline import ( - Anatomy, -) +from ayon_core.lib.path_templates import TemplateResult +from ayon_core.pipeline import Anatomy log = logging.getLogger(__name__) @@ -644,15 +647,15 @@ def get_representation_path_from_context(context): representation = context["representation"] project_entity = context.get("project") - root = None - if ( - project_entity - and project_entity["name"] != get_current_project_name() - ): - anatomy = Anatomy(project_entity["name"]) - root = anatomy.roots - - return get_representation_path(representation, root) + if project_entity: + project_name = project_entity["name"] + else: + project_name = get_current_project_name() + return get_representation_path( + project_name, + representation, + project_entity=project_entity, + ) def get_representation_path_with_anatomy(repre_entity, anatomy): @@ -671,139 +674,248 @@ def get_representation_path_with_anatomy(repre_entity, anatomy): anatomy (Anatomy): Project anatomy object. Returns: - Union[None, TemplateResult]: None if path can't be received + TemplateResult: Resolved representation path. Raises: InvalidRepresentationContext: When representation data are probably invalid or not available. + """ + return get_representation_path( + anatomy.project_name, + repre_entity, + anatomy=anatomy, + ) + + +def get_representation_path_with_roots( + representation: dict[str, Any], + roots: dict[str, str], +) -> Optional[TemplateResult]: + """Get filename from representation with custom root. + + Args: + representation(dict): Representation entity. + roots (dict[str, str]): Roots to use. + + + Returns: + Optional[TemplateResult]: Resolved representation path. + + """ + try: + template = representation["attrib"]["template"] + except KeyError: + return None + + try: + context = representation["context"] + + _fix_representation_context_compatibility(context) + + context["root"] = roots + path = StringTemplate.format_strict_template( + template, context + ) + except (TemplateUnsolved, KeyError): + # Template references unavailable data + return None + + return path.normalized() + + +def _backwards_compatibility_repre_path(func): + """Wrapper handling backwards compatibility of 'get_representation_path'. + + Allows 'get_representation_path' to support old and new signatures of the + function. The old signature supported passing in representation entity + and optional roots. The new signature requires the project name + to be passed. In case custom roots should be used, a dedicated function + 'get_representation_path_with_roots' is available. + + The wrapper handles passed arguments, and based on kwargs and types + of the arguments will call the function which relates to + the arguments. 
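+
+    Old (deprecated) call: 'get_representation_path(representation, root)'.
+    New call: 'get_representation_path(project_name, repre_entity)'.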
+ + The function is also marked with an attribute 'version' so other addons + can check if the function is using the new signature or is using + the old signature. That should allow addons to adapt to new signature. + >>> if getattr(get_representation_path, "version", None) == 2: + >>> path = get_representation_path(project_name, repre_entity) + >>> else: + >>> path = get_representation_path(repre_entity) + + The plan to remove backwards compatibility is 1.1.2026. + + """ + # Add an attribute to the function so addons can check if the new variant + # of the function is available. + # >>> getattr(get_representation_path, "version", None) == 2 + # >>> True + setattr(func, "version", 2) + + @wraps(func) + def inner(*args, **kwargs): + from ayon_core.pipeline import get_current_project_name + + # Decide which variant of the function based on passed arguments + # will be used. + if args: + arg_1 = args[0] + if isinstance(arg_1, str): + return func(*args, **kwargs) + + elif "project_name" in kwargs: + return func(*args, **kwargs) + + warnings.warn( + ( + "Used deprecated variant of 'get_representation_path'." + " Please change used arguments signature to follow" + " new definition. Will be removed 1.1.2026." + ), + DeprecationWarning, + stacklevel=2, + ) + + # Find out which arguments were passed + if args: + representation = args[0] + else: + representation = kwargs.get("representation") + + if len(args) > 1: + roots = args[1] + else: + roots = kwargs.get("root") + + if roots is not None: + return get_representation_path_with_roots( + representation, roots + ) + + project_name = ( + representation["context"].get("project", {}).get("name") + ) + if project_name is None: + project_name = get_current_project_name() + + return func(project_name, representation) + + return inner + + +@overload +def get_representation_path( + representation: dict[str, Any], + root: Optional[dict[str, Any]] = None, +) -> TemplateResult: + """DEPRECATED Get filled representation path. + + Use 'get_representation_path' using the new function signature. + + Args: + representation (dict[str, Any]): Representation entity. + root (Optional[dict[str, Any]): Roots to fill the path. + + Returns: + TemplateResult: Resolved path to representation. + + Raises: + InvalidRepresentationContext: When representation data are probably + invalid or not available. + + """ + pass + + +@overload +def get_representation_path( + project_name: str, + repre_entity: dict[str, Any], + *, + anatomy: Optional[Anatomy] = None, + project_entity: Optional[dict[str, Any]] = None, +) -> TemplateResult: + """Get filled representation path. + + Args: + project_name (str): Project name. + repre_entity (dict[str, Any]): Representation entity. + anatomy (Optional[Anatomy]): Project anatomy. + project_entity (Optional[dict[str, Any]): Project entity. Is used to + initialize Anatomy and is not needed if 'anatomy' is passed in. + + Returns: + TemplateResult: Resolved path to representation. + + Raises: + InvalidRepresentationContext: When representation data are probably + invalid or not available. + + """ + pass + + +@_backwards_compatibility_repre_path +def get_representation_path( + project_name: str, + repre_entity: dict[str, Any], + *, + anatomy: Optional[Anatomy] = None, + project_entity: Optional[dict[str, Any]] = None, +) -> TemplateResult: + """Get filled representation path. + + Args: + project_name (str): Project name. + repre_entity (dict[str, Any]): Representation entity. + anatomy (Optional[Anatomy]): Project anatomy. 
+ project_entity (Optional[dict[str, Any]): Project entity. Is used to + initialize Anatomy and is not needed if 'anatomy' is passed in. + + Returns: + TemplateResult: Resolved path to representation. + + Raises: + InvalidRepresentationContext: When representation data are probably + invalid or not available. + + """ + if anatomy is None: + anatomy = Anatomy(project_name, project_entity=project_entity) try: template = repre_entity["attrib"]["template"] - except KeyError: - raise InvalidRepresentationContext(( - "Representation document does not" - " contain template in data ('data.template')" - )) + except KeyError as exc: + raise InvalidRepresentationContext( + "Failed to receive template from representation entity." + ) from exc try: - context = repre_entity["context"] + context = copy.deepcopy(repre_entity["context"]) _fix_representation_context_compatibility(context) context["root"] = anatomy.roots path = StringTemplate.format_strict_template(template, context) except TemplateUnsolved as exc: - raise InvalidRepresentationContext(( - "Couldn't resolve representation template with available data." - " Reason: {}".format(str(exc)) - )) + raise InvalidRepresentationContext( + "Failed to resolve representation template with available data." + ) from exc return path.normalized() -def get_representation_path(representation, root=None): - """Get filename from representation document - - There are three ways of getting the path from representation which are - tried in following sequence until successful. - 1. Get template from representation['data']['template'] and data from - representation['context']. Then format template with the data. - 2. Get template from project['config'] and format it with default data set - 3. Get representation['data']['path'] and use it directly - - Args: - representation(dict): representation document from the database - - Returns: - str: fullpath of the representation - - """ - if root is None: - from ayon_core.pipeline import get_current_project_name, Anatomy - - anatomy = Anatomy(get_current_project_name()) - return get_representation_path_with_anatomy( - representation, anatomy - ) - - def path_from_representation(): - try: - template = representation["attrib"]["template"] - except KeyError: - return None - - try: - context = representation["context"] - - _fix_representation_context_compatibility(context) - - context["root"] = root - path = StringTemplate.format_strict_template( - template, context - ) - # Force replacing backslashes with forward slashed if not on - # windows - if platform.system().lower() != "windows": - path = path.replace("\\", "/") - except (TemplateUnsolved, KeyError): - # Template references unavailable data - return None - - if not path: - return path - - normalized_path = os.path.normpath(path) - if os.path.exists(normalized_path): - return normalized_path - return path - - def path_from_data(): - if "path" not in representation["attrib"]: - return None - - path = representation["attrib"]["path"] - # Force replacing backslashes with forward slashed if not on - # windows - if platform.system().lower() != "windows": - path = path.replace("\\", "/") - - if os.path.exists(path): - return os.path.normpath(path) - - dir_path, file_name = os.path.split(path) - if not os.path.exists(dir_path): - return None - - base_name, ext = os.path.splitext(file_name) - file_name_items = None - if "#" in base_name: - file_name_items = [part for part in base_name.split("#") if part] - elif "%" in base_name: - file_name_items = base_name.split("%") - - if not 
file_name_items: - return None - - filename_start = file_name_items[0] - - for _file in os.listdir(dir_path): - if _file.startswith(filename_start) and _file.endswith(ext): - return os.path.normpath(path) - - return ( - path_from_representation() or path_from_data() - ) - - def get_representation_path_by_names( - project_name: str, - folder_path: str, - product_name: str, - version_name: str, - representation_name: str, - anatomy: Optional[Anatomy] = None) -> Optional[str]: + project_name: str, + folder_path: str, + product_name: str, + version_name: Union[int, str], + representation_name: str, + anatomy: Optional[Anatomy] = None +) -> Optional[TemplateResult]: """Get (latest) filepath for representation for folder and product. See `get_representation_by_names` for more details. @@ -820,24 +932,23 @@ def get_representation_path_by_names( representation_name ) if not representation: - return + return None - if not anatomy: - anatomy = Anatomy(project_name) - - if representation: - path = get_representation_path_with_anatomy(representation, anatomy) - return str(path).replace("\\", "/") + return get_representation_path( + project_name, + representation, + anatomy=anatomy, + ) def get_representation_by_names( - project_name: str, - folder_path: str, - product_name: str, - version_name: Union[int, str], - representation_name: str, + project_name: str, + folder_path: str, + product_name: str, + version_name: Union[int, str], + representation_name: str, ) -> Optional[dict]: - """Get representation entity for asset and subset. + """Get representation entity for folder and product. If version_name is "hero" then return the hero version If version_name is "latest" then return the latest version @@ -852,10 +963,10 @@ def get_representation_by_names( folder_entity = ayon_api.get_folder_by_path( project_name, folder_path, fields=["id"]) if not folder_entity: - return + return None if isinstance(product_name, dict) and "name" in product_name: - # Allow explicitly passing subset document + # Allow explicitly passing product entity document product_entity = product_name else: product_entity = ayon_api.get_product_by_name( @@ -864,7 +975,7 @@ def get_representation_by_names( folder_id=folder_entity["id"], fields=["id"]) if not product_entity: - return + return None if version_name == "hero": version_entity = ayon_api.get_hero_version_by_product_id( @@ -876,7 +987,7 @@ def get_representation_by_names( version_entity = ayon_api.get_version_by_name( project_name, version_name, product_id=product_entity["id"]) if not version_entity: - return + return None return ayon_api.get_representation_by_name( project_name, representation_name, version_id=version_entity["id"]) diff --git a/client/ayon_core/pipeline/plugin_discover.py b/client/ayon_core/pipeline/plugin_discover.py index 03da7fce79..fd907eb22c 100644 --- a/client/ayon_core/pipeline/plugin_discover.py +++ b/client/ayon_core/pipeline/plugin_discover.py @@ -1,6 +1,9 @@ +from __future__ import annotations + import os import inspect import traceback +from typing import Optional from ayon_core.lib import Logger from ayon_core.lib.python_module_tools import ( @@ -96,6 +99,77 @@ class DiscoverResult: log.info(report) +def discover_plugins( + base_class: type, + paths: Optional[list[str]] = None, + classes: Optional[list[type]] = None, + ignored_classes: Optional[list[type]] = None, + allow_duplicates: bool = True, +): + """Find and return subclasses of `superclass` + + Args: + base_class (type): Class which determines discovered subclasses. 
+        paths (Optional[list[str]]): List of paths to look for plug-ins.
+        classes (Optional[list[type]]): Already loaded classes that should
+            be considered for discovery.
+        ignored_classes (Optional[list[type]]): List of classes that won't
+            be added to the output plugins.
+        allow_duplicates (bool): Allow plugins with duplicated class names.
+            When disabled, duplicates are reported and skipped.
+
+    Returns:
+        DiscoverResult: Object holding successfully
+            discovered plugins, ignored plugins, plugins with missing
+            abstract implementation and duplicated plugins.
+
+    """
+    ignored_classes = ignored_classes or []
+    paths = paths or []
+    classes = classes or []
+
+    result = DiscoverResult(base_class)
+
+    all_plugins = list(classes)
+
+    for path in paths:
+        modules, crashed = modules_from_path(path)
+        for (filepath, exc_info) in crashed:
+            result.crashed_file_paths[filepath] = exc_info
+
+        for item in modules:
+            filepath, module = item
+            result.add_module(module)
+            for cls in classes_from_module(base_class, module):
+                if cls is base_class:
+                    continue
+                # Class has defined 'skip_discovery = True'
+                skip_discovery = cls.__dict__.get("skip_discovery")
+                if skip_discovery is True:
+                    continue
+                all_plugins.append(cls)
+
+    if base_class not in ignored_classes:
+        ignored_classes.append(base_class)
+
+    plugin_names = set()
+    for cls in all_plugins:
+        if cls in ignored_classes:
+            result.ignored_plugins.add(cls)
+            continue
+
+        if inspect.isabstract(cls):
+            result.abstract_plugins.append(cls)
+            continue
+
+        if not allow_duplicates:
+            class_name = cls.__name__
+            if class_name in plugin_names:
+                result.duplicated_plugins.append(cls)
+                continue
+            plugin_names.add(class_name)
+        result.plugins.append(cls)
+    return result
+
+
 class PluginDiscoverContext(object):
     """Store and discover registered types nad registered paths to types.
 
@@ -141,58 +215,17 @@ class PluginDiscoverContext(object):
             Union[DiscoverResult, list[Any]]: Object holding successfully
                 discovered plugins, ignored plugins, plugins with missing
                 abstract implementation and duplicated plugin.
+ """ - - if not ignore_classes: - ignore_classes = [] - - result = DiscoverResult(superclass) - plugin_names = set() registered_classes = self._registered_plugins.get(superclass) or [] registered_paths = self._registered_plugin_paths.get(superclass) or [] - for cls in registered_classes: - if cls is superclass or cls in ignore_classes: - result.ignored_plugins.add(cls) - continue - - if inspect.isabstract(cls): - result.abstract_plugins.append(cls) - continue - - class_name = cls.__name__ - if class_name in plugin_names: - result.duplicated_plugins.append(cls) - continue - plugin_names.add(class_name) - result.plugins.append(cls) - - # Include plug-ins from registered paths - for path in registered_paths: - modules, crashed = modules_from_path(path) - for item in crashed: - filepath, exc_info = item - result.crashed_file_paths[filepath] = exc_info - - for item in modules: - filepath, module = item - result.add_module(module) - for cls in classes_from_module(superclass, module): - if cls is superclass or cls in ignore_classes: - result.ignored_plugins.add(cls) - continue - - if inspect.isabstract(cls): - result.abstract_plugins.append(cls) - continue - - if not allow_duplicates: - class_name = cls.__name__ - if class_name in plugin_names: - result.duplicated_plugins.append(cls) - continue - plugin_names.add(class_name) - - result.plugins.append(cls) + result = discover_plugins( + superclass, + paths=registered_paths, + classes=registered_classes, + ignored_classes=ignore_classes, + allow_duplicates=allow_duplicates, + ) # Store in memory last result to keep in memory loaded modules self._last_discovered_results[superclass] = result diff --git a/client/ayon_core/pipeline/publish/__init__.py b/client/ayon_core/pipeline/publish/__init__.py index ede7fc3a35..179d749f48 100644 --- a/client/ayon_core/pipeline/publish/__init__.py +++ b/client/ayon_core/pipeline/publish/__init__.py @@ -29,6 +29,7 @@ from .lib import ( get_publish_template_name, publish_plugins_discover, + filter_crashed_publish_paths, load_help_content_from_plugin, load_help_content_from_filepath, @@ -87,6 +88,7 @@ __all__ = ( "get_publish_template_name", "publish_plugins_discover", + "filter_crashed_publish_paths", "load_help_content_from_plugin", "load_help_content_from_filepath", diff --git a/client/ayon_core/pipeline/publish/lib.py b/client/ayon_core/pipeline/publish/lib.py index 7152ec78fa..8492145979 100644 --- a/client/ayon_core/pipeline/publish/lib.py +++ b/client/ayon_core/pipeline/publish/lib.py @@ -1,6 +1,8 @@ """Library functions for publishing.""" from __future__ import annotations import os +import platform +import re import sys import inspect import copy @@ -8,19 +10,19 @@ import warnings import hashlib import xml.etree.ElementTree from typing import TYPE_CHECKING, Optional, Union, List, Any -import clique -import speedcopy import logging -import pyblish.util -import pyblish.plugin -import pyblish.api - from ayon_api import ( get_server_api_connection, get_representations, get_last_version_by_product_name ) +import clique +import pyblish.util +import pyblish.plugin +import pyblish.api +import speedcopy + from ayon_core.lib import ( import_filepath, Logger, @@ -122,7 +124,8 @@ def get_publish_template_name( task_type, project_settings=None, hero=False, - logger=None + product_base_type: Optional[str] = None, + logger=None, ): """Get template name which should be used for passed context. @@ -140,17 +143,29 @@ def get_publish_template_name( task_type (str): Task type on which is instance working. 
         project_settings (Dict[str, Any]): Prepared project settings.
         hero (bool): Template is for hero version publishing.
+        product_base_type (Optional[str]): Product base type for which
+            the template should be found.
         logger (logging.Logger): Custom logger used for 'filter_profiles'
             function.
 
     Returns:
         str: Template name which should be used for integration.
 
     """
+    if not product_base_type:
+        msg = (
+            "Argument 'product_base_type' is not provided to"
+            " 'get_publish_template_name' function. This argument"
+            " will be required in future versions."
+        )
+        warnings.warn(msg, DeprecationWarning)
+        if logger:
+            logger.warning(msg)
     template = None
     filter_criteria = {
         "hosts": host_name,
         "product_types": product_type,
+        "product_base_types": product_base_type,
         "task_names": task_name,
         "task_types": task_type,
     }
@@ -179,7 +194,9 @@ class HelpContent:
         self.detail = detail
 
 
-def load_help_content_from_filepath(filepath):
+def load_help_content_from_filepath(
+    filepath: str
+) -> dict[str, dict[str, HelpContent]]:
     """Load help content from xml file.
 
     Xml file may contain errors and warnings.
     """
@@ -214,18 +231,84 @@ def load_help_content_from_filepath(filepath):
     return output
 
 
-def load_help_content_from_plugin(plugin):
+def load_help_content_from_plugin(
+    plugin: pyblish.api.Plugin,
+    help_filename: Optional[str] = None,
+) -> dict[str, dict[str, HelpContent]]:
     cls = plugin
     if not inspect.isclass(plugin):
         cls = plugin.__class__
+
     plugin_filepath = inspect.getfile(cls)
     plugin_dir = os.path.dirname(plugin_filepath)
-    basename = os.path.splitext(os.path.basename(plugin_filepath))[0]
-    filename = basename + ".xml"
-    filepath = os.path.join(plugin_dir, "help", filename)
+    if help_filename is None:
+        basename = os.path.splitext(os.path.basename(plugin_filepath))[0]
+        help_filename = basename + ".xml"
+    filepath = os.path.join(plugin_dir, "help", help_filename)
     return load_help_content_from_filepath(filepath)
 
 
+def filter_crashed_publish_paths(
+    project_name: str,
+    crashed_paths: set[str],
+    *,
+    project_settings: Optional[dict[str, Any]] = None,
+) -> set[str]:
+    """Filter crashed paths that happened during plugin discovery.
+
+    Check if plugin discovery has strict mode enabled and filter the crashed
+    paths from discovery based on regexes from settings.
+
+    Publishing should not start if any paths are returned.
+
+    Args:
+        project_name (str): Name of the project under which the plugin
+            discovery happened.
+        crashed_paths (set[str]): Crashed paths from plugin discovery report.
+        project_settings (Optional[dict[str, Any]]): Project settings.
+
+    Returns:
+        set[str]: Filtered crashed paths.
+
+    """
+    filtered_paths = set()
+    # Nothing crashed, all good...
+    if not crashed_paths:
+        return filtered_paths
+
+    if project_settings is None:
+        project_settings = get_project_settings(project_name)
+
+    discover_validation = (
+        project_settings["core"]["tools"]["publish"]["discover_validation"]
+    )
+    # Strict mode is not enabled.
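+    # In that case crashed paths are ignored and publishing can continue.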
+ if not discover_validation["enabled"]: + return filtered_paths + + regexes = [ + re.compile(value, re.IGNORECASE) + for value in discover_validation["ignore_paths"] + if value + ] + is_windows = platform.system().lower() == "windows" + # Fitler path with regexes from settings + for path in crashed_paths: + # Normalize paths to use forward slashes on windows + if is_windows: + path = path.replace("\\", "/") + is_invalid = True + for regex in regexes: + if regex.match(path): + is_invalid = False + break + + if is_invalid: + filtered_paths.add(path) + + return filtered_paths + + def publish_plugins_discover( paths: Optional[list[str]] = None) -> DiscoverResult: """Find and return available pyblish plug-ins. @@ -812,7 +895,22 @@ def replace_with_published_scene_path(instance, replace_in_path=True): template_data["comment"] = None anatomy = instance.context.data["anatomy"] - template = anatomy.get_template_item("publish", "default", "path") + project_name = anatomy.project_name + task_name = task_type = None + task_entity = instance.data.get("taskEntity") + if task_entity: + task_name = task_entity["name"] + task_type = task_entity["taskType"] + project_settings = instance.context.data["project_settings"] + template_name = get_publish_template_name( + project_name=project_name, + host_name=instance.context.data["hostName"], + product_type=workfile_instance.data["productType"], + task_name=task_name, + task_type=task_type, + project_settings=project_settings, + ) + template = anatomy.get_template_item("publish", template_name, "path") template_filled = template.format_strict(template_data) file_path = os.path.normpath(template_filled) @@ -983,7 +1081,26 @@ def get_instance_expected_output_path( "version": version }) - path_template_obj = anatomy.get_template_item("publish", "default")["path"] + # Get instance publish template name + task_name = task_type = None + task_entity = instance.data.get("taskEntity") + if task_entity: + task_name = task_entity["name"] + task_type = task_entity["taskType"] + + template_name = get_publish_template_name( + project_name=instance.context.data["projectName"], + host_name=instance.context.data["hostName"], + product_type=instance.data["productType"], + task_name=task_name, + task_type=task_type, + project_settings=instance.context.data["project_settings"], + ) + + path_template_obj = anatomy.get_template_item( + "publish", + template_name + )["path"] template_filled = path_template_obj.format_strict(template_data) return os.path.normpath(template_filled) @@ -1045,14 +1162,16 @@ def main_cli_publish( except ValueError: pass + context = get_global_context() + project_settings = get_project_settings(context["project_name"]) + install_ayon_plugins() if addons_manager is None: - addons_manager = AddonsManager() + addons_manager = AddonsManager(project_settings) applications_addon = addons_manager.get_enabled_addon("applications") if applications_addon is not None: - context = get_global_context() env = applications_addon.get_farm_publish_environment_variables( context["project_name"], context["folder_path"], @@ -1075,17 +1194,33 @@ def main_cli_publish( log.info("Running publish ...") discover_result = publish_plugins_discover() - publish_plugins = discover_result.plugins print(discover_result.get_report(only_errors=False)) + filtered_crashed_paths = filter_crashed_publish_paths( + context["project_name"], + set(discover_result.crashed_file_paths), + project_settings=project_settings, + ) + if filtered_crashed_paths: + joined_paths = "\n".join([ + f"- {path}" + 
            for path in filtered_crashed_paths
+        ])
+        log.error(
+            "Plugin discovery strict mode is enabled."
+            " Crashed plugin paths that prevent publishing:"
+            f"\n{joined_paths}"
+        )
+        sys.exit(1)
+
+    publish_plugins = discover_result.plugins
+
     # Error exit as soon as any error occurs.
-    error_format = ("Failed {plugin.__name__}: "
-                    "{error} -- {error.traceback}")
+    error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"
 
     for result in pyblish.util.publish_iter(plugins=publish_plugins):
         if result["error"]:
             log.error(error_format.format(**result))
-            # uninstall()
             sys.exit(1)
 
     log.info("Publish finished.")
diff --git a/client/ayon_core/pipeline/publish/publish_plugins.py b/client/ayon_core/pipeline/publish/publish_plugins.py
index cc6887e762..90b8e90a3c 100644
--- a/client/ayon_core/pipeline/publish/publish_plugins.py
+++ b/client/ayon_core/pipeline/publish/publish_plugins.py
@@ -1,7 +1,7 @@
 import inspect
 from abc import ABCMeta
 import typing
-from typing import Optional
+from typing import Optional, Any
 
 import pyblish.api
 import pyblish.logic
@@ -82,22 +82,51 @@ class PublishValidationError(PublishError):
 
 
 class PublishXmlValidationError(PublishValidationError):
+    """Validation error with messages loaded from a dedicated xml file.
+
+    Having one xml file with multiple possible messages helps to avoid
+    flooding the code with dedicated artist-facing messages.
+
+    XML files should live relative to the plugin file location:
+    '{plugin dir}/help/some_plugin.xml'.
+
+    Args:
+        plugin (pyblish.api.Plugin): Plugin that raised the error. Is used
+            to get the path to the xml file.
+        message (str): Exception message. Can be technical; it is used for
+            console output.
+        key (Optional[str]): The xml file can contain multiple error
+            messages; 'key' selects one of them. Defaults to 'main'.
+        formatting_data (Optional[dict[str, Any]]): Values used to fill
+            variables in the error message.
+        help_filename (Optional[str]): Name of the xml file with messages.
+            Defaults to the plugin's filename with an '.xml' extension.
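+
+    Example (illustrative; key and formatting data are hypothetical):
+        >>> raise PublishXmlValidationError(
+        ...     self,
+        ...     "Frame range is invalid",
+        ...     key="invalid_frame_range",
+        ...     formatting_data={"folder_path": folder_path},
+        ... )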
+ + """ def __init__( - self, plugin, message, key=None, formatting_data=None - ): + self, + plugin: pyblish.api.Plugin, + message: str, + key: Optional[str] = None, + formatting_data: Optional[dict[str, Any]] = None, + help_filename: Optional[str] = None, + ) -> None: if key is None: key = "main" if not formatting_data: formatting_data = {} - result = load_help_content_from_plugin(plugin) + result = load_help_content_from_plugin(plugin, help_filename) content_obj = result["errors"][key] description = content_obj.description.format(**formatting_data) detail = content_obj.detail if detail: detail = detail.format(**formatting_data) - super(PublishXmlValidationError, self).__init__( - message, content_obj.title, description, detail + super().__init__( + message, + content_obj.title, + description, + detail ) diff --git a/client/ayon_core/pipeline/template_data.py b/client/ayon_core/pipeline/template_data.py index 0a95a98be8..955b1aaac8 100644 --- a/client/ayon_core/pipeline/template_data.py +++ b/client/ayon_core/pipeline/template_data.py @@ -1,27 +1,50 @@ +from __future__ import annotations + +from typing import Optional, Any + import ayon_api from ayon_core.settings import get_studio_settings -from ayon_core.lib.local_settings import get_ayon_username +from ayon_core.lib import DefaultKeysDict +from ayon_core.lib.local_settings import get_ayon_user_entity -def get_general_template_data(settings=None, username=None): +def get_general_template_data( + settings: Optional[dict[str, Any]] = None, + username: Optional[str] = None, + user_entity: Optional[dict[str, Any]] = None, +): """General template data based on system settings or machine. Output contains formatting keys: - - 'studio[name]' - Studio name filled from system settings - - 'studio[code]' - Studio code filled from system settings - - 'user' - User's name using 'get_ayon_username' + - 'studio[name]' - Studio name filled from system settings + - 'studio[code]' - Studio code filled from system settings + - 'user[name]' - User's name + - 'user[attrib][...]' - User's attributes + - 'user[data][...]' - User's data Args: settings (Dict[str, Any]): Studio or project settings. username (Optional[str]): AYON Username. - """ + user_entity (Optional[dict[str, Any]]): User entity. 
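+
+    Example (illustrative use of the documented keys):
+        >>> data = get_general_template_data()
+        >>> "{studio[code]}_{user[name]}".format(**data)  # doctest: +SKIP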
+ """ if not settings: settings = get_studio_settings() - if username is None: - username = get_ayon_username() + if user_entity is None: + user_entity = get_ayon_user_entity(username) + + # Use dictionary with default value for backwards compatibility + # - we did support '{user}' now it should be '{user[name]}' + user_data = DefaultKeysDict( + "name", + { + "name": user_entity["name"], + "attrib": user_entity["attrib"], + "data": user_entity["data"], + } + ) core_settings = settings["core"] return { @@ -29,7 +52,7 @@ def get_general_template_data(settings=None, username=None): "name": core_settings["studio_name"], "code": core_settings["studio_code"] }, - "user": username + "user": user_data, } @@ -73,7 +96,6 @@ def get_folder_template_data(folder_entity, project_name): Output dictionary contains keys: - 'folder' - dictionary with 'name' key filled with folder name - - 'asset' - folder name - 'hierarchy' - parent folder names joined with '/' - 'parent' - direct parent name, project name used if is under project @@ -109,7 +131,6 @@ def get_folder_template_data(folder_entity, project_name): "path": path, "parents": parents, }, - "asset": folder_name, "hierarchy": hierarchy, "parent": parent_name } @@ -150,7 +171,8 @@ def get_template_data( task_entity=None, host_name=None, settings=None, - username=None + username=None, + user_entity=None, ): """Prepare data for templates filling from entered documents and info. @@ -173,13 +195,18 @@ def get_template_data( host_name (Optional[str]): Used to fill '{app}' key. settings (Union[Dict, None]): Prepared studio or project settings. They're queried if not passed (may be slower). - username (Optional[str]): AYON Username. + username (Optional[str]): DEPRECATED AYON Username. + user_entity (Optional[dict[str, Any]): AYON user entity. Returns: Dict[str, Any]: Data prepared for filling workdir template. """ - template_data = get_general_template_data(settings, username=username) + template_data = get_general_template_data( + settings, + username=username, + user_entity=user_entity, + ) template_data.update(get_project_template_data(project_entity)) if folder_entity: template_data.update(get_folder_template_data( diff --git a/client/ayon_core/pipeline/usdlib.py b/client/ayon_core/pipeline/usdlib.py index 2ff98c5e45..6b9d19fd35 100644 --- a/client/ayon_core/pipeline/usdlib.py +++ b/client/ayon_core/pipeline/usdlib.py @@ -299,7 +299,6 @@ def add_ordered_sublayer(layer, contribution_path, layer_id, order=None, sdf format args metadata if enabled) """ - # Add the order with the contribution path so that for future # contributions we can again use it to magically fit into the # ordering. 
We put this in the path because sublayer paths do @@ -317,20 +316,25 @@ def add_ordered_sublayer(layer, contribution_path, layer_id, order=None, # If the layer was already in the layers, then replace it for index, existing_path in enumerate(layer.subLayerPaths): args = get_sdf_format_args(existing_path) - existing_layer = args.get("layer_id") - if existing_layer == layer_id: + existing_layer_id = args.get("layer_id") + if existing_layer_id == layer_id: + existing_layer = layer.subLayerPaths[index] + existing_order = args.get("order") + existing_order = int(existing_order) if existing_order else None + if order is not None and order != existing_order: + # We need to move the layer, so we will remove this index + # and then re-insert it below at the right order + log.debug(f"Removing existing layer: {existing_layer}") + del layer.subLayerPaths[index] + break + # Put it in the same position where it was before when swapping # it with the original, also take over its order metadata - order = args.get("order") - if order is not None: - order = int(order) - else: - order = None contribution_path = _format_path(contribution_path, - order=order, + order=existing_order, layer_id=layer_id) log.debug( - f"Replacing existing layer: {layer.subLayerPaths[index]} " + f"Replacing existing layer: {existing_layer} " f"-> {contribution_path}" ) layer.subLayerPaths[index] = contribution_path @@ -684,3 +688,20 @@ def get_sdf_format_args(path): """Return SDF_FORMAT_ARGS parsed to `dict`""" _raw_path, data = Sdf.Layer.SplitIdentifier(path) return data + + +def get_standard_default_prim_name(folder_path: str) -> str: + """Return the AYON-specified default prim name for a folder path. + + This is used e.g. for the default prim in AYON USD Contribution workflows. + """ + folder_name: str = folder_path.rsplit("/", 1)[-1] + + # Prim names are not allowed to start with a digit in USD. Authoring them + # would mean generating essentially garbage data and may result in + # unexpected behavior in certain USD or DCC versions, like failure to + # refresh in usdview or crashes in Houdini 21. 
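+    # For example, a folder path like "/assets/010_main" would resolve to
+    # the default prim name "_010_main".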
+ if folder_name and folder_name[0].isdigit(): + folder_name = f"_{folder_name}" + + return folder_name diff --git a/client/ayon_core/pipeline/version_start.py b/client/ayon_core/pipeline/version_start.py index 7ee20a5dd4..54022012a0 100644 --- a/client/ayon_core/pipeline/version_start.py +++ b/client/ayon_core/pipeline/version_start.py @@ -1,16 +1,19 @@ +from __future__ import annotations +from typing import Optional, Any + from ayon_core.lib.profiles_filtering import filter_profiles from ayon_core.settings import get_project_settings def get_versioning_start( - project_name, - host_name, - task_name=None, - task_type=None, - product_type=None, - product_name=None, - project_settings=None, -): + project_name: str, + host_name: str, + task_name: Optional[str] = None, + task_type: Optional[str] = None, + product_type: Optional[str] = None, + product_name: Optional[str] = None, + project_settings: Optional[dict[str, Any]] = None, +) -> int: """Get anatomy versioning start""" if not project_settings: project_settings = get_project_settings(project_name) @@ -22,14 +25,12 @@ def get_versioning_start( if not profiles: return version_start - # TODO use 'product_types' and 'product_name' instead of - # 'families' and 'subsets' filtering_criteria = { "host_names": host_name, - "families": product_type, + "product_types": product_type, + "product_names": product_name, "task_names": task_name, "task_types": task_type, - "subsets": product_name } profile = filter_profiles(profiles, filtering_criteria) diff --git a/client/ayon_core/pipeline/workfile/workfile_template_builder.py b/client/ayon_core/pipeline/workfile/workfile_template_builder.py index 52e27baa80..b6757db66d 100644 --- a/client/ayon_core/pipeline/workfile/workfile_template_builder.py +++ b/client/ayon_core/pipeline/workfile/workfile_template_builder.py @@ -300,7 +300,11 @@ class AbstractTemplateBuilder(ABC): self._loaders_by_name = get_loaders_by_name() return self._loaders_by_name - def get_linked_folder_entities(self, link_type: Optional[str]): + def get_linked_folder_entities( + self, + link_type: Optional[str], + folder_path_regex: Optional[str], + ): if not link_type: return [] project_name = self.project_name @@ -317,7 +321,11 @@ class AbstractTemplateBuilder(ABC): if link["entityType"] == "folder" } - return list(get_folders(project_name, folder_ids=linked_folder_ids)) + return list(get_folders( + project_name, + folder_path_regex=folder_path_regex, + folder_ids=linked_folder_ids, + )) def _collect_creators(self): self._creators_by_name = { @@ -832,14 +840,24 @@ class AbstractTemplateBuilder(ABC): host_name = self.host_name task_name = self.current_task_name task_type = self.current_task_type + folder_path = self.current_folder_path + folder_type = None + folder_entity = self.current_folder_entity + if folder_entity: + folder_type = folder_entity["folderType"] + + filter_data = { + "task_types": task_type, + "task_names": task_name, + "folder_types": folder_type, + "folder_paths": folder_path, + } build_profiles = self._get_build_profiles() profile = filter_profiles( build_profiles, - { - "task_types": task_type, - "task_names": task_name - } + filter_data, + logger=self.log ) if not profile: raise TemplateProfileNotFound(( @@ -1465,7 +1483,7 @@ class PlaceholderLoadMixin(object): tooltip=( "Link Type\n" "\nDefines what type of link will be used to" - " link the asset to the current folder." + " link the product to the current folder." 
) ), attribute_definitions.EnumDef( @@ -1638,7 +1656,10 @@ class PlaceholderLoadMixin(object): linked_folder_entity["id"] for linked_folder_entity in ( self.builder.get_linked_folder_entities( - link_type=link_type)) + link_type=link_type, + folder_path_regex=folder_path_regex + ) + ) ] if not folder_ids: @@ -1666,6 +1687,8 @@ class PlaceholderLoadMixin(object): for version in get_last_versions( project_name, filtered_product_ids, fields={"id"} ).values() + # Version may be none if a product has no versions + if version is not None ) return list(get_representations( project_name, diff --git a/client/ayon_core/plugins/load/copy_file.py b/client/ayon_core/plugins/load/copy_file.py deleted file mode 100644 index 08dad03be3..0000000000 --- a/client/ayon_core/plugins/load/copy_file.py +++ /dev/null @@ -1,34 +0,0 @@ -from ayon_core.style import get_default_entity_icon_color -from ayon_core.pipeline import load - - -class CopyFile(load.LoaderPlugin): - """Copy the published file to be pasted at the desired location""" - - representations = {"*"} - product_types = {"*"} - - label = "Copy File" - order = 10 - icon = "copy" - color = get_default_entity_icon_color() - - def load(self, context, name=None, namespace=None, data=None): - path = self.filepath_from_context(context) - self.log.info("Added copy to clipboard: {0}".format(path)) - self.copy_file_to_clipboard(path) - - @staticmethod - def copy_file_to_clipboard(path): - from qtpy import QtCore, QtWidgets - - clipboard = QtWidgets.QApplication.clipboard() - assert clipboard, "Must have running QApplication instance" - - # Build mime data for clipboard - data = QtCore.QMimeData() - url = QtCore.QUrl.fromLocalFile(path) - data.setUrls([url]) - - # Set to Clipboard - clipboard.setMimeData(data) diff --git a/client/ayon_core/plugins/load/copy_file_path.py b/client/ayon_core/plugins/load/copy_file_path.py deleted file mode 100644 index fdf31b5e02..0000000000 --- a/client/ayon_core/plugins/load/copy_file_path.py +++ /dev/null @@ -1,29 +0,0 @@ -import os - -from ayon_core.pipeline import load - - -class CopyFilePath(load.LoaderPlugin): - """Copy published file path to clipboard""" - representations = {"*"} - product_types = {"*"} - - label = "Copy File Path" - order = 20 - icon = "clipboard" - color = "#999999" - - def load(self, context, name=None, namespace=None, data=None): - path = self.filepath_from_context(context) - self.log.info("Added file path to clipboard: {0}".format(path)) - self.copy_path_to_clipboard(path) - - @staticmethod - def copy_path_to_clipboard(path): - from qtpy import QtWidgets - - clipboard = QtWidgets.QApplication.clipboard() - assert clipboard, "Must have running QApplication instance" - - # Set to Clipboard - clipboard.setText(os.path.normpath(path)) diff --git a/client/ayon_core/plugins/load/create_hero_version.py b/client/ayon_core/plugins/load/create_hero_version.py index aef0cf8863..531f024fc4 100644 --- a/client/ayon_core/plugins/load/create_hero_version.py +++ b/client/ayon_core/plugins/load/create_hero_version.py @@ -62,8 +62,8 @@ class CreateHeroVersion(load.ProductLoaderPlugin): ignored_representation_names: list[str] = [] db_representation_context_keys = [ - "project", "folder", "asset", "hierarchy", "task", "product", - "subset", "family", "representation", "username", "user", "output" + "project", "folder", "hierarchy", "task", "product", + "representation", "username", "user", "output" ] use_hardlinks = False @@ -75,6 +75,7 @@ class CreateHeroVersion(load.ProductLoaderPlugin): 
msgBox.setStyleSheet(style.load_stylesheet()) msgBox.setWindowFlags( msgBox.windowFlags() | QtCore.Qt.WindowType.FramelessWindowHint + | QtCore.Qt.WindowType.WindowStaysOnTopHint ) msgBox.exec_() diff --git a/client/ayon_core/plugins/load/delete_old_versions.py b/client/ayon_core/plugins/load/delete_old_versions.py deleted file mode 100644 index 3a42ccba7e..0000000000 --- a/client/ayon_core/plugins/load/delete_old_versions.py +++ /dev/null @@ -1,477 +0,0 @@ -import collections -import os -import uuid -from typing import List, Dict, Any - -import clique -import ayon_api -from ayon_api.operations import OperationsSession -import qargparse -from qtpy import QtWidgets, QtCore - -from ayon_core import style -from ayon_core.lib import format_file_size -from ayon_core.pipeline import load, Anatomy -from ayon_core.pipeline.load import ( - get_representation_path_with_anatomy, - InvalidRepresentationContext, -) - - -class DeleteOldVersions(load.ProductLoaderPlugin): - """Deletes specific number of old version""" - - is_multiple_contexts_compatible = True - sequence_splitter = "__sequence_splitter__" - - representations = ["*"] - product_types = {"*"} - tool_names = ["library_loader"] - - label = "Delete Old Versions" - order = 35 - icon = "trash" - color = "#d8d8d8" - - options = [ - qargparse.Integer( - "versions_to_keep", default=2, min=0, help="Versions to keep:" - ), - qargparse.Boolean( - "remove_publish_folder", help="Remove publish folder:" - ) - ] - - requires_confirmation = True - - def delete_whole_dir_paths(self, dir_paths, delete=True): - size = 0 - - for dir_path in dir_paths: - # Delete all files and folders in dir path - for root, dirs, files in os.walk(dir_path, topdown=False): - for name in files: - file_path = os.path.join(root, name) - size += os.path.getsize(file_path) - if delete: - os.remove(file_path) - self.log.debug("Removed file: {}".format(file_path)) - - for name in dirs: - if delete: - os.rmdir(os.path.join(root, name)) - - if not delete: - continue - - # Delete even the folder and it's parents folders if they are empty - while True: - if not os.path.exists(dir_path): - dir_path = os.path.dirname(dir_path) - continue - - if len(os.listdir(dir_path)) != 0: - break - - os.rmdir(os.path.join(dir_path)) - - return size - - def path_from_representation(self, representation, anatomy): - try: - context = representation["context"] - except KeyError: - return (None, None) - - try: - path = get_representation_path_with_anatomy( - representation, anatomy - ) - except InvalidRepresentationContext: - return (None, None) - - sequence_path = None - if "frame" in context: - context["frame"] = self.sequence_splitter - sequence_path = get_representation_path_with_anatomy( - representation, anatomy - ) - - if sequence_path: - sequence_path = sequence_path.normalized() - - return (path.normalized(), sequence_path) - - def delete_only_repre_files(self, dir_paths, file_paths, delete=True): - size = 0 - - for dir_id, dir_path in dir_paths.items(): - dir_files = os.listdir(dir_path) - collections, remainders = clique.assemble(dir_files) - for file_path, seq_path in file_paths[dir_id]: - file_path_base = os.path.split(file_path)[1] - # Just remove file if `frame` key was not in context or - # filled path is in remainders (single file sequence) - if not seq_path or file_path_base in remainders: - if not os.path.exists(file_path): - self.log.debug( - "File was not found: {}".format(file_path) - ) - continue - - size += os.path.getsize(file_path) - - if delete: - os.remove(file_path) - 
self.log.debug("Removed file: {}".format(file_path)) - - if file_path_base in remainders: - remainders.remove(file_path_base) - continue - - seq_path_base = os.path.split(seq_path)[1] - head, tail = seq_path_base.split(self.sequence_splitter) - - final_col = None - for collection in collections: - if head != collection.head or tail != collection.tail: - continue - final_col = collection - break - - if final_col is not None: - # Fill full path to head - final_col.head = os.path.join(dir_path, final_col.head) - for _file_path in final_col: - if os.path.exists(_file_path): - - size += os.path.getsize(_file_path) - - if delete: - os.remove(_file_path) - self.log.debug( - "Removed file: {}".format(_file_path) - ) - - _seq_path = final_col.format("{head}{padding}{tail}") - self.log.debug("Removed files: {}".format(_seq_path)) - collections.remove(final_col) - - elif os.path.exists(file_path): - size += os.path.getsize(file_path) - - if delete: - os.remove(file_path) - self.log.debug("Removed file: {}".format(file_path)) - else: - self.log.debug( - "File was not found: {}".format(file_path) - ) - - # Delete as much as possible parent folders - if not delete: - return size - - for dir_path in dir_paths.values(): - while True: - if not os.path.exists(dir_path): - dir_path = os.path.dirname(dir_path) - continue - - if len(os.listdir(dir_path)) != 0: - break - - self.log.debug("Removed folder: {}".format(dir_path)) - os.rmdir(dir_path) - - return size - - def message(self, text): - msgBox = QtWidgets.QMessageBox() - msgBox.setText(text) - msgBox.setStyleSheet(style.load_stylesheet()) - msgBox.setWindowFlags( - msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint - ) - msgBox.exec_() - - def _confirm_delete(self, - contexts: List[Dict[str, Any]], - versions_to_keep: int) -> bool: - """Prompt user for a deletion confirmation""" - - contexts_list = "\n".join(sorted( - "- {folder[name]} > {product[name]}".format_map(context) - for context in contexts - )) - num_contexts = len(contexts) - s = "s" if num_contexts > 1 else "" - text = ( - "Are you sure you want to delete versions?\n\n" - f"This will keep only the last {versions_to_keep} " - f"versions for the {num_contexts} selected product{s}." 
- ) - informative_text = "Warning: This will delete files from disk" - detailed_text = ( - f"Keep only {versions_to_keep} versions for:\n{contexts_list}" - ) - - messagebox = QtWidgets.QMessageBox() - messagebox.setIcon(QtWidgets.QMessageBox.Warning) - messagebox.setWindowTitle("Delete Old Versions") - messagebox.setText(text) - messagebox.setInformativeText(informative_text) - messagebox.setDetailedText(detailed_text) - messagebox.setStandardButtons( - QtWidgets.QMessageBox.Yes - | QtWidgets.QMessageBox.Cancel - ) - messagebox.setDefaultButton(QtWidgets.QMessageBox.Cancel) - messagebox.setStyleSheet(style.load_stylesheet()) - messagebox.setAttribute(QtCore.Qt.WA_DeleteOnClose, True) - return messagebox.exec_() == QtWidgets.QMessageBox.Yes - - def get_data(self, context, versions_count): - product_entity = context["product"] - folder_entity = context["folder"] - project_name = context["project"]["name"] - anatomy = Anatomy(project_name, project_entity=context["project"]) - - version_fields = ayon_api.get_default_fields_for_type("version") - version_fields.add("tags") - versions = list(ayon_api.get_versions( - project_name, - product_ids=[product_entity["id"]], - active=None, - hero=False, - fields=version_fields - )) - self.log.debug( - "Version Number ({})".format(len(versions)) - ) - versions_by_parent = collections.defaultdict(list) - for ent in versions: - versions_by_parent[ent["productId"]].append(ent) - - def sort_func(ent): - return int(ent["version"]) - - all_last_versions = [] - for _parent_id, _versions in versions_by_parent.items(): - for idx, version in enumerate( - sorted(_versions, key=sort_func, reverse=True) - ): - if idx >= versions_count: - break - all_last_versions.append(version) - - self.log.debug("Collected versions ({})".format(len(versions))) - - # Filter latest versions - for version in all_last_versions: - versions.remove(version) - - # Update versions_by_parent without filtered versions - versions_by_parent = collections.defaultdict(list) - for ent in versions: - versions_by_parent[ent["productId"]].append(ent) - - # Filter already deleted versions - versions_to_pop = [] - for version in versions: - if "deleted" in version["tags"]: - versions_to_pop.append(version) - - for version in versions_to_pop: - msg = "Folder: \"{}\" | Product: \"{}\" | Version: \"{}\"".format( - folder_entity["path"], - product_entity["name"], - version["version"] - ) - self.log.debug(( - "Skipping version. Already tagged as inactive. < {} >" - ).format(msg)) - versions.remove(version) - - version_ids = [ent["id"] for ent in versions] - - self.log.debug( - "Filtered versions to delete ({})".format(len(version_ids)) - ) - - if not version_ids: - msg = "Skipping processing. 
Nothing to delete on {}/{}".format( - folder_entity["path"], product_entity["name"] - ) - self.log.info(msg) - print(msg) - return - - repres = list(ayon_api.get_representations( - project_name, version_ids=version_ids - )) - - self.log.debug( - "Collected representations to remove ({})".format(len(repres)) - ) - - dir_paths = {} - file_paths_by_dir = collections.defaultdict(list) - for repre in repres: - file_path, seq_path = self.path_from_representation( - repre, anatomy - ) - if file_path is None: - self.log.debug(( - "Could not format path for represenation \"{}\"" - ).format(str(repre))) - continue - - dir_path = os.path.dirname(file_path) - dir_id = None - for _dir_id, _dir_path in dir_paths.items(): - if _dir_path == dir_path: - dir_id = _dir_id - break - - if dir_id is None: - dir_id = uuid.uuid4() - dir_paths[dir_id] = dir_path - - file_paths_by_dir[dir_id].append([file_path, seq_path]) - - dir_ids_to_pop = [] - for dir_id, dir_path in dir_paths.items(): - if os.path.exists(dir_path): - continue - - dir_ids_to_pop.append(dir_id) - - # Pop dirs from both dictionaries - for dir_id in dir_ids_to_pop: - dir_paths.pop(dir_id) - paths = file_paths_by_dir.pop(dir_id) - # TODO report of missing directories? - paths_msg = ", ".join([ - "'{}'".format(path[0].replace("\\", "/")) for path in paths - ]) - self.log.debug(( - "Folder does not exist. Deleting its files skipped: {}" - ).format(paths_msg)) - - return { - "dir_paths": dir_paths, - "file_paths_by_dir": file_paths_by_dir, - "versions": versions, - "folder": folder_entity, - "product": product_entity, - "archive_product": versions_count == 0 - } - - def main(self, project_name, data, remove_publish_folder): - # Size of files. - size = 0 - if not data: - return size - - if remove_publish_folder: - size = self.delete_whole_dir_paths(data["dir_paths"].values()) - else: - size = self.delete_only_repre_files( - data["dir_paths"], data["file_paths_by_dir"] - ) - - op_session = OperationsSession() - for version in data["versions"]: - orig_version_tags = version["tags"] - version_tags = list(orig_version_tags) - changes = {} - if "deleted" not in version_tags: - version_tags.append("deleted") - changes["tags"] = version_tags - - if version["active"]: - changes["active"] = False - - if not changes: - continue - op_session.update_entity( - project_name, "version", version["id"], changes - ) - - op_session.commit() - - return size - - def load(self, contexts, name=None, namespace=None, options=None): - - # Get user options - versions_to_keep = 2 - remove_publish_folder = False - if options: - versions_to_keep = options.get( - "versions_to_keep", versions_to_keep - ) - remove_publish_folder = options.get( - "remove_publish_folder", remove_publish_folder - ) - - # Because we do not want this run by accident we will add an extra - # user confirmation - if ( - self.requires_confirmation - and not self._confirm_delete(contexts, versions_to_keep) - ): - return - - try: - size = 0 - for count, context in enumerate(contexts): - data = self.get_data(context, versions_to_keep) - if not data: - continue - project_name = context["project"]["name"] - size += self.main(project_name, data, remove_publish_folder) - print("Progressing {}/{}".format(count + 1, len(contexts))) - - msg = "Total size of files: {}".format(format_file_size(size)) - self.log.info(msg) - self.message(msg) - - except Exception: - self.log.error("Failed to delete versions.", exc_info=True) - - -class CalculateOldVersions(DeleteOldVersions): - """Calculate file size of old versions""" - 
label = "Calculate Old Versions" - order = 30 - tool_names = ["library_loader"] - - options = [ - qargparse.Integer( - "versions_to_keep", default=2, min=0, help="Versions to keep:" - ), - qargparse.Boolean( - "remove_publish_folder", help="Remove publish folder:" - ) - ] - - requires_confirmation = False - - def main(self, project_name, data, remove_publish_folder): - size = 0 - - if not data: - return size - - if remove_publish_folder: - size = self.delete_whole_dir_paths( - data["dir_paths"].values(), delete=False - ) - else: - size = self.delete_only_repre_files( - data["dir_paths"], data["file_paths_by_dir"], delete=False - ) - - return size diff --git a/client/ayon_core/plugins/load/open_file.py b/client/ayon_core/plugins/load/open_file.py deleted file mode 100644 index 3b5fbbc0c9..0000000000 --- a/client/ayon_core/plugins/load/open_file.py +++ /dev/null @@ -1,36 +0,0 @@ -import sys -import os -import subprocess - -from ayon_core.pipeline import load - - -def open(filepath): - """Open file with system default executable""" - if sys.platform.startswith('darwin'): - subprocess.call(('open', filepath)) - elif os.name == 'nt': - os.startfile(filepath) - elif os.name == 'posix': - subprocess.call(('xdg-open', filepath)) - - -class OpenFile(load.LoaderPlugin): - """Open Image Sequence or Video with system default""" - - product_types = {"render2d"} - representations = {"*"} - - label = "Open" - order = -10 - icon = "play-circle" - color = "orange" - - def load(self, context, name, namespace, data): - - path = self.filepath_from_context(context) - if not os.path.exists(path): - raise RuntimeError("File not found: {}".format(path)) - - self.log.info("Opening : {}".format(path)) - open(path) diff --git a/client/ayon_core/plugins/load/push_to_project.py b/client/ayon_core/plugins/load/push_to_project.py deleted file mode 100644 index 0b218d6ea1..0000000000 --- a/client/ayon_core/plugins/load/push_to_project.py +++ /dev/null @@ -1,56 +0,0 @@ -import os - -from ayon_core import AYON_CORE_ROOT -from ayon_core.lib import get_ayon_launcher_args, run_detached_process -from ayon_core.pipeline import load -from ayon_core.pipeline.load import LoadError - - -class PushToProject(load.ProductLoaderPlugin): - """Export selected versions to different project""" - - is_multiple_contexts_compatible = True - - representations = {"*"} - product_types = {"*"} - - label = "Push to project" - order = 35 - icon = "send" - color = "#d8d8d8" - - def load(self, contexts, name=None, namespace=None, options=None): - filtered_contexts = [ - context - for context in contexts - if context.get("project") and context.get("version") - ] - if not filtered_contexts: - raise LoadError("Nothing to push for your selection") - - folder_ids = set( - context["folder"]["id"] - for context in filtered_contexts - ) - if len(folder_ids) > 1: - raise LoadError("Please select products from single folder") - - push_tool_script_path = os.path.join( - AYON_CORE_ROOT, - "tools", - "push_to_project", - "main.py" - ) - project_name = filtered_contexts[0]["project"]["name"] - - version_ids = { - context["version"]["id"] - for context in filtered_contexts - } - - args = get_ayon_launcher_args( - push_tool_script_path, - "--project", project_name, - "--versions", ",".join(version_ids) - ) - run_detached_process(args) diff --git a/client/ayon_core/plugins/loader/copy_file.py b/client/ayon_core/plugins/loader/copy_file.py new file mode 100644 index 0000000000..a1a98a2bf0 --- /dev/null +++ b/client/ayon_core/plugins/loader/copy_file.py @@ -0,0 +1,122 
@@ +import os +import collections + +from typing import Optional, Any + +from ayon_core.pipeline.load import get_representation_path_with_anatomy +from ayon_core.pipeline.actions import ( + LoaderActionPlugin, + LoaderActionItem, + LoaderActionSelection, + LoaderActionResult, +) + + +class CopyFileActionPlugin(LoaderActionPlugin): + """Copy published file path to clipboard""" + identifier = "core.copy-action" + + def get_action_items( + self, selection: LoaderActionSelection + ) -> list[LoaderActionItem]: + repres = [] + if selection.selected_type == "representation": + repres = selection.entities.get_representations( + selection.selected_ids + ) + + if selection.selected_type == "version": + repres = selection.entities.get_versions_representations( + selection.selected_ids + ) + + output = [] + if not repres: + return output + + repre_ids_by_name = collections.defaultdict(set) + for repre in repres: + repre_ids_by_name[repre["name"]].add(repre["id"]) + + for repre_name, repre_ids in repre_ids_by_name.items(): + repre_id = next(iter(repre_ids), None) + if not repre_id: + continue + output.append( + LoaderActionItem( + label=repre_name, + order=32, + group_label="Copy file path", + data={ + "representation_id": repre_id, + "action": "copy-path", + }, + icon={ + "type": "material-symbols", + "name": "content_copy", + "color": "#999999", + } + ) + ) + output.append( + LoaderActionItem( + label=repre_name, + order=33, + group_label="Copy file", + data={ + "representation_id": repre_id, + "action": "copy-file", + }, + icon={ + "type": "material-symbols", + "name": "file_copy", + "color": "#999999", + } + ) + ) + return output + + def execute_action( + self, + selection: LoaderActionSelection, + data: dict, + form_values: dict[str, Any], + ) -> Optional[LoaderActionResult]: + from qtpy import QtWidgets, QtCore + + action = data["action"] + repre_id = data["representation_id"] + repre = next(iter(selection.entities.get_representations({repre_id}))) + path = get_representation_path_with_anatomy( + repre, selection.get_project_anatomy() + ) + self.log.info(f"Added file path to clipboard: {path}") + + clipboard = QtWidgets.QApplication.clipboard() + if not clipboard: + return LoaderActionResult( + "Failed to copy file path to clipboard.", + success=False, + ) + + if action == "copy-path": + # Set to Clipboard + clipboard.setText(os.path.normpath(path)) + + return LoaderActionResult( + "Path stored to clipboard...", + success=True, + ) + + # Build mime data for clipboard + data = QtCore.QMimeData() + url = QtCore.QUrl.fromLocalFile(path) + data.setUrls([url]) + + # Set to Clipboard + clipboard.setMimeData(data) + + return LoaderActionResult( + "File added to clipboard...", + success=True, + ) diff --git a/client/ayon_core/plugins/loader/delete_old_versions.py b/client/ayon_core/plugins/loader/delete_old_versions.py new file mode 100644 index 0000000000..ce67df1c0c --- /dev/null +++ b/client/ayon_core/plugins/loader/delete_old_versions.py @@ -0,0 +1,388 @@ +from __future__ import annotations + +import os +import collections +import json +import shutil +from typing import Optional, Any + +from ayon_api.operations import OperationsSession + +from ayon_core.lib import ( + format_file_size, + AbstractAttrDef, + NumberDef, + BoolDef, + TextDef, + UILabelDef, +) +from ayon_core.pipeline import Anatomy +from ayon_core.pipeline.actions import ( + ActionForm, + LoaderActionPlugin, + LoaderActionItem, + LoaderActionSelection, + LoaderActionResult, +) + + +class DeleteOldVersions(LoaderActionPlugin): + 
"""Deletes specific number of old version""" + + is_multiple_contexts_compatible = True + sequence_splitter = "__sequence_splitter__" + + requires_confirmation = True + + def get_action_items( + self, selection: LoaderActionSelection + ) -> list[LoaderActionItem]: + # Do not show in hosts + if self.host_name is not None: + return [] + + versions = selection.get_selected_version_entities() + if not versions: + return [] + + product_ids = { + version["productId"] + for version in versions + } + + return [ + LoaderActionItem( + label="Delete Versions", + order=35, + data={ + "product_ids": list(product_ids), + "action": "delete-versions", + }, + icon={ + "type": "material-symbols", + "name": "delete", + "color": "#d8d8d8", + } + ), + LoaderActionItem( + label="Calculate Versions size", + order=34, + data={ + "product_ids": list(product_ids), + "action": "calculate-versions-size", + }, + icon={ + "type": "material-symbols", + "name": "auto_delete", + "color": "#d8d8d8", + } + ) + ] + + def execute_action( + self, + selection: LoaderActionSelection, + data: dict[str, Any], + form_values: dict[str, Any], + ) -> Optional[LoaderActionResult]: + step = form_values.get("step") + action = data["action"] + versions_to_keep = form_values.get("versions_to_keep") + remove_publish_folder = form_values.get("remove_publish_folder") + if step is None: + return self._first_step( + action, + versions_to_keep, + remove_publish_folder, + ) + + if versions_to_keep is None: + versions_to_keep = 2 + if remove_publish_folder is None: + remove_publish_folder = False + + product_ids = data["product_ids"] + if step == "prepare-data": + return self._prepare_data_step( + action, + versions_to_keep, + remove_publish_folder, + product_ids, + selection, + ) + + if step == "delete-versions": + return self._delete_versions_step( + selection.project_name, form_values + ) + return None + + def _first_step( + self, + action: str, + versions_to_keep: Optional[int], + remove_publish_folder: Optional[bool], + ) -> LoaderActionResult: + fields: list[AbstractAttrDef] = [ + TextDef( + "step", + visible=False, + ), + NumberDef( + "versions_to_keep", + label="Versions to keep", + minimum=0, + default=2, + ), + ] + if action == "delete-versions": + fields.append( + BoolDef( + "remove_publish_folder", + label="Remove publish folder", + default=False, + ) + ) + + form_values = { + key: value + for key, value in ( + ("remove_publish_folder", remove_publish_folder), + ("versions_to_keep", versions_to_keep), + ) + if value is not None + } + form_values["step"] = "prepare-data" + return LoaderActionResult( + form=ActionForm( + title="Delete Old Versions", + fields=fields, + ), + form_values=form_values + ) + + def _prepare_data_step( + self, + action: str, + versions_to_keep: int, + remove_publish_folder: bool, + entity_ids: set[str], + selection: LoaderActionSelection, + ): + versions_by_product_id = collections.defaultdict(list) + for version in selection.entities.get_products_versions(entity_ids): + # Keep hero version + if versions_to_keep != 0 and version["version"] < 0: + continue + versions_by_product_id[version["productId"]].append(version) + + versions_to_delete = [] + for product_id, versions in versions_by_product_id.items(): + if versions_to_keep == 0: + versions_to_delete.extend(versions) + continue + + if len(versions) <= versions_to_keep: + continue + + versions.sort(key=lambda v: v["version"]) + for _ in range(versions_to_keep): + if not versions: + break + versions.pop(-1) + versions_to_delete.extend(versions) + + 
self.log.debug( + f"Collected versions to delete ({len(versions_to_delete)})" + ) + + version_ids = { + version["id"] + for version in versions_to_delete + } + if not version_ids: + return LoaderActionResult( + message="Skipping. Nothing to delete.", + success=False, + ) + + project = selection.entities.get_project() + anatomy = Anatomy(project["name"], project_entity=project) + + repres = selection.entities.get_versions_representations(version_ids) + + self.log.debug( + f"Collected representations to remove ({len(repres)})" + ) + + filepaths_by_repre_id = {} + repre_ids_by_version_id = { + version_id: [] + for version_id in version_ids + } + for repre in repres: + repre_ids_by_version_id[repre["versionId"]].append(repre["id"]) + filepaths_by_repre_id[repre["id"]] = [ + anatomy.fill_root(repre_file["path"]) + for repre_file in repre["files"] + ] + + size = 0 + for filepaths in filepaths_by_repre_id.values(): + for filepath in filepaths: + if os.path.exists(filepath): + size += os.path.getsize(filepath) + + if action == "calculate-versions-size": + return LoaderActionResult( + message="Calculated size", + success=True, + form=ActionForm( + title="Calculated versions size", + fields=[ + UILabelDef( + f"Total size of files: {format_file_size(size)}" + ), + ], + submit_label=None, + cancel_label="Close", + ), + ) + + form, form_values = self._get_delete_form( + size, + remove_publish_folder, + list(version_ids), + repre_ids_by_version_id, + filepaths_by_repre_id, + ) + return LoaderActionResult( + form=form, + form_values=form_values + ) + + def _delete_versions_step( + self, project_name: str, form_values: dict[str, Any] + ) -> LoaderActionResult: + delete_data = json.loads(form_values["delete_data"]) + remove_publish_folder = form_values["remove_publish_folder"] + if form_values["delete_value"].lower() != "delete": + size = delete_data["size"] + form, form_values = self._get_delete_form( + size, + remove_publish_folder, + delete_data["version_ids"], + delete_data["repre_ids_by_version_id"], + delete_data["filepaths_by_repre_id"], + True, + ) + return LoaderActionResult( + form=form, + form_values=form_values, + ) + + version_ids = delete_data["version_ids"] + repre_ids_by_version_id = delete_data["repre_ids_by_version_id"] + filepaths_by_repre_id = delete_data["filepaths_by_repre_id"] + op_session = OperationsSession() + total_versions = len(version_ids) + try: + for version_idx, version_id in enumerate(version_ids): + self.log.info( + f"Progressing version {version_idx + 1}/{total_versions}" + ) + for repre_id in repre_ids_by_version_id[version_id]: + for filepath in filepaths_by_repre_id[repre_id]: + publish_folder = os.path.dirname(filepath) + if remove_publish_folder: + if os.path.exists(publish_folder): + shutil.rmtree( + publish_folder, ignore_errors=True + ) + continue + + if os.path.exists(filepath): + os.remove(filepath) + + op_session.delete_entity( + project_name, "representation", repre_id + ) + op_session.delete_entity( + project_name, "version", version_id + ) + self.log.info("All done") + + except Exception: + self.log.error("Failed to delete versions.", exc_info=True) + return LoaderActionResult( + message="Failed to delete versions.", + success=False, + ) + + finally: + op_session.commit() + + return LoaderActionResult( + message="Deleted versions", + success=True, + ) + + def _get_delete_form( + self, + size: int, + remove_publish_folder: bool, + version_ids: list[str], + repre_ids_by_version_id: dict[str, list[str]], + filepaths_by_repre_id: dict[str, list[str]], + 
repeated: bool = False, + ) -> tuple[ActionForm, dict[str, Any]]: + versions_len = len(repre_ids_by_version_id) + fields = [ + UILabelDef( + f"Going to delete {versions_len} versions
" + f"- total size of files: {format_file_size(size)}
" + ), + UILabelDef("Are you sure you want to continue?"), + TextDef( + "delete_value", + placeholder="Type 'delete' to confirm...", + ), + ] + if repeated: + fields.append(UILabelDef( + "*Please fill in '**delete**' to confirm deletion.*" + )) + fields.extend([ + TextDef( + "delete_data", + visible=False, + ), + TextDef( + "step", + visible=False, + ), + BoolDef( + "remove_publish_folder", + label="Remove publish folder", + default=False, + visible=False, + ) + ]) + + form = ActionForm( + title="Delete versions", + submit_label="Delete", + cancel_label="Close", + fields=fields, + ) + form_values = { + "delete_data": json.dumps({ + "size": size, + "version_ids": version_ids, + "repre_ids_by_version_id": repre_ids_by_version_id, + "filepaths_by_repre_id": filepaths_by_repre_id, + }), + "step": "delete-versions", + "remove_publish_folder": remove_publish_folder, + } + return form, form_values diff --git a/client/ayon_core/plugins/load/delivery.py b/client/ayon_core/plugins/loader/delivery.py similarity index 88% rename from client/ayon_core/plugins/load/delivery.py rename to client/ayon_core/plugins/loader/delivery.py index 406040d936..5141bb1d3b 100644 --- a/client/ayon_core/plugins/load/delivery.py +++ b/client/ayon_core/plugins/loader/delivery.py @@ -1,5 +1,6 @@ import platform from collections import defaultdict +from typing import Optional, Any import ayon_api from qtpy import QtWidgets, QtCore, QtGui @@ -10,7 +11,12 @@ from ayon_core.lib import ( collect_frames, get_datetime_data, ) -from ayon_core.pipeline import load, Anatomy +from ayon_core.pipeline import Anatomy +from ayon_core.pipeline.actions import ( + LoaderSimpleActionPlugin, + LoaderActionSelection, + LoaderActionResult, +) from ayon_core.pipeline.load import get_representation_path_with_anatomy from ayon_core.pipeline.delivery import ( get_format_dict, @@ -20,43 +26,72 @@ from ayon_core.pipeline.delivery import ( ) -class Delivery(load.ProductLoaderPlugin): - """Export selected versions to folder structure from Template""" - - is_multiple_contexts_compatible = True - sequence_splitter = "__sequence_splitter__" - - representations = {"*"} - product_types = {"*"} - tool_names = ["library_loader"] - +class DeliveryAction(LoaderSimpleActionPlugin): + identifier = "core.delivery" label = "Deliver Versions" order = 35 - icon = "upload" - color = "#d8d8d8" + icon = { + "type": "material-symbols", + "name": "upload", + "color": "#d8d8d8", + } - def message(self, text): - msgBox = QtWidgets.QMessageBox() - msgBox.setText(text) - msgBox.setStyleSheet(style.load_stylesheet()) - msgBox.setWindowFlags( - msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint + def is_compatible(self, selection: LoaderActionSelection) -> bool: + if self.host_name is not None: + return False + + if not selection.selected_ids: + return False + + return ( + selection.versions_selected() + or selection.representations_selected() ) - msgBox.exec_() - def load(self, contexts, name=None, namespace=None, options=None): + def execute_simple_action( + self, + selection: LoaderActionSelection, + form_values: dict[str, Any], + ) -> Optional[LoaderActionResult]: + version_ids = set() + if selection.selected_type == "representation": + versions = selection.entities.get_representations_versions( + selection.selected_ids + ) + version_ids = {version["id"] for version in versions} + + if selection.selected_type == "version": + version_ids = set(selection.selected_ids) + + if not version_ids: + return LoaderActionResult( + message="No versions found in your selection", + 
success=False, + ) + try: - dialog = DeliveryOptionsDialog(contexts, self.log) + # TODO run the tool in subprocess + dialog = DeliveryOptionsDialog( + selection.project_name, version_ids, self.log + ) dialog.exec_() except Exception: self.log.error("Failed to deliver versions.", exc_info=True) + return LoaderActionResult() + class DeliveryOptionsDialog(QtWidgets.QDialog): """Dialog to select template where to deliver selected representations.""" - def __init__(self, contexts, log=None, parent=None): - super(DeliveryOptionsDialog, self).__init__(parent=parent) + def __init__( + self, + project_name, + version_ids, + log=None, + parent=None, + ): + super().__init__(parent=parent) self.setWindowTitle("AYON - Deliver versions") icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) @@ -70,13 +105,12 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): self.setStyleSheet(style.load_stylesheet()) - project_name = contexts[0]["project"]["name"] self.anatomy = Anatomy(project_name) self._representations = None self.log = log self.currently_uploaded = 0 - self._set_representations(project_name, contexts) + self._set_representations(project_name, version_ids) dropdown = QtWidgets.QComboBox() self.templates = self._get_templates(self.anatomy) @@ -316,9 +350,7 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): return templates - def _set_representations(self, project_name, contexts): - version_ids = {context["version"]["id"] for context in contexts} - + def _set_representations(self, project_name, version_ids): repres = list(ayon_api.get_representations( project_name, version_ids=version_ids )) diff --git a/client/ayon_core/plugins/load/export_otio.py b/client/ayon_core/plugins/loader/export_otio.py similarity index 88% rename from client/ayon_core/plugins/load/export_otio.py rename to client/ayon_core/plugins/loader/export_otio.py index 8094490246..c86a72700e 100644 --- a/client/ayon_core/plugins/load/export_otio.py +++ b/client/ayon_core/plugins/loader/export_otio.py @@ -2,11 +2,10 @@ import logging import os from pathlib import Path from collections import defaultdict +from typing import Any, Optional from qtpy import QtWidgets, QtCore, QtGui -from ayon_api import get_representations -from ayon_core.pipeline import load, Anatomy from ayon_core import resources, style from ayon_core.lib.transcoding import ( IMAGE_EXTENSIONS, @@ -16,9 +15,16 @@ from ayon_core.lib import ( get_ffprobe_data, is_oiio_supported, ) +from ayon_core.pipeline import Anatomy from ayon_core.pipeline.load import get_representation_path_with_anatomy from ayon_core.tools.utils import show_message_dialog +from ayon_core.pipeline.actions import ( + LoaderSimpleActionPlugin, + LoaderActionSelection, + LoaderActionResult, +) + OTIO = None FRAME_SPLITTER = "__frame_splitter__" @@ -30,34 +36,99 @@ def _import_otio(): OTIO = opentimelineio -class ExportOTIO(load.ProductLoaderPlugin): - """Export selected versions to OpenTimelineIO.""" - - is_multiple_contexts_compatible = True - sequence_splitter = "__sequence_splitter__" - - representations = {"*"} - product_types = {"*"} - tool_names = ["library_loader"] - +class ExportOTIO(LoaderSimpleActionPlugin): + identifier = "core.export-otio" label = "Export OTIO" + group_label = None order = 35 - icon = "save" - color = "#d8d8d8" + icon = { + "type": "material-symbols", + "name": "save", + "color": "#d8d8d8", + } - def load(self, contexts, name=None, namespace=None, options=None): + def is_compatible( + self, selection: LoaderActionSelection + ) -> bool: + # Don't show in hosts + if 
self.host_name is not None: + return False + + return selection.versions_selected() + + def execute_simple_action( + self, + selection: LoaderActionSelection, + form_values: dict[str, Any], + ) -> Optional[LoaderActionResult]: _import_otio() + version_ids = set(selection.selected_ids) + + versions_by_id = { + version["id"]: version + for version in selection.entities.get_versions(version_ids) + } + product_ids = { + version["productId"] + for version in versions_by_id.values() + } + products_by_id = { + product["id"]: product + for product in selection.entities.get_products(product_ids) + } + folder_ids = { + product["folderId"] + for product in products_by_id.values() + } + folder_by_id = { + folder["id"]: folder + for folder in selection.entities.get_folders(folder_ids) + } + repre_entities = selection.entities.get_versions_representations( + version_ids + ) + + version_path_by_id = {} + for version in versions_by_id.values(): + version_id = version["id"] + product_id = version["productId"] + product = products_by_id[product_id] + folder_id = product["folderId"] + folder = folder_by_id[folder_id] + + version_path_by_id[version_id] = "/".join([ + folder["path"], + product["name"], + version["name"] + ]) + try: - dialog = ExportOTIOOptionsDialog(contexts, self.log) + # TODO this should probably trigger a subprocess? + dialog = ExportOTIOOptionsDialog( + selection.project_name, + versions_by_id, + repre_entities, + version_path_by_id, + self.log + ) dialog.exec_() except Exception: self.log.error("Failed to export OTIO.", exc_info=True) + return LoaderActionResult() class ExportOTIOOptionsDialog(QtWidgets.QDialog): """Dialog to select template where to deliver selected representations.""" - def __init__(self, contexts, log=None, parent=None): + def __init__( + self, + project_name, + versions_by_id, + repre_entities, + version_path_by_id, + log=None, + parent=None + ): # Not all hosts have OpenTimelineIO available. 
self.log = log @@ -73,30 +144,14 @@ class ExportOTIOOptionsDialog(QtWidgets.QDialog): | QtCore.Qt.WindowMinimizeButtonHint ) - project_name = contexts[0]["project"]["name"] - versions_by_id = { - context["version"]["id"]: context["version"] - for context in contexts - } - repre_entities = list(get_representations( - project_name, version_ids=set(versions_by_id) - )) version_by_representation_id = { repre_entity["id"]: versions_by_id[repre_entity["versionId"]] for repre_entity in repre_entities } - version_path_by_id = {} - representations_by_version_id = {} - for context in contexts: - version_id = context["version"]["id"] - if version_id in version_path_by_id: - continue - representations_by_version_id[version_id] = [] - version_path_by_id[version_id] = "/".join([ - context["folder"]["path"], - context["product"]["name"], - context["version"]["name"] - ]) + representations_by_version_id = { + version_id: [] + for version_id in versions_by_id + } for repre_entity in repre_entities: representations_by_version_id[repre_entity["versionId"]].append( diff --git a/client/ayon_core/plugins/loader/open_file.py b/client/ayon_core/plugins/loader/open_file.py new file mode 100644 index 0000000000..d226786bc2 --- /dev/null +++ b/client/ayon_core/plugins/loader/open_file.py @@ -0,0 +1,360 @@ +import os +import sys +import subprocess +import platform +import collections +import ctypes +from typing import Optional, Any, Callable + +from ayon_core.pipeline.load import get_representation_path_with_anatomy +from ayon_core.pipeline.actions import ( + LoaderActionPlugin, + LoaderActionItem, + LoaderActionSelection, + LoaderActionResult, +) + + +WINDOWS_USER_REG_PATH = ( + r"Software\Microsoft\Windows\CurrentVersion\Explorer\FileExts" + r"\{ext}\UserChoice" +) + + +class _Cache: + """Cache extensions information. + + Notes: + The cache is cleared when loader tool is refreshed so it might be + moved to other place which is not cleared on refresh. 
+
+    """
+    supported_exts: set[str] = set()
+    unsupported_exts: set[str] = set()
+
+    @classmethod
+    def is_supported(cls, ext: str) -> bool:
+        return ext in cls.supported_exts
+
+    @classmethod
+    def already_checked(cls, ext: str) -> bool:
+        return (
+            ext in cls.supported_exts
+            or ext in cls.unsupported_exts
+        )
+
+    @classmethod
+    def set_ext_support(cls, ext: str, supported: bool) -> None:
+        if supported:
+            cls.supported_exts.add(ext)
+        else:
+            cls.unsupported_exts.add(ext)
+
+
+def _extension_has_assigned_app_windows(ext: str) -> bool:
+    import winreg
+    progid = None
+    try:
+        with winreg.OpenKey(
+            winreg.HKEY_CURRENT_USER,
+            WINDOWS_USER_REG_PATH.format(ext=ext),
+        ) as k:
+            progid, _ = winreg.QueryValueEx(k, "ProgId")
+    except OSError:
+        pass
+
+    if progid:
+        return True
+
+    try:
+        with winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, ext) as k:
+            progid = winreg.QueryValueEx(k, None)[0]
+    except OSError:
+        pass
+    return bool(progid)
+
+
+def _linux_find_desktop_file(desktop: str) -> Optional[str]:
+    for dirpath in (
+        os.path.expanduser("~/.local/share/applications"),
+        "/usr/share/applications",
+        "/usr/local/share/applications",
+    ):
+        path = os.path.join(dirpath, desktop)
+        if os.path.isfile(path):
+            return path
+    return None
+
+
+def _extension_has_assigned_app_linux(ext: str) -> bool:
+    import mimetypes
+
+    mime, _ = mimetypes.guess_type(f"file{ext}")
+    if not mime:
+        return False
+
+    try:
+        # xdg-mime query default
+        desktop = subprocess.check_output(
+            ["xdg-mime", "query", "default", mime],
+            text=True
+        ).strip() or None
+    except Exception:
+        desktop = None
+
+    if not desktop:
+        return False
+
+    desktop_path = _linux_find_desktop_file(desktop)
+    if not desktop_path:
+        return False
+    # '_linux_find_desktop_file' already verified that the .desktop file
+    # exists, so a default application is assigned for this mime type.
+    return True
+
+
+def _extension_has_assigned_app_macos(ext: str) -> bool:
+    # Uses CoreServices/LaunchServices and Uniform Type Identifiers via
+    # ctypes.
+    # Steps: ext -> UTI -> default handler bundle id for role 'all'.
+ cf = ctypes.cdll.LoadLibrary( + "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation" + ) + ls = ctypes.cdll.LoadLibrary( + "/System/Library/Frameworks/CoreServices.framework/Frameworks" + "/LaunchServices.framework/LaunchServices" + ) + + # CFType/CFString helpers + CFStringRef = ctypes.c_void_p + CFAllocatorRef = ctypes.c_void_p + CFIndex = ctypes.c_long + + kCFStringEncodingUTF8 = 0x08000100 + + cf.CFStringCreateWithCString.argtypes = [ + CFAllocatorRef, ctypes.c_char_p, ctypes.c_uint32 + ] + cf.CFStringCreateWithCString.restype = CFStringRef + + cf.CFStringGetCStringPtr.argtypes = [CFStringRef, ctypes.c_uint32] + cf.CFStringGetCStringPtr.restype = ctypes.c_char_p + + cf.CFStringGetCString.argtypes = [ + CFStringRef, ctypes.c_char_p, CFIndex, ctypes.c_uint32 + ] + cf.CFStringGetCString.restype = ctypes.c_bool + + cf.CFRelease.argtypes = [ctypes.c_void_p] + cf.CFRelease.restype = None + + try: + UTTypeCreatePreferredIdentifierForTag = ctypes.cdll.LoadLibrary( + "/System/Library/Frameworks/CoreServices.framework/CoreServices" + ).UTTypeCreatePreferredIdentifierForTag + except OSError: + # Fallback path (older systems) + UTTypeCreatePreferredIdentifierForTag = ( + ls.UTTypeCreatePreferredIdentifierForTag + ) + UTTypeCreatePreferredIdentifierForTag.argtypes = [ + CFStringRef, CFStringRef, CFStringRef + ] + UTTypeCreatePreferredIdentifierForTag.restype = CFStringRef + + LSRolesMask = ctypes.c_uint + kLSRolesAll = 0xFFFFFFFF + ls.LSCopyDefaultRoleHandlerForContentType.argtypes = [ + CFStringRef, LSRolesMask + ] + ls.LSCopyDefaultRoleHandlerForContentType.restype = CFStringRef + + def cfstr(py_s: str) -> CFStringRef: + return cf.CFStringCreateWithCString( + None, py_s.encode("utf-8"), kCFStringEncodingUTF8 + ) + + def to_pystr(cf_s: CFStringRef) -> Optional[str]: + if not cf_s: + return None + # Try fast pointer + ptr = cf.CFStringGetCStringPtr(cf_s, kCFStringEncodingUTF8) + if ptr: + return ctypes.cast(ptr, ctypes.c_char_p).value.decode("utf-8") + + # Fallback buffer + buf_size = 1024 + buf = ctypes.create_string_buffer(buf_size) + ok = cf.CFStringGetCString( + cf_s, buf, buf_size, kCFStringEncodingUTF8 + ) + if ok: + return buf.value.decode("utf-8") + return None + + # Convert extension (without dot) to UTI + tag_class = cfstr("public.filename-extension") + tag_value = cfstr(ext.lstrip(".")) + + uti_ref = UTTypeCreatePreferredIdentifierForTag( + tag_class, tag_value, None + ) + + # Clean up temporary CFStrings + for ref in (tag_class, tag_value): + if ref: + cf.CFRelease(ref) + + bundle_id = None + if uti_ref: + # Get default handler for the UTI + default_bundle_ref = ls.LSCopyDefaultRoleHandlerForContentType( + uti_ref, kLSRolesAll + ) + bundle_id = to_pystr(default_bundle_ref) + if default_bundle_ref: + cf.CFRelease(default_bundle_ref) + cf.CFRelease(uti_ref) + return bundle_id is not None + + +def _filter_supported_exts( + extensions: set[str], test_func: Callable +) -> set[str]: + filtered_exs: set[str] = set() + for ext in extensions: + if not _Cache.already_checked(ext): + _Cache.set_ext_support(ext, test_func(ext)) + if _Cache.is_supported(ext): + filtered_exs.add(ext) + return filtered_exs + + +def filter_supported_exts(extensions: set[str]) -> set[str]: + if not extensions: + return set() + platform_name = platform.system().lower() + if platform_name == "windows": + return _filter_supported_exts( + extensions, _extension_has_assigned_app_windows + ) + if platform_name == "linux": + return _filter_supported_exts( + extensions, _extension_has_assigned_app_linux + ) + 
if platform_name == "darwin": + return _filter_supported_exts( + extensions, _extension_has_assigned_app_macos + ) + return set() + + +def open_file(filepath: str) -> None: + """Open file with system default executable""" + if sys.platform.startswith("darwin"): + subprocess.call(("open", filepath)) + elif os.name == "nt": + os.startfile(filepath) + elif os.name == "posix": + subprocess.call(("xdg-open", filepath)) + + +class OpenFileAction(LoaderActionPlugin): + """Open Image Sequence or Video with system default""" + identifier = "core.open-file" + + def get_action_items( + self, selection: LoaderActionSelection + ) -> list[LoaderActionItem]: + repres = [] + if selection.selected_type == "representation": + repres = selection.entities.get_representations( + selection.selected_ids + ) + + if selection.selected_type == "version": + repres = selection.entities.get_versions_representations( + selection.selected_ids + ) + + if not repres: + return [] + + repres_by_ext = collections.defaultdict(list) + for repre in repres: + repre_context = repre.get("context") + if not repre_context: + continue + ext = repre_context.get("ext") + if not ext: + path = repre["attrib"].get("path") + if path: + ext = os.path.splitext(path)[1] + + if ext: + ext = ext.lower() + if not ext.startswith("."): + ext = f".{ext}" + repres_by_ext[ext.lower()].append(repre) + + if not repres_by_ext: + return [] + + filtered_exts = filter_supported_exts(set(repres_by_ext)) + + repre_ids_by_name = collections.defaultdict(set) + for ext in filtered_exts: + for repre in repres_by_ext[ext]: + repre_ids_by_name[repre["name"]].add(repre["id"]) + + return [ + LoaderActionItem( + label=repre_name, + group_label="Open file", + order=30, + data={"representation_ids": list(repre_ids)}, + icon={ + "type": "material-symbols", + "name": "file_open", + "color": "#ffffff", + } + ) + for repre_name, repre_ids in repre_ids_by_name.items() + ] + + def execute_action( + self, + selection: LoaderActionSelection, + data: dict[str, Any], + form_values: dict[str, Any], + ) -> Optional[LoaderActionResult]: + path = None + repre_path = None + repre_ids = data["representation_ids"] + for repre in selection.entities.get_representations(repre_ids): + repre_path = get_representation_path_with_anatomy( + repre, selection.get_project_anatomy() + ) + if os.path.exists(repre_path): + path = repre_path + break + + if path is None: + if repre_path is None: + return LoaderActionResult( + "Failed to fill representation path...", + success=False, + ) + return LoaderActionResult( + "File to open was not found...", + success=False, + ) + + self.log.info(f"Opening: {path}") + + open_file(path) + + return LoaderActionResult( + "File was opened...", + success=True, + ) diff --git a/client/ayon_core/plugins/loader/push_to_project.py b/client/ayon_core/plugins/loader/push_to_project.py new file mode 100644 index 0000000000..d2ade736fd --- /dev/null +++ b/client/ayon_core/plugins/loader/push_to_project.py @@ -0,0 +1,69 @@ +import os +from typing import Optional, Any + +from ayon_core import AYON_CORE_ROOT +from ayon_core.lib import get_ayon_launcher_args, run_detached_process + +from ayon_core.pipeline.actions import ( + LoaderSimpleActionPlugin, + LoaderActionSelection, + LoaderActionResult, +) + + +class PushToProject(LoaderSimpleActionPlugin): + identifier = "core.push-to-project" + label = "Push to project" + order = 35 + icon = { + "type": "material-symbols", + "name": "send", + "color": "#d8d8d8", + } + + def is_compatible( + self, selection: LoaderActionSelection + 
) -> bool: + if not selection.versions_selected(): + return False + + version_ids = set(selection.selected_ids) + product_ids = { + product["id"] + for product in selection.entities.get_versions_products( + version_ids + ) + } + folder_ids = { + folder["id"] + for folder in selection.entities.get_products_folders( + product_ids + ) + } + + if len(folder_ids) == 1: + return True + return False + + def execute_simple_action( + self, + selection: LoaderActionSelection, + form_values: dict[str, Any], + ) -> Optional[LoaderActionResult]: + push_tool_script_path = os.path.join( + AYON_CORE_ROOT, + "tools", + "push_to_project", + "main.py" + ) + + args = get_ayon_launcher_args( + push_tool_script_path, + "--project", selection.project_name, + "--versions", ",".join(selection.selected_ids) + ) + run_detached_process(args) + return LoaderActionResult( + message="Push to project tool opened...", + success=True, + ) diff --git a/client/ayon_core/plugins/publish/collect_anatomy_context_data.py b/client/ayon_core/plugins/publish/collect_anatomy_context_data.py index cccf392e40..5d2ecec433 100644 --- a/client/ayon_core/plugins/publish/collect_anatomy_context_data.py +++ b/client/ayon_core/plugins/publish/collect_anatomy_context_data.py @@ -16,6 +16,7 @@ Provides: import json import pyblish.api +from ayon_core.lib import get_ayon_user_entity from ayon_core.pipeline.template_data import get_template_data @@ -55,17 +56,18 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): if folder_entity: task_entity = context.data["taskEntity"] + username = context.data["user"] + user_entity = get_ayon_user_entity(username) anatomy_data = get_template_data( project_entity, folder_entity, task_entity, - host_name, - project_settings + host_name=host_name, + settings=project_settings, + user_entity=user_entity, ) anatomy_data.update(context.data.get("datetimeData") or {}) - username = context.data["user"] - anatomy_data["user"] = username # Backwards compatibility for 'username' key anatomy_data["username"] = username diff --git a/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py b/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py index 2cb2297bf7..554cf42aa2 100644 --- a/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py +++ b/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py @@ -301,8 +301,6 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): product_name = instance.data["productName"] product_type = instance.data["productType"] anatomy_data.update({ - "family": product_type, - "subset": product_name, "product": { "name": product_name, "type": product_type, diff --git a/client/ayon_core/plugins/publish/collect_audio.py b/client/ayon_core/plugins/publish/collect_audio.py index 2949ff1196..273e966cfd 100644 --- a/client/ayon_core/plugins/publish/collect_audio.py +++ b/client/ayon_core/plugins/publish/collect_audio.py @@ -52,7 +52,7 @@ class CollectAudio(pyblish.api.ContextPlugin): context, self.__class__ ): # Skip instances that already have audio filled - if instance.data.get("audio"): + if "audio" in instance.data: self.log.debug( "Skipping Audio collection. 
It is already collected" ) diff --git a/client/ayon_core/plugins/publish/collect_managed_staging_dir.py b/client/ayon_core/plugins/publish/collect_managed_staging_dir.py index 62b007461a..ee88dadfa0 100644 --- a/client/ayon_core/plugins/publish/collect_managed_staging_dir.py +++ b/client/ayon_core/plugins/publish/collect_managed_staging_dir.py @@ -25,7 +25,7 @@ class CollectManagedStagingDir(pyblish.api.InstancePlugin): Location of the folder is configured in: `ayon+anatomy://_/templates/staging`. - Which family/task type/subset is applicable is configured in: + Which product type/task type/product is applicable is configured in: `ayon+settings://core/tools/publish/custom_staging_dir_profiles` """ diff --git a/client/ayon_core/plugins/publish/collect_otio_frame_ranges.py b/client/ayon_core/plugins/publish/collect_otio_frame_ranges.py index d68970d428..543277f37e 100644 --- a/client/ayon_core/plugins/publish/collect_otio_frame_ranges.py +++ b/client/ayon_core/plugins/publish/collect_otio_frame_ranges.py @@ -71,6 +71,12 @@ class CollectOtioRanges(pyblish.api.InstancePlugin): import opentimelineio as otio otio_clip = instance.data["otioClip"] + if isinstance( + otio_clip.media_reference, + otio.schema.MissingReference + ): + self.log.info("Clip has no media reference") + return # Collect timeline ranges if workfile start frame is available if "workfileFrameStart" in instance.data: diff --git a/client/ayon_core/plugins/publish/collect_otio_subset_resources.py b/client/ayon_core/plugins/publish/collect_otio_subset_resources.py index 275b8a7f55..4d3c1cfb13 100644 --- a/client/ayon_core/plugins/publish/collect_otio_subset_resources.py +++ b/client/ayon_core/plugins/publish/collect_otio_subset_resources.py @@ -60,6 +60,13 @@ class CollectOtioSubsetResources( # get basic variables otio_clip = instance.data["otioClip"] + if isinstance( + otio_clip.media_reference, + otio.schema.MissingReference + ): + self.log.info("Clip has no media reference") + return + otio_available_range = otio_clip.available_range() media_fps = otio_available_range.start_time.rate available_duration = otio_available_range.duration.value diff --git a/client/ayon_core/plugins/publish/collect_scene_loaded_versions.py b/client/ayon_core/plugins/publish/collect_scene_loaded_versions.py index 524381f656..54eeefc60b 100644 --- a/client/ayon_core/plugins/publish/collect_scene_loaded_versions.py +++ b/client/ayon_core/plugins/publish/collect_scene_loaded_versions.py @@ -1,3 +1,5 @@ +from __future__ import annotations +from typing import Any import ayon_api import ayon_api.utils @@ -11,20 +13,6 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder + 0.0001 label = "Collect Versions Loaded in Scene" - hosts = [ - "aftereffects", - "blender", - "celaction", - "fusion", - "harmony", - "hiero", - "houdini", - "maya", - "nuke", - "photoshop", - "resolve", - "tvpaint" - ] def process(self, context): host = registered_host() @@ -46,6 +34,8 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): self.log.debug("No loaded containers found in scene.") return + containers = self._filter_invalid_containers(containers) + repre_ids = { container["representation"] for container in containers @@ -92,3 +82,28 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): self.log.debug(f"Collected {len(loaded_versions)} loaded versions.") context.data["loadedVersions"] = loaded_versions + + def _filter_invalid_containers( + self, + containers: list[dict[str, Any]] + ) -> list[dict[str, Any]]: + 
"""Filter out invalid containers lacking required keys. + + Skip any invalid containers that lack 'representation' or 'name' + keys to avoid KeyError. + """ + # Only filter by what's required for this plug-in instead of validating + # a full container schema. + required_keys = {"name", "representation"} + valid = [] + for container in containers: + missing = [key for key in required_keys if key not in container] + if missing: + self.log.warning( + "Skipping invalid container, missing required keys:" + " {}. {}".format(", ".join(missing), container) + ) + continue + valid.append(container) + + return valid diff --git a/client/ayon_core/plugins/publish/extract_burnin.py b/client/ayon_core/plugins/publish/extract_burnin.py index 351d85a97f..6e7b4ef07e 100644 --- a/client/ayon_core/plugins/publish/extract_burnin.py +++ b/client/ayon_core/plugins/publish/extract_burnin.py @@ -316,22 +316,8 @@ class ExtractBurnin(publish.Extractor): burnin_values = {} for key in self.positions: value = burnin_def.get(key) - if not value: - continue - # TODO remove replacements - burnin_values[key] = ( - value - .replace("{task}", "{task[name]}") - .replace("{product[name]}", "{subset}") - .replace("{Product[name]}", "{Subset}") - .replace("{PRODUCT[NAME]}", "{SUBSET}") - .replace("{product[type]}", "{family}") - .replace("{Product[type]}", "{Family}") - .replace("{PRODUCT[TYPE]}", "{FAMILY}") - .replace("{folder[name]}", "{asset}") - .replace("{Folder[name]}", "{Asset}") - .replace("{FOLDER[NAME]}", "{ASSET}") - ) + if value: + burnin_values[key] = value # Remove "delete" tag from new representation if "delete" in new_repre["tags"]: diff --git a/client/ayon_core/plugins/publish/extract_color_transcode.py b/client/ayon_core/plugins/publish/extract_color_transcode.py index 8b351c7f31..63a73e07fa 100644 --- a/client/ayon_core/plugins/publish/extract_color_transcode.py +++ b/client/ayon_core/plugins/publish/extract_color_transcode.py @@ -11,6 +11,7 @@ from ayon_core.lib import ( is_oiio_supported, ) from ayon_core.lib.transcoding import ( + MissingRGBAChannelsError, oiio_color_convert, ) @@ -86,15 +87,19 @@ class ExtractOIIOTranscode(publish.Extractor): profile_output_defs = profile["outputs"] new_representations = [] repres = instance.data["representations"] - for idx, repre in enumerate(list(repres)): - # target space, display and view might be defined upstream - # TODO: address https://github.com/ynput/ayon-core/pull/1268#discussion_r2156555474 - # Implement upstream logic to handle target_colorspace, - # target_display, target_view in other DCCs - target_colorspace = False - target_display = instance.data.get("colorspaceDisplay") - target_view = instance.data.get("colorspaceView") + scene_display = instance.data.get( + "sceneDisplay", + # Backward compatibility + instance.data.get("colorspaceDisplay") + ) + scene_view = instance.data.get( + "sceneView", + # Backward compatibility + instance.data.get("colorspaceView") + ) + + for idx, repre in enumerate(list(repres)): self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"])) if not self._repre_is_valid(repre): continue @@ -111,7 +116,17 @@ class ExtractOIIOTranscode(publish.Extractor): self.log.warning("Config file doesn't exist, skipping") continue + # Get representation files to convert + if isinstance(repre["files"], list): + repre_files_to_convert = copy.deepcopy(repre["files"]) + else: + repre_files_to_convert = [repre["files"]] + + # Process each output definition for output_def in profile_output_defs: + # Local copy to avoid accidental mutable 
changes + files_to_convert = list(repre_files_to_convert) + output_name = output_def["name"] new_repre = copy.deepcopy(repre) @@ -122,11 +137,6 @@ class ExtractOIIOTranscode(publish.Extractor): ) new_repre["stagingDir"] = new_staging_dir - if isinstance(new_repre["files"], list): - files_to_convert = copy.deepcopy(new_repre["files"]) - else: - files_to_convert = [new_repre["files"]] - output_extension = output_def["extension"] output_extension = output_extension.replace('.', '') self._rename_in_representation(new_repre, @@ -136,24 +146,18 @@ class ExtractOIIOTranscode(publish.Extractor): transcoding_type = output_def["transcoding_type"] - # NOTE: we use colorspace_data as the fallback values for - # the target colorspace. + # Set target colorspace/display/view based on transcoding type + target_colorspace = None + target_view = None + target_display = None if transcoding_type == "colorspace": - # TODO: Should we fallback to the colorspace - # (which used as source above) ? - # or should we compute the target colorspace from - # current view and display ? - target_colorspace = (output_def["colorspace"] or - colorspace_data.get("colorspace")) + target_colorspace = output_def["colorspace"] elif transcoding_type == "display_view": display_view = output_def["display_view"] - target_view = ( - display_view["view"] - or colorspace_data.get("view")) - target_display = ( - display_view["display"] - or colorspace_data.get("display") - ) + # If empty values are provided in output definition, + # fallback to scene display/view that is collected from DCC + target_view = display_view["view"] or scene_view + target_display = display_view["display"] or scene_display # both could be already collected by DCC, # but could be overwritten when transcoding @@ -168,30 +172,65 @@ class ExtractOIIOTranscode(publish.Extractor): additional_command_args = (output_def["oiiotool_args"] ["additional_command_args"]) - files_to_convert = self._translate_to_sequence( + sequence_files = self._translate_to_sequence( files_to_convert) - self.log.debug("Files to convert: {}".format(files_to_convert)) - for file_name in files_to_convert: + self.log.debug("Files to convert: {}".format(sequence_files)) + missing_rgba_review_channels = False + for file_name in sequence_files: + if isinstance(file_name, clique.Collection): + # Support sequences with holes by supplying + # dedicated `--frames` argument to `oiiotool` + # Create `frames` string like "1001-1002,1004,1010-1012 + # Create `filename` string like "file.#.exr" + frames = file_name.format("{ranges}").replace(" ", "") + frame_padding = file_name.padding + file_name = file_name.format("{head}#{tail}") + parallel_frames = True + elif isinstance(file_name, str): + # Single file + frames = None + frame_padding = None + parallel_frames = False + else: + raise TypeError( + f"Unsupported file name type: {type(file_name)}." + " Expected str or clique.Collection." 
+ ) + self.log.debug("Transcoding file: `{}`".format(file_name)) - input_path = os.path.join(original_staging_dir, - file_name) + input_path = os.path.join(original_staging_dir, file_name) output_path = self._get_output_file_path(input_path, new_staging_dir, output_extension) + try: + oiio_color_convert( + input_path=input_path, + output_path=output_path, + config_path=config_path, + source_colorspace=source_colorspace, + target_colorspace=target_colorspace, + target_display=target_display, + target_view=target_view, + source_display=source_display, + source_view=source_view, + additional_command_args=additional_command_args, + frames=frames, + frame_padding=frame_padding, + parallel_frames=parallel_frames, + logger=self.log + ) + except MissingRGBAChannelsError as exc: + missing_rgba_review_channels = True + self.log.error(exc) + self.log.error( + "Skipping OIIO Transcode. Unknown RGBA channels" + f" for colorspace conversion in file: {input_path}" + ) + break - oiio_color_convert( - input_path=input_path, - output_path=output_path, - config_path=config_path, - source_colorspace=source_colorspace, - target_colorspace=target_colorspace, - target_display=target_display, - target_view=target_view, - source_display=source_display, - source_view=source_view, - additional_command_args=additional_command_args, - logger=self.log - ) + if missing_rgba_review_channels: + # Stop processing this representation + break # cleanup temporary transcoded files for file_name in new_repre["files"]: @@ -217,11 +256,11 @@ class ExtractOIIOTranscode(publish.Extractor): added_review = True # If there is only 1 file outputted then convert list to - # string, cause that'll indicate that its not a sequence. + # string, because that'll indicate that it is not a sequence. if len(new_repre["files"]) == 1: new_repre["files"] = new_repre["files"][0] - # If the source representation has "review" tag, but its not + # If the source representation has "review" tag, but it's not # part of the output definition tags, then both the # representations will be transcoded in ExtractReview and # their outputs will clash in integration. @@ -271,42 +310,29 @@ class ExtractOIIOTranscode(publish.Extractor): new_repre["files"] = renamed_files def _translate_to_sequence(self, files_to_convert): - """Returns original list or list with filename formatted in single - sequence format. + """Returns original individual filepaths or list of clique.Collection. - Uses clique to find frame sequence, in this case it merges all frames - into sequence format (FRAMESTART-FRAMEEND#) and returns it. - If sequence not found, it returns original list + Uses clique to find frame sequence, and return the collections instead. + If sequence not detected in input filenames, it returns original list. Args: - files_to_convert (list): list of file names + files_to_convert (list[str]): list of file names Returns: - (list) of [file.1001-1010#.exr] or [fileA.exr, fileB.exr] + list[str | clique.Collection]: List of + filepaths ['fileA.exr', 'fileB.exr'] + or clique.Collection for a sequence. 
+ """ pattern = [clique.PATTERNS["frames"]] collections, _ = clique.assemble( files_to_convert, patterns=pattern, assume_padded_when_ambiguous=True) - if collections: if len(collections) > 1: raise ValueError( "Too many collections {}".format(collections)) - collection = collections[0] - frames = list(collection.indexes) - if collection.holes().indexes: - return files_to_convert - - # Get the padding from the collection - # This is the number of digits used in the frame numbers - padding = collection.padding - - frame_str = "{}-{}%0{}d".format(frames[0], frames[-1], padding) - file_name = "{}{}{}".format(collection.head, frame_str, - collection.tail) - - files_to_convert = [file_name] + return collections return files_to_convert diff --git a/client/ayon_core/plugins/publish/extract_oiio_postprocess.py b/client/ayon_core/plugins/publish/extract_oiio_postprocess.py new file mode 100644 index 0000000000..2b432f2a0a --- /dev/null +++ b/client/ayon_core/plugins/publish/extract_oiio_postprocess.py @@ -0,0 +1,353 @@ +from __future__ import annotations +from typing import Any, Optional +import os +import copy +import clique +import pyblish.api + +from ayon_core.pipeline import ( + publish, + get_temp_dir +) +from ayon_core.lib import ( + is_oiio_supported, + get_oiio_tool_args, + run_subprocess +) +from ayon_core.lib.transcoding import IMAGE_EXTENSIONS +from ayon_core.lib.profiles_filtering import filter_profiles + + +class ExtractOIIOPostProcess(publish.Extractor): + """Process representations through `oiiotool` with profile defined + settings so that e.g. color space conversions can be applied or images + could be converted to scanline, resized, etc. regardless of colorspace + data. + """ + + label = "OIIO Post Process" + order = pyblish.api.ExtractorOrder + 0.020 + + settings_category = "core" + + optional = True + + # Supported extensions + supported_exts = {ext.lstrip(".") for ext in IMAGE_EXTENSIONS} + + # Configurable by Settings + profiles = None + options = None + + def process(self, instance): + if instance.data.get("farm"): + self.log.debug("Should be processed on farm, skipping.") + return + + if not self.profiles: + self.log.debug("No profiles present for OIIO Post Process") + return + + if not instance.data.get("representations"): + self.log.debug("No representations, skipping.") + return + + if not is_oiio_supported(): + self.log.warning("OIIO not supported, no transcoding possible.") + return + + new_representations = [] + for idx, repre in enumerate(list(instance.data["representations"])): + self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"])) + if not self._repre_is_valid(repre): + continue + + # We check profile per representation name and extension because + # it's included in the profile check. As such, an instance may have + # a different profile applied per representation. 
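For illustration, a minimal standalone sketch of how filter_profiles can resolve a different profile per representation of the same instance; the profile dicts below are hypothetical and only mirror the filtering criteria used by _get_profile:

    from ayon_core.lib.profiles_filtering import filter_profiles

    # Hypothetical profiles: one targets "exr" representations, one "png".
    profiles = [
        {"representation_names": ["exr"], "outputs": []},
        {"representation_names": ["png"], "outputs": []},
    ]
    for repre_name in ("exr", "png", "mov"):
        profile = filter_profiles(
            profiles,
            {"representation_names": repre_name},
        )
        # "exr" and "png" each resolve to their dedicated profile,
        # while "mov" matches neither and yields None.
        print(repre_name, profile)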
+ profile = self._get_profile( + instance, + repre + ) + if not profile: + continue + + # Get representation files to convert + if isinstance(repre["files"], list): + repre_files_to_convert = copy.deepcopy(repre["files"]) + else: + repre_files_to_convert = [repre["files"]] + + added_representations = False + added_review = False + + # Process each output definition + for output_def in profile["outputs"]: + + # Local copy to avoid accidental mutable changes + files_to_convert = list(repre_files_to_convert) + + output_name = output_def["name"] + new_repre = copy.deepcopy(repre) + + original_staging_dir = new_repre["stagingDir"] + new_staging_dir = get_temp_dir( + project_name=instance.context.data["projectName"], + use_local_temp=True, + ) + new_repre["stagingDir"] = new_staging_dir + + output_extension = output_def["extension"] + output_extension = output_extension.replace('.', '') + self._rename_in_representation(new_repre, + files_to_convert, + output_name, + output_extension) + + sequence_files = self._translate_to_sequence(files_to_convert) + self.log.debug("Files to convert: {}".format(sequence_files)) + for file_name in sequence_files: + if isinstance(file_name, clique.Collection): + # Convert to filepath that can be directly converted + # by oiio like `frame.1001-1025%04d.exr` + file_name: str = file_name.format( + "{head}{range}{padding}{tail}" + ) + + self.log.debug("Transcoding file: `{}`".format(file_name)) + input_path = os.path.join(original_staging_dir, + file_name) + output_path = self._get_output_file_path(input_path, + new_staging_dir, + output_extension) + + # TODO: Support formatting with dynamic keys from the + # representation, like e.g. colorspace config, display, + # view, etc. + input_arguments: list[str] = output_def.get( + "input_arguments", [] + ) + output_arguments: list[str] = output_def.get( + "output_arguments", [] + ) + + # Prepare subprocess arguments + oiio_cmd = get_oiio_tool_args( + "oiiotool", + *input_arguments, + input_path, + *output_arguments, + "-o", + output_path + ) + + self.log.debug( + "Conversion command: {}".format(" ".join(oiio_cmd))) + run_subprocess(oiio_cmd, logger=self.log) + + # cleanup temporary transcoded files + for file_name in new_repre["files"]: + transcoded_file_path = os.path.join(new_staging_dir, + file_name) + instance.context.data["cleanupFullPaths"].append( + transcoded_file_path) + + custom_tags = output_def.get("custom_tags") + if custom_tags: + if new_repre.get("custom_tags") is None: + new_repre["custom_tags"] = [] + new_repre["custom_tags"].extend(custom_tags) + + # Add additional tags from output definition to representation + if new_repre.get("tags") is None: + new_repre["tags"] = [] + for tag in output_def["tags"]: + if tag not in new_repre["tags"]: + new_repre["tags"].append(tag) + + if tag == "review": + added_review = True + + # If there is only 1 file outputted then convert list to + # string, because that'll indicate that it is not a sequence. + if len(new_repre["files"]) == 1: + new_repre["files"] = new_repre["files"][0] + + # If the source representation has "review" tag, but it's not + # part of the output definition tags, then both the + # representations will be transcoded in ExtractReview and + # their outputs will clash in integration. 
+ if "review" in repre.get("tags", []): + added_review = True + + new_representations.append(new_repre) + added_representations = True + + if added_representations: + self._mark_original_repre_for_deletion( + repre, profile, added_review + ) + + tags = repre.get("tags") or [] + if "delete" in tags and "thumbnail" not in tags: + instance.data["representations"].remove(repre) + + instance.data["representations"].extend(new_representations) + + def _rename_in_representation(self, new_repre, files_to_convert, + output_name, output_extension): + """Replace old extension with new one everywhere in representation. + + Args: + new_repre (dict) + files_to_convert (list): of filenames from repre["files"], + standardized to always list + output_name (str): key of output definition from Settings, + if "" token used, keep original repre name + output_extension (str): extension from output definition + """ + if output_name != "passthrough": + new_repre["name"] = output_name + if not output_extension: + return + + new_repre["ext"] = output_extension + new_repre["outputName"] = output_name + + renamed_files = [] + for file_name in files_to_convert: + file_name, _ = os.path.splitext(file_name) + file_name = '{}.{}'.format(file_name, + output_extension) + renamed_files.append(file_name) + new_repre["files"] = renamed_files + + def _translate_to_sequence(self, files_to_convert): + """Returns original list or a clique.Collection of a sequence. + + Uses clique to find frame sequence Collection. + If sequence not found, it returns original list. + + Args: + files_to_convert (list): list of file names + Returns: + list[str | clique.Collection]: List of filepaths or a list + of Collections (usually one, unless there are holes) + """ + pattern = [clique.PATTERNS["frames"]] + collections, _ = clique.assemble( + files_to_convert, patterns=pattern, + assume_padded_when_ambiguous=True) + if collections: + if len(collections) > 1: + raise ValueError( + "Too many collections {}".format(collections)) + + collection = collections[0] + # TODO: Technically oiiotool supports holes in the sequence as well + # using the dedicated --frames argument to specify the frames. + # We may want to use that too so conversions of sequences with + # holes will perform faster as well. + # Separate the collection so that we have no holes/gaps per + # collection. 
+ return collection.separate() + + return files_to_convert + + def _get_output_file_path(self, input_path, output_dir, + output_extension): + """Create output file name path.""" + file_name = os.path.basename(input_path) + file_name, input_extension = os.path.splitext(file_name) + if not output_extension: + output_extension = input_extension.replace(".", "") + new_file_name = '{}.{}'.format(file_name, + output_extension) + return os.path.join(output_dir, new_file_name) + + def _get_profile( + self, + instance: pyblish.api.Instance, + repre: dict + ) -> Optional[dict[str, Any]]: + """Returns profile if it should process this instance.""" + host_name = instance.context.data["hostName"] + product_type = instance.data["productType"] + product_name = instance.data["productName"] + task_data = instance.data["anatomyData"].get("task", {}) + task_name = task_data.get("name") + task_type = task_data.get("type") + repre_name: str = repre["name"] + repre_ext: str = repre["ext"] + filtering_criteria = { + "host_names": host_name, + "product_types": product_type, + "product_names": product_name, + "task_names": task_name, + "task_types": task_type, + "representation_names": repre_name, + "representation_exts": repre_ext, + } + profile = filter_profiles(self.profiles, filtering_criteria, + logger=self.log) + + if not profile: + self.log.debug( + "Skipped instance. None of profiles in presets are for" + f" Host: \"{host_name}\" |" + f" Product types: \"{product_type}\" |" + f" Product names: \"{product_name}\" |" + f" Task name \"{task_name}\" |" + f" Task type \"{task_type}\" |" + f" Representation: \"{repre_name}\" (.{repre_ext})" + ) + + return profile + + def _repre_is_valid(self, repre: dict) -> bool: + """Validation if representation should be processed. + + Args: + repre (dict): Representation which should be checked. + + Returns: + bool: False if can't be processed else True. + """ + if repre.get("ext") not in self.supported_exts: + self.log.debug(( + "Representation '{}' has unsupported extension: '{}'. Skipped." + ).format(repre["name"], repre.get("ext"))) + return False + + if not repre.get("files"): + self.log.debug(( + "Representation '{}' has empty files. Skipped." + ).format(repre["name"])) + return False + + if "delete" in repre.get("tags", []): + self.log.debug(( + "Representation '{}' has 'delete' tag. Skipped." 
+ ).format(repre["name"])) + return False + + return True + + def _mark_original_repre_for_deletion( + self, + repre: dict, + profile: dict, + added_review: bool + ): + """If new transcoded representation created, delete old.""" + if not repre.get("tags"): + repre["tags"] = [] + + delete_original = profile["delete_original"] + + if delete_original: + if "delete" not in repre["tags"]: + repre["tags"].append("delete") + + if added_review and "review" in repre["tags"]: + repre["tags"].remove("review") diff --git a/client/ayon_core/plugins/publish/extract_otio_audio_tracks.py b/client/ayon_core/plugins/publish/extract_otio_audio_tracks.py index 3a450a4f33..1df96b2918 100644 --- a/client/ayon_core/plugins/publish/extract_otio_audio_tracks.py +++ b/client/ayon_core/plugins/publish/extract_otio_audio_tracks.py @@ -1,12 +1,83 @@ +import collections +import hashlib import os import tempfile +import uuid +from pathlib import Path import pyblish +from ayon_core.lib import get_ffmpeg_tool_args, run_subprocess -from ayon_core.lib import ( - get_ffmpeg_tool_args, - run_subprocess -) + +def get_audio_instances(context): + """Return only instances which are having audio in families + + Args: + context (pyblish.context): context of publisher + + Returns: + list: list of selected instances + """ + audio_instances = [] + for instance in context: + if not instance.data.get("parent_instance_id"): + continue + if ( + instance.data["productType"] == "audio" + or instance.data.get("reviewAudio") + ): + audio_instances.append(instance) + return audio_instances + + +def map_instances_by_parent_id(context): + """Create a mapping of instances by their parent id + + Args: + context (pyblish.context): context of publisher + + Returns: + dict: mapping of instances by their parent id + """ + instances_by_parent_id = collections.defaultdict(list) + for instance in context: + parent_instance_id = instance.data.get("parent_instance_id") + if not parent_instance_id: + continue + instances_by_parent_id[parent_instance_id].append(instance) + return instances_by_parent_id + + +class CollectParentAudioInstanceAttribute(pyblish.api.ContextPlugin): + """Collect audio instance attribute""" + + order = pyblish.api.CollectorOrder + label = "Collect Audio Instance Attribute" + + def process(self, context): + + audio_instances = get_audio_instances(context) + + # no need to continue if no audio instances found + if not audio_instances: + return + + # create mapped instances by parent id + instances_by_parent_id = map_instances_by_parent_id(context) + + # distribute audio related attribute + for audio_instance in audio_instances: + parent_instance_id = audio_instance.data["parent_instance_id"] + + for sibl_instance in instances_by_parent_id[parent_instance_id]: + # exclude the same audio instance + if sibl_instance.id == audio_instance.id: + continue + self.log.info( + "Adding audio to Sibling instance: " + f"{sibl_instance.data['label']}" + ) + sibl_instance.data["audio"] = None class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): @@ -19,7 +90,8 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): order = pyblish.api.ExtractorOrder - 0.44 label = "Extract OTIO Audio Tracks" - hosts = ["hiero", "resolve", "flame"] + + temp_dir_path = None def process(self, context): """Convert otio audio track's content to audio representations @@ -28,13 +100,14 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): context (pyblish.Context): context of publisher """ # split the long audio file to peces devided by isntances - 
audio_instances = self.get_audio_instances(context) - self.log.debug("Audio instances: {}".format(len(audio_instances))) + audio_instances = get_audio_instances(context) - if len(audio_instances) < 1: - self.log.info("No audio instances available") + # no need to continue if no audio instances found + if not audio_instances: return + self.log.debug("Audio instances: {}".format(len(audio_instances))) + # get sequence otio_timeline = context.data["otioTimeline"] @@ -44,8 +117,8 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): if not audio_inputs: return - # temp file - audio_temp_fpath = self.create_temp_file("audio") + # Convert all available audio into single file for trimming + audio_temp_fpath = self.create_temp_file("timeline_audio_track") # create empty audio with longest duration empty = self.create_empty(audio_inputs) @@ -59,19 +132,25 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): # remove empty os.remove(empty["mediaPath"]) + # create mapped instances by parent id + instances_by_parent_id = map_instances_by_parent_id(context) + # cut instance framerange and add to representations - self.add_audio_to_instances(audio_temp_fpath, audio_instances) + self.add_audio_to_instances( + audio_temp_fpath, audio_instances, instances_by_parent_id) # remove full mixed audio file os.remove(audio_temp_fpath) - def add_audio_to_instances(self, audio_file, instances): + def add_audio_to_instances( + self, audio_file, audio_instances, instances_by_parent_id): created_files = [] - for inst in instances: - name = inst.data["folderPath"] + for audio_instance in audio_instances: + folder_path = audio_instance.data["folderPath"] + file_suffix = folder_path.replace("/", "-") - recycling_file = [f for f in created_files if name in f] - audio_clip = inst.data["otioClip"] + recycling_file = [f for f in created_files if file_suffix in f] + audio_clip = audio_instance.data["otioClip"] audio_range = audio_clip.range_in_parent() duration = audio_range.duration.to_frames() @@ -84,68 +163,70 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): start_sec = relative_start_time.to_seconds() duration_sec = audio_range.duration.to_seconds() - # temp audio file - audio_fpath = self.create_temp_file(name) + # shot related audio file + shot_audio_fpath = self.create_temp_file(file_suffix) cmd = get_ffmpeg_tool_args( "ffmpeg", "-ss", str(start_sec), "-t", str(duration_sec), "-i", audio_file, - audio_fpath + shot_audio_fpath ) # run subprocess self.log.debug("Executing: {}".format(" ".join(cmd))) run_subprocess(cmd, logger=self.log) - else: - audio_fpath = recycling_file.pop() - if "audio" in ( - inst.data["families"] + [inst.data["productType"]] - ): + # add generated audio file to created files for recycling + if shot_audio_fpath not in created_files: + created_files.append(shot_audio_fpath) + else: + shot_audio_fpath = recycling_file.pop() + + # audio file needs to be published as representation + if audio_instance.data["productType"] == "audio": # create empty representation attr - if "representations" not in inst.data: - inst.data["representations"] = [] + if "representations" not in audio_instance.data: + audio_instance.data["representations"] = [] # add to representations - inst.data["representations"].append({ - "files": os.path.basename(audio_fpath), + audio_instance.data["representations"].append({ + "files": os.path.basename(shot_audio_fpath), "name": "wav", "ext": "wav", - "stagingDir": os.path.dirname(audio_fpath), + "stagingDir": os.path.dirname(shot_audio_fpath), "frameStart": 0, 
"frameEnd": duration }) - elif "reviewAudio" in inst.data.keys(): - audio_attr = inst.data.get("audio") or [] + # audio file needs to be reviewable too + elif "reviewAudio" in audio_instance.data.keys(): + audio_attr = audio_instance.data.get("audio") or [] audio_attr.append({ - "filename": audio_fpath, + "filename": shot_audio_fpath, "offset": 0 }) - inst.data["audio"] = audio_attr + audio_instance.data["audio"] = audio_attr - # add generated audio file to created files for recycling - if audio_fpath not in created_files: - created_files.append(audio_fpath) - - def get_audio_instances(self, context): - """Return only instances which are having audio in families - - Args: - context (pyblish.context): context of publisher - - Returns: - list: list of selected instances - """ - return [ - _i for _i in context - # filter only those with audio product type or family - # and also with reviewAudio data key - if bool("audio" in ( - _i.data.get("families", []) + [_i.data["productType"]]) - ) or _i.data.get("reviewAudio") - ] + # Make sure if the audio instance is having siblink instances + # which needs audio for reviewable media so it is also added + # to its instance data + # Retrieve instance data from parent instance shot instance. + parent_instance_id = audio_instance.data["parent_instance_id"] + for sibl_instance in instances_by_parent_id[parent_instance_id]: + # exclude the same audio instance + if sibl_instance.id == audio_instance.id: + continue + self.log.info( + "Adding audio to Sibling instance: " + f"{sibl_instance.data['label']}" + ) + audio_attr = sibl_instance.data.get("audio") or [] + audio_attr.append({ + "filename": shot_audio_fpath, + "offset": 0 + }) + sibl_instance.data["audio"] = audio_attr def get_audio_track_items(self, otio_timeline): """Get all audio clips form OTIO audio tracks @@ -321,19 +402,23 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): os.remove(filters_tmp_filepath) - def create_temp_file(self, name): + def create_temp_file(self, file_suffix): """Create temp wav file Args: - name (str): name to be used in file name + file_suffix (str): name to be used in file name Returns: str: temp fpath """ - name = name.replace("/", "_") - return os.path.normpath( - tempfile.mktemp( - prefix="pyblish_tmp_{}_".format(name), - suffix=".wav" - ) - ) + extension = ".wav" + # get 8 characters + hash = hashlib.md5(str(uuid.uuid4()).encode()).hexdigest()[:8] + file_name = f"{hash}_{file_suffix}{extension}" + + if not self.temp_dir_path: + audio_temp_dir_path = tempfile.mkdtemp(prefix="AYON_audio_") + self.temp_dir_path = Path(audio_temp_dir_path) + self.temp_dir_path.mkdir(parents=True, exist_ok=True) + + return (self.temp_dir_path / file_name).as_posix() diff --git a/client/ayon_core/plugins/publish/extract_otio_review.py b/client/ayon_core/plugins/publish/extract_otio_review.py index 90215bd2c9..f338fba746 100644 --- a/client/ayon_core/plugins/publish/extract_otio_review.py +++ b/client/ayon_core/plugins/publish/extract_otio_review.py @@ -130,7 +130,7 @@ class ExtractOTIOReview( # NOTE it looks like it is set only in hiero integration res_data = {"width": self.to_width, "height": self.to_height} for key in res_data: - for meta_prefix in ("ayon.source.", "openpype.source."): + for meta_prefix in ("ayon.source", "openpype.source"): meta_key = f"{meta_prefix}.{key}" value = media_metadata.get(meta_key) if value is not None: diff --git a/client/ayon_core/plugins/publish/extract_review.py b/client/ayon_core/plugins/publish/extract_review.py index 580aa27eef..dda69470cf 
100644 --- a/client/ayon_core/plugins/publish/extract_review.py +++ b/client/ayon_core/plugins/publish/extract_review.py @@ -163,12 +163,15 @@ class ExtractReview(pyblish.api.InstancePlugin): "flame", "unreal", "batchdelivery", - "photoshop" + "photoshop", + "substancepainter", ] settings_category = "core" # Supported extensions - image_exts = {"exr", "jpg", "jpeg", "png", "dpx", "tga", "tiff", "tif"} + image_exts = { + "exr", "jpg", "jpeg", "png", "dpx", "tga", "tiff", "tif", "psd" + } video_exts = {"mov", "mp4"} supported_exts = image_exts | video_exts @@ -361,14 +364,14 @@ class ExtractReview(pyblish.api.InstancePlugin): if not filtered_output_defs: self.log.debug(( "Repre: {} - All output definitions were filtered" - " out by single frame filter. Skipping" + " out by single frame filter. Skipped." ).format(repre["name"])) continue # Skip if file is not set if first_input_path is None: self.log.warning(( - "Representation \"{}\" have empty files. Skipped." + "Representation \"{}\" has empty files. Skipped." ).format(repre["name"])) continue @@ -400,6 +403,10 @@ class ExtractReview(pyblish.api.InstancePlugin): new_staging_dir, self.log ) + # The OIIO conversion will remap the RGBA channels just to + # `R,G,B,A` so we will pass the intermediate file to FFMPEG + # without layer name. + layer_name = "" try: self._render_output_definitions( diff --git a/client/ayon_core/plugins/publish/extract_thumbnail.py b/client/ayon_core/plugins/publish/extract_thumbnail.py index b5885178d0..1dde8cfb55 100644 --- a/client/ayon_core/plugins/publish/extract_thumbnail.py +++ b/client/ayon_core/plugins/publish/extract_thumbnail.py @@ -1,8 +1,9 @@ import copy +from dataclasses import dataclass, field, fields import os import subprocess import tempfile -import re +from typing import Dict, Any, List, Tuple, Optional import pyblish.api from ayon_core.lib import ( @@ -15,8 +16,10 @@ from ayon_core.lib import ( path_to_subprocess_arg, run_subprocess, + filter_profiles, ) from ayon_core.lib.transcoding import ( + MissingRGBAChannelsError, oiio_color_convert, get_oiio_input_and_channel_args, get_oiio_info_for_input, @@ -25,6 +28,61 @@ from ayon_core.lib.transcoding import ( from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS +@dataclass +class ThumbnailDef: + """ + Data class representing the full configuration for selected profile + + Any change of controllable fields in Settings must propagate here! + """ + integrate_thumbnail: bool = False + + target_size: Dict[str, Any] = field( + default_factory=lambda: { + "type": "source", + "resize": {"width": 1920, "height": 1080}, + } + ) + + duration_split: float = 0.5 + + oiiotool_defaults: Dict[str, str] = field( + default_factory=lambda: { + "type": "colorspace", + "colorspace": "color_picking" + } + ) + + ffmpeg_args: Dict[str, List[Any]] = field( + default_factory=lambda: {"input": [], "output": []} + ) + + # Background color defined as (R, G, B, A) tuple. + # Note: Use float for alpha channel (0.0 to 1.0). + background_color: Tuple[int, int, int, float] = (0, 0, 0, 0.0) + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "ThumbnailDef": + """ + Creates a ThumbnailDef instance from a dictionary, safely ignoring + any keys in the dictionary that are not fields in the dataclass. + + Args: + data (Dict[str, Any]): The dictionary containing configuration data + + Returns: + MediaConfig: A new instance of the dataclass. 
+ """ + # Get all field names defined in the dataclass + field_names = {f.name for f in fields(cls)} + + # Filter the input dictionary to include only keys matching field names + filtered_data = {k: v for k, v in data.items() if k in field_names} + + # Unpack the filtered dictionary into the constructor + return cls(**filtered_data) + + class ExtractThumbnail(pyblish.api.InstancePlugin): """Create jpg thumbnail from sequence using ffmpeg""" @@ -51,30 +109,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): settings_category = "core" enabled = False - integrate_thumbnail = False - target_size = { - "type": "source", - "resize": { - "width": 1920, - "height": 1080 - } - } - background_color = (0, 0, 0, 0.0) - duration_split = 0.5 - # attribute presets from settings - oiiotool_defaults = { - "type": "colorspace", - "colorspace": "color_picking", - "display_and_view": { - "display": "default", - "view": "sRGB" - } - } - ffmpeg_args = { - "input": [], - "output": [] - } - product_names = [] + profiles = [] def process(self, instance): # run main process @@ -97,6 +132,13 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): instance.data["representations"].remove(repre) def _main_process(self, instance): + if not self.profiles: + self.log.debug("No profiles present for extract review thumbnail.") + return + thumbnail_def = self._get_config_from_profile(instance) + if not thumbnail_def: + return + product_name = instance.data["productName"] instance_repres = instance.data.get("representations") if not instance_repres: @@ -129,24 +171,6 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): self.log.debug("Skipping crypto passes.") return - # We only want to process the produces needed from settings. - def validate_string_against_patterns(input_str, patterns): - for pattern in patterns: - if re.match(pattern, input_str): - return True - return False - - product_names = self.product_names - if product_names: - result = validate_string_against_patterns( - product_name, product_names - ) - if not result: - self.log.debug(( - "Product name \"{}\" did not match settings filters: {}" - ).format(product_name, product_names)) - return - # first check for any explicitly marked representations for thumbnail explicit_repres = self._get_explicit_repres_for_thumbnail(instance) if explicit_repres: @@ -191,7 +215,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ) file_path = self._create_frame_from_video( video_file_path, - dst_staging + dst_staging, + thumbnail_def ) if file_path: src_staging, input_file = os.path.split(file_path) @@ -204,7 +229,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): if "slate-frame" in repre.get("tags", []): repre_files_thumb = repre_files_thumb[1:] file_index = int( - float(len(repre_files_thumb)) * self.duration_split) + float(len(repre_files_thumb)) * thumbnail_def.duration_split # noqa: E501 + ) input_file = repre_files[file_index] full_input_path = os.path.join(src_staging, input_file) @@ -233,7 +259,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): repre_thumb_created = self._create_colorspace_thumbnail( full_input_path, full_output_path, - colorspace_data + colorspace_data, + thumbnail_def, ) # Try to use FFMPEG if OIIO is not supported or for cases when @@ -241,13 +268,13 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): # colorspace data if not repre_thumb_created: repre_thumb_created = self._create_thumbnail_ffmpeg( - full_input_path, full_output_path + full_input_path, full_output_path, thumbnail_def ) # Skip representation and try next 
one if wasn't created if not repre_thumb_created and oiio_supported: repre_thumb_created = self._create_thumbnail_oiio( - full_input_path, full_output_path + full_input_path, full_output_path, thumbnail_def ) if not repre_thumb_created: @@ -275,7 +302,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): new_repre_tags = ["thumbnail"] # for workflows which needs to have thumbnails published as # separate representations `delete` tag should not be added - if not self.integrate_thumbnail: + if not thumbnail_def.integrate_thumbnail: new_repre_tags.append("delete") new_repre = { @@ -374,7 +401,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): return review_repres + other_repres - def _is_valid_images_repre(self, repre): + def _is_valid_images_repre(self, repre: dict) -> bool: """Check if representation contains valid image files Args: @@ -394,9 +421,10 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): def _create_colorspace_thumbnail( self, - src_path, - dst_path, - colorspace_data, + src_path: str, + dst_path: str, + colorspace_data: dict, + thumbnail_def: ThumbnailDef, ): """Create thumbnail using OIIO tool oiiotool @@ -409,12 +437,15 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): config (dict) display (Optional[str]) view (Optional[str]) + thumbnail_def (ThumbnailDefinition): Thumbnail definition. Returns: str: path to created thumbnail """ - self.log.info("Extracting thumbnail {}".format(dst_path)) - resolution_arg = self._get_resolution_arg("oiiotool", src_path) + self.log.info(f"Extracting thumbnail {dst_path}") + resolution_arg = self._get_resolution_args( + "oiiotool", src_path, thumbnail_def + ) repre_display = colorspace_data.get("display") repre_view = colorspace_data.get("view") @@ -433,12 +464,13 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ) # if representation doesn't have display and view then use # oiiotool_defaults - elif self.oiiotool_defaults: - oiio_default_type = self.oiiotool_defaults["type"] + elif thumbnail_def.oiiotool_defaults: + oiiotool_defaults = thumbnail_def.oiiotool_defaults + oiio_default_type = oiiotool_defaults["type"] if "colorspace" == oiio_default_type: - oiio_default_colorspace = self.oiiotool_defaults["colorspace"] + oiio_default_colorspace = oiiotool_defaults["colorspace"] else: - display_and_view = self.oiiotool_defaults["display_and_view"] + display_and_view = oiiotool_defaults["display_and_view"] oiio_default_display = display_and_view["display"] oiio_default_view = display_and_view["view"] @@ -465,19 +497,34 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): return True - def _create_thumbnail_oiio(self, src_path, dst_path): + def _create_thumbnail_oiio(self, src_path, dst_path, thumbnail_def): self.log.debug(f"Extracting thumbnail with OIIO: {dst_path}") try: - resolution_arg = self._get_resolution_arg("oiiotool", src_path) + resolution_arg = self._get_resolution_args( + "oiiotool", src_path, thumbnail_def + ) except RuntimeError: self.log.warning( "Failed to create thumbnail using oiio", exc_info=True ) return False - input_info = get_oiio_info_for_input(src_path, logger=self.log) - input_arg, channels_arg = get_oiio_input_and_channel_args(input_info) + input_info = get_oiio_info_for_input( + src_path, + logger=self.log, + verbose=False, + ) + try: + input_arg, channels_arg = get_oiio_input_and_channel_args( + input_info + ) + except MissingRGBAChannelsError: + self.log.debug( + "Unable to find relevant reviewable channel for thumbnail " + "creation" + ) + return False oiio_cmd = get_oiio_tool_args( 
"oiiotool", input_arg, src_path, @@ -500,9 +547,11 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ) return False - def _create_thumbnail_ffmpeg(self, src_path, dst_path): + def _create_thumbnail_ffmpeg(self, src_path, dst_path, thumbnail_def): try: - resolution_arg = self._get_resolution_arg("ffmpeg", src_path) + resolution_arg = self._get_resolution_args( + "ffmpeg", src_path, thumbnail_def + ) except RuntimeError: self.log.warning( "Failed to create thumbnail using ffmpeg", exc_info=True @@ -510,7 +559,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): return False ffmpeg_path_args = get_ffmpeg_tool_args("ffmpeg") - ffmpeg_args = self.ffmpeg_args or {} + ffmpeg_args = thumbnail_def.ffmpeg_args or {} jpeg_items = [ subprocess.list2cmdline(ffmpeg_path_args) @@ -550,7 +599,12 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ) return False - def _create_frame_from_video(self, video_file_path, output_dir): + def _create_frame_from_video( + self, + video_file_path: str, + output_dir: str, + thumbnail_def: ThumbnailDef, + ) -> Optional[str]: """Convert video file to one frame image via ffmpeg""" # create output file path base_name = os.path.basename(video_file_path) @@ -575,7 +629,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): seek_position = 0.0 # Only use timestamp calculation for videos longer than 0.1 seconds if duration > 0.1: - seek_position = duration * self.duration_split + seek_position = duration * thumbnail_def.duration_split # Build command args cmd_args = [] @@ -649,16 +703,17 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): ): os.remove(output_thumb_file_path) - def _get_resolution_arg( + def _get_resolution_args( self, - application, - input_path, - ): + application: str, + input_path: str, + thumbnail_def: ThumbnailDef, + ) -> list: # get settings - if self.target_size["type"] == "source": + if thumbnail_def.target_size["type"] == "source": return [] - resize = self.target_size["resize"] + resize = thumbnail_def.target_size["resize"] target_width = resize["width"] target_height = resize["height"] @@ -668,6 +723,43 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): input_path, target_width, target_height, - bg_color=self.background_color, + bg_color=thumbnail_def.background_color, log=self.log ) + + def _get_config_from_profile( + self, + instance: pyblish.api.Instance + ) -> Optional[ThumbnailDef]: + """Returns profile if and how repre should be color transcoded.""" + host_name = instance.context.data["hostName"] + product_type = instance.data["productType"] + product_name = instance.data["productName"] + task_data = instance.data["anatomyData"].get("task", {}) + task_name = task_data.get("name") + task_type = task_data.get("type") + filtering_criteria = { + "host_names": host_name, + "product_types": product_type, + "product_names": product_name, + "task_names": task_name, + "task_types": task_type, + } + profile = filter_profiles( + self.profiles, + filtering_criteria, + logger=self.log + ) + + if not profile: + self.log.debug( + "Skipped instance. 
None of profiles in presets are for" + f' Host: "{host_name}"' + f' | Product types: "{product_type}"' + f' | Product names: "{product_name}"' + f' | Task name "{task_name}"' + f' | Task type "{task_type}"' + ) + return None + + return ThumbnailDef.from_dict(profile) diff --git a/client/ayon_core/plugins/publish/extract_thumbnail_from_source.py b/client/ayon_core/plugins/publish/extract_thumbnail_from_source.py index 59a62b1d7b..5535c503f3 100644 --- a/client/ayon_core/plugins/publish/extract_thumbnail_from_source.py +++ b/client/ayon_core/plugins/publish/extract_thumbnail_from_source.py @@ -14,6 +14,7 @@ Todos: import os import tempfile +from typing import List, Optional import pyblish.api from ayon_core.lib import ( @@ -22,6 +23,7 @@ from ayon_core.lib import ( is_oiio_supported, run_subprocess, + get_rescaled_command_arguments, ) @@ -31,17 +33,20 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): Thumbnail source must be a single image or video filepath. """ - label = "Extract Thumbnail (from source)" + label = "Extract Thumbnail from source" # Before 'ExtractThumbnail' in global plugins order = pyblish.api.ExtractorOrder - 0.00001 - def process(self, instance): + # Settings + target_size = { + "type": "resize", + "resize": {"width": 1920, "height": 1080} + } + background_color = (0, 0, 0, 0.0) + + def process(self, instance: pyblish.api.Instance): self._create_context_thumbnail(instance.context) - product_name = instance.data["productName"] - self.log.debug( - "Processing instance with product name {}".format(product_name) - ) thumbnail_source = instance.data.get("thumbnailSource") if not thumbnail_source: self.log.debug("Thumbnail source not filled. Skipping.") @@ -69,6 +74,8 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): "outputName": "thumbnail", } + new_repre["tags"].append("delete") + # adding representation self.log.debug( "Adding thumbnail representation: {}".format(new_repre) @@ -76,7 +83,11 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): instance.data["representations"].append(new_repre) instance.data["thumbnailPath"] = dst_filepath - def _create_thumbnail(self, context, thumbnail_source): + def _create_thumbnail( + self, + context: pyblish.api.Context, + thumbnail_source: str, + ) -> Optional[str]: if not thumbnail_source: self.log.debug("Thumbnail source not filled. 
Skipping.") return @@ -131,7 +142,7 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): self.log.warning("Thumbnail has not been created.") - def _instance_has_thumbnail(self, instance): + def _instance_has_thumbnail(self, instance: pyblish.api.Instance) -> bool: if "representations" not in instance.data: self.log.warning( "Instance does not have 'representations' key filled" @@ -143,14 +154,29 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): return True return False - def create_thumbnail_oiio(self, src_path, dst_path): + def create_thumbnail_oiio( + self, + src_path: str, + dst_path: str, + ) -> bool: self.log.debug("Outputting thumbnail with OIIO: {}".format(dst_path)) - oiio_cmd = get_oiio_tool_args( - "oiiotool", - "-a", src_path, - "--ch", "R,G,B", - "-o", dst_path - ) + try: + resolution_args = self._get_resolution_args( + "oiiotool", src_path + ) + except Exception: + self.log.warning("Failed to get resolution args for OIIO.") + return False + + oiio_cmd = get_oiio_tool_args("oiiotool", "-a", src_path) + if resolution_args: + # resize must be before -o + oiio_cmd.extend(resolution_args) + else: + # resize provides own -ch, must be only one + oiio_cmd.extend(["--ch", "R,G,B"]) + + oiio_cmd.extend(["-o", dst_path]) self.log.debug("Running: {}".format(" ".join(oiio_cmd))) try: run_subprocess(oiio_cmd, logger=self.log) @@ -162,7 +188,19 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): ) return False - def create_thumbnail_ffmpeg(self, src_path, dst_path): + def create_thumbnail_ffmpeg( + self, + src_path: str, + dst_path: str, + ) -> bool: + try: + resolution_args = self._get_resolution_args( + "ffmpeg", src_path + ) + except Exception: + self.log.warning("Failed to get resolution args for ffmpeg.") + return False + max_int = str(2147483647) ffmpeg_cmd = get_ffmpeg_tool_args( "ffmpeg", @@ -171,9 +209,13 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): "-probesize", max_int, "-i", src_path, "-frames:v", "1", - dst_path ) + ffmpeg_cmd.extend(resolution_args) + + # possible resize must be before output args + ffmpeg_cmd.append(dst_path) + self.log.debug("Running: {}".format(" ".join(ffmpeg_cmd))) try: run_subprocess(ffmpeg_cmd, logger=self.log) @@ -185,10 +227,37 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): ) return False - def _create_context_thumbnail(self, context): + def _create_context_thumbnail( + self, + context: pyblish.api.Context, + ): if "thumbnailPath" in context.data: return thumbnail_source = context.data.get("thumbnailSource") - thumbnail_path = self._create_thumbnail(context, thumbnail_source) - context.data["thumbnailPath"] = thumbnail_path + context.data["thumbnailPath"] = self._create_thumbnail( + context, thumbnail_source + ) + + def _get_resolution_args( + self, + application: str, + input_path: str, + ) -> List[str]: + # get settings + if self.target_size["type"] == "source": + return [] + + resize = self.target_size["resize"] + target_width = resize["width"] + target_height = resize["height"] + + # form arg string per application + return get_rescaled_command_arguments( + application, + input_path, + target_width, + target_height, + bg_color=self.background_color, + log=self.log, + ) diff --git a/client/ayon_core/plugins/publish/extract_usd_layer_contributions.py b/client/ayon_core/plugins/publish/extract_usd_layer_contributions.py index 0dc9a5e34d..ed3c16b5c2 100644 --- a/client/ayon_core/plugins/publish/extract_usd_layer_contributions.py +++ 
b/client/ayon_core/plugins/publish/extract_usd_layer_contributions.py @@ -1,6 +1,8 @@ from operator import attrgetter import dataclasses import os +import platform +from collections import defaultdict from typing import Any, Dict, List import pyblish.api @@ -12,10 +14,11 @@ except ImportError: from ayon_core.lib import ( TextDef, BoolDef, + NumberDef, UISeparatorDef, UILabelDef, EnumDef, - filter_profiles + filter_profiles, ) try: from ayon_core.pipeline.usdlib import ( @@ -24,7 +27,8 @@ try: variant_nested_prim_path, setup_asset_layer, add_ordered_sublayer, - set_layer_defaults + set_layer_defaults, + get_standard_default_prim_name ) except ImportError: pass @@ -175,10 +179,17 @@ def get_instance_uri_path( # If for whatever reason we were unable to retrieve from the context # then get the path from an existing database entry - path = get_representation_path_by_names(**query) + path = get_representation_path_by_names( + anatomy=context.data["anatomy"], + **names + ) + if not path: + raise RuntimeError(f"Unable to resolve publish path for: {names}") # Ensure `None` for now is also a string path = str(path) + if platform.system().lower() == "windows": + path = path.replace("\\", "/") return path @@ -266,22 +277,26 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin, # the contributions so that we can design a system where custom # contributions outside the predefined orders are possible to be # managed. So that if a particular asset requires an extra contribution - # level, you can add itdirectly from the publisher at that particular + # level, you can add it directly from the publisher at that particular # order. Future publishes will then see the existing contribution and will # persist adding it to future bootstraps at that order - contribution_layers: Dict[str, int] = { + contribution_layers: Dict[str, Dict[str, int]] = { # asset layers - "model": 100, - "assembly": 150, - "groom": 175, - "look": 200, - "rig": 300, + "asset": { + "model": 100, + "assembly": 150, + "groom": 175, + "look": 200, + "rig": 300, + }, # shot layers - "layout": 200, - "animation": 300, - "simulation": 400, - "fx": 500, - "lighting": 600, + "shot": { + "layout": 200, + "animation": 300, + "simulation": 400, + "fx": 500, + "lighting": 600, + } } # Default profiles to set certain instance attribute defaults based on # profiles in settings @@ -296,12 +311,18 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin, cls.enabled = plugin_settings.get("enabled", cls.enabled) - # Define contribution layers via settings - contribution_layers = {} + # Define contribution layers via settings by their scope + contribution_layers = defaultdict(dict) for entry in plugin_settings.get("contribution_layers", []): - contribution_layers[entry["name"]] = int(entry["order"]) + for scope in entry.get("scope", []): + contribution_layers[scope][entry["name"]] = int(entry["order"]) if contribution_layers: - cls.contribution_layers = contribution_layers + cls.contribution_layers = dict(contribution_layers) + else: + cls.log.warning( + "No scoped contribution layers found in settings, falling back" + " to CollectUSDLayerContributions plug-in defaults..." 
+ ) cls.profiles = plugin_settings.get("profiles", []) @@ -325,10 +346,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin, attr_values[key] = attr_values[key].format(**data) # Define contribution - order = self.contribution_layers.get( - attr_values["contribution_layer"], 0 - ) - + in_layer_order: int = attr_values.get("contribution_in_layer_order", 0) if attr_values["contribution_apply_as_variant"]: contribution = VariantContribution( instance=instance, @@ -337,19 +355,23 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin, variant_set_name=attr_values["contribution_variant_set_name"], variant_name=attr_values["contribution_variant"], variant_is_default=attr_values["contribution_variant_is_default"], # noqa: E501 - order=order + order=in_layer_order ) else: contribution = SublayerContribution( instance=instance, layer_id=attr_values["contribution_layer"], target_product=attr_values["contribution_target_product"], - order=order + order=in_layer_order ) asset_product = contribution.target_product layer_product = "{}_{}".format(asset_product, contribution.layer_id) + scope: str = attr_values["contribution_target_product_init"] + layer_order: int = ( + self.contribution_layers[scope][attr_values["contribution_layer"]] + ) # Layer contribution instance layer_instance = self.get_or_create_instance( product_name=layer_product, @@ -361,7 +383,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin, contribution ) layer_instance.data["usd_layer_id"] = contribution.layer_id - layer_instance.data["usd_layer_order"] = contribution.order + layer_instance.data["usd_layer_order"] = layer_order layer_instance.data["productGroup"] = ( instance.data.get("productGroup") or "USD Layer" @@ -480,18 +502,18 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin, profile = {} # Define defaults - default_enabled = profile.get("contribution_enabled", True) + default_enabled: bool = profile.get("contribution_enabled", True) default_contribution_layer = profile.get( "contribution_layer", None) - default_apply_as_variant = profile.get( + default_apply_as_variant: bool = profile.get( "contribution_apply_as_variant", False) - default_target_product = profile.get( + default_target_product: str = profile.get( "contribution_target_product", "usdAsset") - default_init_as = ( + default_init_as: str = ( "asset" if profile.get("contribution_target_product") == "usdAsset" else "shot") - init_as_visible = False + init_as_visible = True # Attributes logic publish_attributes = instance["publish_attributes"].get( @@ -500,6 +522,12 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin, visible = publish_attributes.get("contribution_enabled", True) variant_visible = visible and publish_attributes.get( "contribution_apply_as_variant", True) + init_as: str = publish_attributes.get( + "contribution_target_product_init", default_init_as) + + contribution_layers = cls.contribution_layers.get( + init_as, {} + ) return [ UISeparatorDef("usd_container_settings1"), @@ -549,9 +577,22 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin, "predefined ordering.\nA higher order (further down " "the list) will contribute as a stronger opinion." 
), - items=list(cls.contribution_layers.keys()), + items=list(contribution_layers.keys()), default=default_contribution_layer, visible=visible), + # TODO: We may want to make the visibility of this optional + # based on studio preference, to avoid complexity when not needed + NumberDef("contribution_in_layer_order", + label="Strength order", + tooltip=( + "The contribution inside the department layer will be " + "made with this offset applied. A higher number means " + "a stronger opinion." + ), + default=0, + minimum=-99999, + maximum=99999, + visible=visible), BoolDef("contribution_apply_as_variant", label="Add as variant", tooltip=( @@ -597,7 +638,11 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin, # Update attributes if any of the following plug-in attributes # change: - keys = ["contribution_enabled", "contribution_apply_as_variant"] + keys = { + "contribution_enabled", + "contribution_apply_as_variant", + "contribution_target_product_init", + } for instance_change in event["changes"]: instance = instance_change["instance"] @@ -637,6 +682,7 @@ class ExtractUSDLayerContribution(publish.Extractor): settings_category = "core" use_ayon_entity_uri = False + enforce_default_prim = False def process(self, instance): @@ -647,9 +693,18 @@ class ExtractUSDLayerContribution(publish.Extractor): path = get_last_publish(instance) if path and BUILD_INTO_LAST_VERSIONS: sdf_layer = Sdf.Layer.OpenAsAnonymous(path) + + # If enabled in settings, ignore any default prim specified on + # older publish versions and always publish with the AYON + # standard default prim + if self.enforce_default_prim: + sdf_layer.defaultPrim = get_standard_default_prim_name( + folder_path + ) + default_prim = sdf_layer.defaultPrim else: - default_prim = folder_path.rsplit("/", 1)[-1] # use folder name + default_prim = get_standard_default_prim_name(folder_path) sdf_layer = Sdf.Layer.CreateAnonymous() set_layer_defaults(sdf_layer, default_prim=default_prim) @@ -710,7 +765,7 @@ class ExtractUSDLayerContribution(publish.Extractor): layer=sdf_layer, contribution_path=path, layer_id=product_name, - order=None, # unordered + order=contribution.order, add_sdf_arguments_metadata=True ) else: @@ -807,7 +862,7 @@ class ExtractUSDAssetContribution(publish.Extractor): folder_path = instance.data["folderPath"] product_name = instance.data["productName"] self.log.debug(f"Building asset: {folder_path} > {product_name}") - folder_name = folder_path.rsplit("/", 1)[-1] + asset_name = get_standard_default_prim_name(folder_path) # Contribute layers to asset # Use existing asset and add to it, or initialize a new asset layer @@ -825,8 +880,9 @@ class ExtractUSDAssetContribution(publish.Extractor): # If no existing publish of this product exists then we initialize # the layer as either a default asset or shot structure. 
init_type = instance.data["contribution_target_product_init"] + self.log.debug("Initializing layer as type: %s", init_type) asset_layer, payload_layer = self.init_layer( - asset_name=folder_name, init_type=init_type + asset_name=asset_name, init_type=init_type ) # Author timeCodesPerSecond and framesPerSecond if the asset layer @@ -906,7 +962,7 @@ class ExtractUSDAssetContribution(publish.Extractor): payload_layer.Export(payload_path, args={"format": "usda"}) self.add_relative_file(instance, payload_path) - def init_layer(self, asset_name, init_type): + def init_layer(self, asset_name: str, init_type: str): """Initialize layer if no previous version exists""" if init_type == "asset": diff --git a/client/ayon_core/plugins/publish/help/upload_file.xml b/client/ayon_core/plugins/publish/help/upload_file.xml new file mode 100644 index 0000000000..8c270c7b19 --- /dev/null +++ b/client/ayon_core/plugins/publish/help/upload_file.xml @@ -0,0 +1,21 @@ + + + +{upload_type} upload timed out + +## {upload_type} upload failed after retries + +The connection to the AYON server timed out while uploading a file. + +### How to resolve? + +1. Try publishing again. Intermittent network hiccups often resolve on retry. +2. Ensure your network/VPN is stable and large uploads are allowed. +3. If it keeps failing, try again later or contact your admin. + +
+File: {file}
+Error: {error}
+</detail>
+</error>
+</root>
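The `{upload_type}`, `{file}` and `{error}` placeholders in this help file are filled from the `formatting_data` passed to `PublishXmlValidationError` by the review and thumbnail integrators further down. A minimal sketch of that retry-then-raise upload pattern, where the `upload_with_retries` name and the generic `upload` callable are illustrative stand-ins for the plug-ins' `_upload_with_retries` methods and `ayon_api.ServerAPI.upload_file`:

```python
import time

import requests


def upload_with_retries(upload, path, max_retries=3, wait_time=1):
    """Run `upload(path)` with retries on transient network errors.

    `upload` is any callable performing the HTTP request; it stands in for
    `ayon_api.ServerAPI.upload_file`. `max_retries` is assumed to be >= 1.
    """
    last_error = None
    for attempt in range(1, max_retries + 1):
        try:
            return upload(path)
        except (
            requests.exceptions.Timeout,
            requests.exceptions.ConnectionError,
        ) as exc:
            last_error = exc
            if attempt >= max_retries:
                break
            time.sleep(wait_time)
    # The plug-ins wrap this failure in PublishXmlValidationError with
    # formatting_data={"upload_type": ..., "file": path,
    # "error": str(last_error)} so the placeholders above get filled.
    raise last_error
```

Keeping the last exception and raising it as a validation error is what lets the publisher surface this XML as actionable help instead of a raw traceback.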
diff --git a/client/ayon_core/plugins/publish/integrate.py b/client/ayon_core/plugins/publish/integrate.py index f1e066018c..2e5a733533 100644 --- a/client/ayon_core/plugins/publish/integrate.py +++ b/client/ayon_core/plugins/publish/integrate.py @@ -28,6 +28,7 @@ from ayon_core.pipeline.publish import ( KnownPublishError, get_publish_template_name, ) +from ayon_core.pipeline import is_product_base_type_supported log = logging.getLogger(__name__) @@ -121,12 +122,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): "version", "representation", "username", - "user", "output", - # OpenPype keys - should be removed - "asset", # folder[name] - "subset", # product[name] - "family", # product[type] ] def process(self, instance): @@ -368,6 +364,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin): folder_entity = instance.data["folderEntity"] product_name = instance.data["productName"] product_type = instance.data["productType"] + product_base_type = instance.data.get("productBaseType") + self.log.debug("Product: {}".format(product_name)) # Get existing product if it exists @@ -395,14 +393,33 @@ class IntegrateAsset(pyblish.api.InstancePlugin): product_id = None if existing_product_entity: product_id = existing_product_entity["id"] - product_entity = new_product_entity( - product_name, - product_type, - folder_entity["id"], - data=data, - attribs=attributes, - entity_id=product_id - ) + + new_product_entity_kwargs = { + "name": product_name, + "product_type": product_type, + "folder_id": folder_entity["id"], + "data": data, + "attribs": attributes, + "entity_id": product_id, + "product_base_type": product_base_type, + } + + if not is_product_base_type_supported(): + new_product_entity_kwargs.pop("product_base_type") + if ( + product_base_type is not None + and product_base_type != product_type): + self.log.warning(( + "Product base type %s is not supported by the server, " + "but it's defined - and it differs from product type %s. " + "Using product base type as product type." 
+ ), product_base_type, product_type) + + new_product_entity_kwargs["product_type"] = ( + product_base_type + ) + + product_entity = new_product_entity(**new_product_entity_kwargs) if existing_product_entity is None: # Create a new product @@ -458,6 +475,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin): else: version_data[key] = value + host_name = instance.context.data["hostName"] + version_data["host_name"] = host_name + version_entity = new_version_entity( version_number, product_entity["id"], @@ -796,6 +816,14 @@ class IntegrateAsset(pyblish.api.InstancePlugin): if value is not None: repre_context[key] = value + # Keep only username + # NOTE This is to avoid storing all user attributes and data + # to representation + if "user" not in repre_context: + repre_context["user"] = { + "name": template_data["user"]["name"] + } + # Use previous representation's id if there is a name match existing = existing_repres_by_name.get(repre["name"].lower()) repre_id = None @@ -892,8 +920,12 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Include optional data if present in optionals = [ - "frameStart", "frameEnd", "step", - "handleEnd", "handleStart", "sourceHashes" + "frameStart", "frameEnd", + "handleEnd", "handleStart", + "step", + "resolutionWidth", "resolutionHeight", + "pixelAspect", + "sourceHashes" ] for key in optionals: if key in instance.data: @@ -917,6 +949,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): host_name = context.data["hostName"] anatomy_data = instance.data["anatomyData"] product_type = instance.data["productType"] + product_base_type = instance.data.get("productBaseType") task_info = anatomy_data.get("task") or {} return get_publish_template_name( @@ -926,7 +959,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin): task_name=task_info.get("name"), task_type=task_info.get("type"), project_settings=context.data["project_settings"], - logger=self.log + logger=self.log, + product_base_type=product_base_type ) def get_rootless_path(self, anatomy, path): diff --git a/client/ayon_core/plugins/publish/integrate_hero_version.py b/client/ayon_core/plugins/publish/integrate_hero_version.py index 90e6f15568..ee499d6d45 100644 --- a/client/ayon_core/plugins/publish/integrate_hero_version.py +++ b/client/ayon_core/plugins/publish/integrate_hero_version.py @@ -1,11 +1,8 @@ import os +import sys import copy -import errno import itertools import shutil -from concurrent.futures import ThreadPoolExecutor - -from speedcopy import copyfile import clique import pyblish.api @@ -16,11 +13,15 @@ from ayon_api.operations import ( ) from ayon_api.utils import create_entity_id -from ayon_core.lib import create_hard_link, source_hash -from ayon_core.lib.file_transaction import wait_for_future_errors +from ayon_core.lib import source_hash +from ayon_core.lib.file_transaction import ( + FileTransaction, + DuplicateDestinationError, +) from ayon_core.pipeline.publish import ( get_publish_template_name, OptionalPyblishPluginMixin, + KnownPublishError, ) @@ -81,15 +82,11 @@ class IntegrateHeroVersion( db_representation_context_keys = [ "project", "folder", - "asset", "hierarchy", "task", "product", - "subset", - "family", "representation", "username", - "user", "output" ] # QUESTION/TODO this process should happen on server if crashed due to @@ -364,6 +361,14 @@ class IntegrateHeroVersion( if value is not None: repre_context[key] = value + # Keep only username + # NOTE This is to avoid storing all user attributes and data + # to representation + if "user" not in repre_context: + 
repre_context["user"] = { + "name": anatomy_data["user"]["name"] + } + # Prepare new repre repre_entity = copy.deepcopy(repre_info["representation"]) repre_entity.pop("id", None) @@ -417,19 +422,40 @@ class IntegrateHeroVersion( (repre_entity, dst_paths) ) - self.path_checks = [] + file_transactions = FileTransaction( + log=self.log, + # Enforce unique transfers + allow_queue_replacements=False + ) + mode = FileTransaction.MODE_COPY + if self.use_hardlinks: + mode = FileTransaction.MODE_LINK - # Copy(hardlink) paths of source and destination files - # TODO should we *only* create hardlinks? - # TODO should we keep files for deletion until this is successful? - with ThreadPoolExecutor(max_workers=8) as executor: - futures = [ - executor.submit(self.copy_file, src_path, dst_path) - for src_path, dst_path in itertools.chain( - src_to_dst_file_paths, other_file_paths_mapping - ) - ] - wait_for_future_errors(executor, futures) + try: + for src_path, dst_path in itertools.chain( + src_to_dst_file_paths, + other_file_paths_mapping + ): + file_transactions.add(src_path, dst_path, mode=mode) + + self.log.debug("Integrating source files to destination ...") + file_transactions.process() + + except DuplicateDestinationError as exc: + # Raise DuplicateDestinationError as KnownPublishError + # and rollback the transactions + file_transactions.rollback() + raise KnownPublishError(exc).with_traceback(sys.exc_info()[2]) + + except Exception as exc: + # Rollback the transactions + file_transactions.rollback() + self.log.critical("Error when copying files", exc_info=True) + raise exc + + # Finalizing can't rollback safely so no use for moving it to + # the try, except. + file_transactions.finalize() # Update prepared representation etity data with files # and integrate it to server. 
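The hunk above replaces the hero integrator's hand-rolled `ThreadPoolExecutor` copy loop with the shared `FileTransaction` helper, so copies and hardlinks are queued, processed, and rolled back as a unit on failure. A condensed sketch of that transfer pattern outside the plug-in, assuming only the API shown in this diff; the `transfer_files` helper and module-level logger are illustrative, not part of the change:

```python
import logging

from ayon_core.lib.file_transaction import (
    FileTransaction,
    DuplicateDestinationError,
)

log = logging.getLogger(__name__)


def transfer_files(path_pairs, use_hardlinks=False):
    """Copy or hardlink (src, dst) pairs through one FileTransaction.

    Everything is rolled back if any transfer fails; finalize() runs only
    after all transfers succeeded because it cannot be rolled back safely.
    """
    transaction = FileTransaction(
        log=log,
        # Enforce unique destinations, as the hero integrator does
        allow_queue_replacements=False,
    )
    mode = (
        FileTransaction.MODE_LINK if use_hardlinks
        else FileTransaction.MODE_COPY
    )
    try:
        for src_path, dst_path in path_pairs:
            transaction.add(src_path, dst_path, mode=mode)
        transaction.process()
    except DuplicateDestinationError:
        # Two sources mapped to the same destination; keep nothing
        transaction.rollback()
        raise
    except Exception:
        transaction.rollback()
        raise
    transaction.finalize()
```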
@@ -618,48 +644,6 @@ class IntegrateHeroVersion( ).format(path)) return path - def copy_file(self, src_path, dst_path): - # TODO check drives if are the same to check if cas hardlink - dirname = os.path.dirname(dst_path) - - try: - os.makedirs(dirname) - self.log.debug("Folder(s) created: \"{}\"".format(dirname)) - except OSError as exc: - if exc.errno != errno.EEXIST: - self.log.error("An unexpected error occurred.", exc_info=True) - raise - - self.log.debug("Folder already exists: \"{}\"".format(dirname)) - - if self.use_hardlinks: - # First try hardlink and copy if paths are cross drive - self.log.debug("Hardlinking file \"{}\" to \"{}\"".format( - src_path, dst_path - )) - try: - create_hard_link(src_path, dst_path) - # Return when successful - return - - except OSError as exc: - # re-raise exception if different than - # EXDEV - cross drive path - # EINVAL - wrong format, must be NTFS - self.log.debug( - "Hardlink failed with errno:'{}'".format(exc.errno)) - if exc.errno not in [errno.EXDEV, errno.EINVAL]: - raise - - self.log.debug( - "Hardlinking failed, falling back to regular copy...") - - self.log.debug("Copying file \"{}\" to \"{}\"".format( - src_path, dst_path - )) - - copyfile(src_path, dst_path) - def version_from_representations(self, project_name, repres): for repre in repres: version = ayon_api.get_version_by_id( diff --git a/client/ayon_core/plugins/publish/integrate_inputlinks.py b/client/ayon_core/plugins/publish/integrate_inputlinks.py index be399a95fc..671e55905a 100644 --- a/client/ayon_core/plugins/publish/integrate_inputlinks.py +++ b/client/ayon_core/plugins/publish/integrate_inputlinks.py @@ -105,7 +105,7 @@ class IntegrateInputLinksAYON(pyblish.api.ContextPlugin): created links by its type """ if workfile_instance is None: - self.log.warning("No workfile in this publish session.") + self.log.debug("No workfile in this publish session.") return workfile_version_id = workfile_instance.data["versionEntity"]["id"] diff --git a/client/ayon_core/plugins/publish/integrate_product_group.py b/client/ayon_core/plugins/publish/integrate_product_group.py index 8904d21d69..107f409312 100644 --- a/client/ayon_core/plugins/publish/integrate_product_group.py +++ b/client/ayon_core/plugins/publish/integrate_product_group.py @@ -62,10 +62,8 @@ class IntegrateProductGroup(pyblish.api.InstancePlugin): product_type = instance.data["productType"] fill_pairs = prepare_template_data({ - "family": product_type, "task": filter_criteria["tasks"], "host": filter_criteria["hosts"], - "subset": product_name, "product": { "name": product_name, "type": product_type, diff --git a/client/ayon_core/plugins/publish/integrate_review.py b/client/ayon_core/plugins/publish/integrate_review.py index 0a6b24adb4..b0cc41acc9 100644 --- a/client/ayon_core/plugins/publish/integrate_review.py +++ b/client/ayon_core/plugins/publish/integrate_review.py @@ -1,11 +1,17 @@ import os +import time -import pyblish.api import ayon_api +from ayon_api import TransferProgress from ayon_api.server_api import RequestTypes +import pyblish.api -from ayon_core.lib import get_media_mime_type -from ayon_core.pipeline.publish import get_publish_repre_path +from ayon_core.lib import get_media_mime_type, format_file_size +from ayon_core.pipeline.publish import ( + PublishXmlValidationError, + get_publish_repre_path, +) +import requests.exceptions class IntegrateAYONReview(pyblish.api.InstancePlugin): @@ -44,7 +50,7 @@ class IntegrateAYONReview(pyblish.api.InstancePlugin): if "webreview" not in repre_tags: continue - # exclude 
representations with are going to be published on farm + # exclude representations going to be published on farm if "publish_on_farm" in repre_tags: continue @@ -75,18 +81,13 @@ class IntegrateAYONReview(pyblish.api.InstancePlugin): f"/projects/{project_name}" f"/versions/{version_id}/reviewables{query}" ) - filename = os.path.basename(repre_path) - # Upload the reviewable - self.log.info(f"Uploading reviewable '{label or filename}' ...") - - headers = ayon_con.get_headers(content_type) - headers["x-file-name"] = filename self.log.info(f"Uploading reviewable {repre_path}") - ayon_con.upload_file( + # Upload with retries and clear help if it keeps failing + self._upload_with_retries( + ayon_con, endpoint, repre_path, - headers=headers, - request_type=RequestTypes.post, + content_type, ) def _get_review_label(self, repre, uploaded_labels): @@ -100,3 +101,74 @@ class IntegrateAYONReview(pyblish.api.InstancePlugin): idx += 1 label = f"{orig_label}_{idx}" return label + + def _upload_with_retries( + self, + ayon_con: ayon_api.ServerAPI, + endpoint: str, + repre_path: str, + content_type: str, + ): + """Upload file with simple retries.""" + filename = os.path.basename(repre_path) + + headers = ayon_con.get_headers(content_type) + headers["x-file-name"] = filename + max_retries = ayon_con.get_default_max_retries() + # Retries are already implemented in 'ayon_api.upload_file' + # - added in ayon api 1.2.7 + if hasattr(TransferProgress, "get_attempt"): + max_retries = 1 + + size = os.path.getsize(repre_path) + self.log.info( + f"Uploading '{repre_path}' (size: {format_file_size(size)})" + ) + + # How long to sleep before next attempt + wait_time = 1 + last_error = None + for attempt in range(max_retries): + attempt += 1 + start = time.time() + try: + output = ayon_con.upload_file( + endpoint, + repre_path, + headers=headers, + request_type=RequestTypes.post, + ) + self.log.debug(f"Uploaded in {time.time() - start}s.") + return output + + except ( + requests.exceptions.Timeout, + requests.exceptions.ConnectionError + ) as exc: + # Log and retry with backoff if attempts remain + if attempt >= max_retries: + last_error = exc + break + + self.log.warning( + f"Review upload failed ({attempt}/{max_retries})" + f" after {time.time() - start}s." + f" Retrying in {wait_time}s...", + exc_info=True, + ) + time.sleep(wait_time) + + # Exhausted retries - raise a user-friendly validation error with help + raise PublishXmlValidationError( + self, + ( + "Upload of reviewable timed out or failed after multiple" + " attempts. Please try publishing again." 
+            ),
+            formatting_data={
+                "upload_type": "Review",
+                "file": repre_path,
+                "error": str(last_error),
+            },
+            help_filename="upload_file.xml",
+        )
diff --git a/client/ayon_core/plugins/publish/integrate_thumbnail.py b/client/ayon_core/plugins/publish/integrate_thumbnail.py
index 067c3470e8..60b3a97639 100644
--- a/client/ayon_core/plugins/publish/integrate_thumbnail.py
+++ b/client/ayon_core/plugins/publish/integrate_thumbnail.py
@@ -24,11 +24,16 @@
 import os
 import collections
+import time
 
-import pyblish.api
 import ayon_api
-from ayon_api import RequestTypes
+from ayon_api import RequestTypes, TransferProgress
 from ayon_api.operations import OperationsSession
+import pyblish.api
+import requests
+
+from ayon_core.lib import get_media_mime_type, format_file_size
+from ayon_core.pipeline.publish import PublishXmlValidationError
 
 
 InstanceFilterResult = collections.namedtuple(
@@ -164,25 +169,17 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
         return os.path.normpath(filled_path)
 
     def _create_thumbnail(self, project_name: str, src_filepath: str) -> str:
-        """Upload thumbnail to AYON and return its id.
-
-        This is temporary fix of 'create_thumbnail' function in ayon_api to
-        fix jpeg mime type.
-
-        """
-        mime_type = None
-        with open(src_filepath, "rb") as stream:
-            if b"\xff\xd8\xff" == stream.read(3):
-                mime_type = "image/jpeg"
-
+        """Upload thumbnail to AYON and return its id."""
+        mime_type = get_media_mime_type(src_filepath)
         if mime_type is None:
-            return ayon_api.create_thumbnail(project_name, src_filepath)
+            return ayon_api.create_thumbnail(
+                project_name, src_filepath
+            )
 
-        response = ayon_api.upload_file(
+        response = self._upload_with_retries(
             f"projects/{project_name}/thumbnails",
             src_filepath,
-            request_type=RequestTypes.post,
-            headers={"Content-Type": mime_type},
+            mime_type,
         )
         response.raise_for_status()
         return response.json()["id"]
@@ -248,3 +245,71 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
             or instance.data.get("name")
             or "N/A"
         )
+
+    def _upload_with_retries(
+        self,
+        endpoint: str,
+        repre_path: str,
+        content_type: str,
+    ):
+        """Upload file with simple retries."""
+        ayon_con = ayon_api.get_server_api_connection()
+        headers = ayon_con.get_headers(content_type)
+        max_retries = ayon_con.get_default_max_retries()
+        # Retries are already implemented in 'ayon_api.upload_file'
+        # - added in ayon api 1.2.7
+        if hasattr(TransferProgress, "get_attempt"):
+            max_retries = 1
+
+        size = os.path.getsize(repre_path)
+        self.log.info(
+            f"Uploading '{repre_path}' (size: {format_file_size(size)})"
+        )
+
+        # How long to sleep before next attempt
+        wait_time = 1
+        last_error = None
+        for attempt in range(max_retries):
+            attempt += 1
+            start = time.time()
+            try:
+                output = ayon_con.upload_file(
+                    endpoint,
+                    repre_path,
+                    headers=headers,
+                    request_type=RequestTypes.post,
+                )
+                self.log.debug(f"Uploaded in {time.time() - start}s.")
+                return output
+
+            except (
+                requests.exceptions.Timeout,
+                requests.exceptions.ConnectionError
+            ) as exc:
+                # Log and retry with backoff if attempts remain
+                if attempt >= max_retries:
+                    last_error = exc
+                    break
+
+                self.log.warning(
+                    f"Thumbnail upload failed ({attempt}/{max_retries})"
+                    f" after {time.time() - start}s."
+                    f" Retrying in {wait_time}s...",
+                    exc_info=True,
+                )
+                time.sleep(wait_time)
+
+        # Exhausted retries - raise a user-friendly validation error with help
+        raise PublishXmlValidationError(
+            self,
+            (
+                "Upload of thumbnail timed out or failed after multiple"
+                " attempts. Please try publishing again."
+ ), + formatting_data={ + "upload_type": "Thumbnail", + "file": repre_path, + "error": str(last_error), + }, + help_filename="upload_file.xml", + ) diff --git a/client/ayon_core/style/style.css b/client/ayon_core/style/style.css index 0d057beb7b..23a6998316 100644 --- a/client/ayon_core/style/style.css +++ b/client/ayon_core/style/style.css @@ -969,12 +969,6 @@ SearchItemDisplayWidget #ValueWidget { background: {color:bg-buttons}; } -/* Subset Manager */ -#SubsetManagerDetailsText {} -#SubsetManagerDetailsText[state="invalid"] { - border: 1px solid #ff0000; -} - /* Creator */ #CreatorsView::item { padding: 1px 5px; diff --git a/client/ayon_core/tools/attribute_defs/dialog.py b/client/ayon_core/tools/attribute_defs/dialog.py index 7423d58475..4d8e41199e 100644 --- a/client/ayon_core/tools/attribute_defs/dialog.py +++ b/client/ayon_core/tools/attribute_defs/dialog.py @@ -56,6 +56,7 @@ class AttributeDefinitionsDialog(QtWidgets.QDialog): btns_layout.addWidget(cancel_btn, 0) main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(10, 10, 10, 10) main_layout.addWidget(attrs_widget, 0) main_layout.addStretch(1) main_layout.addWidget(btns_widget, 0) diff --git a/client/ayon_core/tools/attribute_defs/widgets.py b/client/ayon_core/tools/attribute_defs/widgets.py index 1e948b2d28..f7766f50ac 100644 --- a/client/ayon_core/tools/attribute_defs/widgets.py +++ b/client/ayon_core/tools/attribute_defs/widgets.py @@ -182,6 +182,7 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget): layout.deleteLater() new_layout = QtWidgets.QGridLayout() + new_layout.setContentsMargins(0, 0, 0, 0) new_layout.setColumnStretch(0, 0) new_layout.setColumnStretch(1, 1) self.setLayout(new_layout) @@ -210,12 +211,8 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget): if not attr_def.visible: continue + col_num = 0 expand_cols = 2 - if attr_def.is_value_def and attr_def.is_label_horizontal: - expand_cols = 1 - - col_num = 2 - expand_cols - if attr_def.is_value_def and attr_def.label: label_widget = AttributeDefinitionsLabel( attr_def.id, attr_def.label, self @@ -233,9 +230,12 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget): | QtCore.Qt.AlignVCenter ) layout.addWidget( - label_widget, row, 0, 1, expand_cols + label_widget, row, col_num, 1, 1 ) - if not attr_def.is_label_horizontal: + if attr_def.is_label_horizontal: + col_num += 1 + expand_cols = 1 + else: row += 1 if attr_def.is_value_def: diff --git a/client/ayon_core/tools/common_models/projects.py b/client/ayon_core/tools/common_models/projects.py index 250c3b020d..0c1f912fd1 100644 --- a/client/ayon_core/tools/common_models/projects.py +++ b/client/ayon_core/tools/common_models/projects.py @@ -1,11 +1,13 @@ from __future__ import annotations +import json import contextlib from abc import ABC, abstractmethod from typing import Any, Optional from dataclasses import dataclass import ayon_api +from ayon_api.graphql_queries import projects_graphql_query from ayon_core.style import get_default_entity_icon_color from ayon_core.lib import CacheItem, NestedCacheItem @@ -275,7 +277,7 @@ class ProductTypeIconMapping: return self._definitions_by_name -def _get_project_items_from_entitiy( +def _get_project_items_from_entity( projects: list[dict[str, Any]] ) -> list[ProjectItem]: """ @@ -290,6 +292,7 @@ def _get_project_items_from_entitiy( return [ ProjectItem.from_entity(project) for project in projects + if project["active"] ] @@ -538,8 +541,32 @@ class ProjectsModel(object): self._projects_cache.update_data(project_items) return 
self._projects_cache.get_data() + def _fetch_graphql_projects(self) -> list[dict[str, Any]]: + """Fetch projects using GraphQl. + + This method was added because ayon_api had a bug in 'get_projects'. + + Returns: + list[dict[str, Any]]: List of projects. + + """ + api = ayon_api.get_server_api_connection() + query = projects_graphql_query({"name", "active", "library", "data"}) + + projects = [] + for parsed_data in query.continuous_query(api): + for project in parsed_data["projects"]: + project_data = project["data"] + if project_data is None: + project["data"] = {} + elif isinstance(project_data, str): + project["data"] = json.loads(project_data) + projects.append(project) + return projects + def _query_projects(self) -> list[ProjectItem]: - projects = ayon_api.get_projects(fields=["name", "active", "library"]) + projects = self._fetch_graphql_projects() + user = ayon_api.get_user() pinned_projects = ( user @@ -548,7 +575,7 @@ class ProjectsModel(object): .get("pinnedProjects") ) or [] pinned_projects = set(pinned_projects) - project_items = _get_project_items_from_entitiy(list(projects)) + project_items = _get_project_items_from_entity(list(projects)) for project in project_items: project.is_pinned = project.name in pinned_projects return project_items diff --git a/client/ayon_core/tools/common_models/users.py b/client/ayon_core/tools/common_models/users.py index f7939e5cd3..42a76d8d7d 100644 --- a/client/ayon_core/tools/common_models/users.py +++ b/client/ayon_core/tools/common_models/users.py @@ -1,10 +1,13 @@ import json import collections +from typing import Optional import ayon_api from ayon_api.graphql import FIELD_VALUE, GraphQlQuery, fields_to_dict -from ayon_core.lib import NestedCacheItem +from ayon_core.lib import NestedCacheItem, get_ayon_username + +NOT_SET = object() # --- Implementation that should be in ayon-python-api --- @@ -105,9 +108,18 @@ class UserItem: class UsersModel: def __init__(self, controller): + self._current_username = NOT_SET self._controller = controller self._users_cache = NestedCacheItem(default_factory=list) + def get_current_username(self) -> Optional[str]: + if self._current_username is NOT_SET: + self._current_username = get_ayon_username() + return self._current_username + + def reset(self) -> None: + self._users_cache.reset() + def get_user_items(self, project_name): """Get user items. 
diff --git a/client/ayon_core/tools/console_interpreter/abstract.py b/client/ayon_core/tools/console_interpreter/abstract.py index a945e6e498..953365d18c 100644 --- a/client/ayon_core/tools/console_interpreter/abstract.py +++ b/client/ayon_core/tools/console_interpreter/abstract.py @@ -1,6 +1,8 @@ +from __future__ import annotations + from abc import ABC, abstractmethod from dataclasses import dataclass, field -from typing import List, Dict, Optional +from typing import Optional @dataclass @@ -13,8 +15,8 @@ class TabItem: class InterpreterConfig: width: Optional[int] height: Optional[int] - splitter_sizes: List[int] = field(default_factory=list) - tabs: List[TabItem] = field(default_factory=list) + splitter_sizes: list[int] = field(default_factory=list) + tabs: list[TabItem] = field(default_factory=list) class AbstractInterpreterController(ABC): @@ -27,7 +29,7 @@ class AbstractInterpreterController(ABC): self, width: int, height: int, - splitter_sizes: List[int], - tabs: List[Dict[str, str]], - ): + splitter_sizes: list[int], + tabs: list[dict[str, str]], + ) -> None: pass diff --git a/client/ayon_core/tools/console_interpreter/control.py b/client/ayon_core/tools/console_interpreter/control.py index b931b6252c..4c5a4b3419 100644 --- a/client/ayon_core/tools/console_interpreter/control.py +++ b/client/ayon_core/tools/console_interpreter/control.py @@ -1,4 +1,5 @@ -from typing import List, Dict +from __future__ import annotations +from typing import Optional from ayon_core.lib import JSONSettingRegistry from ayon_core.lib.local_settings import get_launcher_local_dir @@ -11,13 +12,15 @@ from .abstract import ( class InterpreterController(AbstractInterpreterController): - def __init__(self): + def __init__(self, name: Optional[str] = None) -> None: + if name is None: + name = "python_interpreter_tool" self._registry = JSONSettingRegistry( - "python_interpreter_tool", + name, get_launcher_local_dir(), ) - def get_config(self): + def get_config(self) -> InterpreterConfig: width = None height = None splitter_sizes = [] @@ -54,9 +57,9 @@ class InterpreterController(AbstractInterpreterController): self, width: int, height: int, - splitter_sizes: List[int], - tabs: List[Dict[str, str]], - ): + splitter_sizes: list[int], + tabs: list[dict[str, str]], + ) -> None: self._registry.set_item("width", width) self._registry.set_item("height", height) self._registry.set_item("splitter_sizes", splitter_sizes) diff --git a/client/ayon_core/tools/console_interpreter/ui/utils.py b/client/ayon_core/tools/console_interpreter/ui/utils.py index 427483215d..c073b784ef 100644 --- a/client/ayon_core/tools/console_interpreter/ui/utils.py +++ b/client/ayon_core/tools/console_interpreter/ui/utils.py @@ -1,42 +1,42 @@ -import os import sys import collections +class _CustomSTD: + def __init__(self, orig_std, write_callback): + self.orig_std = orig_std + self._valid_orig = bool(orig_std) + self._write_callback = write_callback + + def __getattr__(self, attr): + return getattr(self.orig_std, attr) + + def __setattr__(self, key, value): + if key in ("orig_std", "_valid_orig", "_write_callback"): + super().__setattr__(key, value) + else: + setattr(self.orig_std, key, value) + + def write(self, text): + if self._valid_orig: + self.orig_std.write(text) + self._write_callback(text) + + class StdOEWrap: def __init__(self): - self._origin_stdout_write = None - self._origin_stderr_write = None - self._listening = False self.lines = collections.deque() - - if not sys.stdout: - sys.stdout = open(os.devnull, "w") - - if not sys.stderr: 
- sys.stderr = open(os.devnull, "w") - - if self._origin_stdout_write is None: - self._origin_stdout_write = sys.stdout.write - - if self._origin_stderr_write is None: - self._origin_stderr_write = sys.stderr.write - self._listening = True - sys.stdout.write = self._stdout_listener - sys.stderr.write = self._stderr_listener + + self._stdout_wrap = _CustomSTD(sys.stdout, self._listener) + self._stderr_wrap = _CustomSTD(sys.stderr, self._listener) + + sys.stdout = self._stdout_wrap + sys.stderr = self._stderr_wrap def stop_listen(self): self._listening = False - def _stdout_listener(self, text): + def _listener(self, text): if self._listening: self.lines.append(text) - if self._origin_stdout_write is not None: - self._origin_stdout_write(text) - - def _stderr_listener(self, text): - if self._listening: - self.lines.append(text) - if self._origin_stderr_write is not None: - self._origin_stderr_write(text) diff --git a/client/ayon_core/tools/launcher/control.py b/client/ayon_core/tools/launcher/control.py index 85b362f9d7..f4656de787 100644 --- a/client/ayon_core/tools/launcher/control.py +++ b/client/ayon_core/tools/launcher/control.py @@ -1,10 +1,14 @@ from typing import Optional -from ayon_core.lib import Logger, get_ayon_username +from ayon_core.lib import Logger from ayon_core.lib.events import QueuedEventSystem from ayon_core.addon import AddonsManager from ayon_core.settings import get_project_settings, get_studio_settings -from ayon_core.tools.common_models import ProjectsModel, HierarchyModel +from ayon_core.tools.common_models import ( + ProjectsModel, + HierarchyModel, + UsersModel, +) from .abstract import ( AbstractLauncherFrontEnd, @@ -30,13 +34,12 @@ class BaseLauncherController( self._addons_manager = None - self._username = NOT_SET - self._selection_model = LauncherSelectionModel(self) self._projects_model = ProjectsModel(self) self._hierarchy_model = HierarchyModel(self) self._actions_model = ActionsModel(self) self._workfiles_model = WorkfilesModel(self) + self._users_model = UsersModel(self) @property def log(self): @@ -209,6 +212,7 @@ class BaseLauncherController( self._projects_model.reset() self._hierarchy_model.reset() + self._users_model.reset() self._actions_model.refresh() self._projects_model.refresh() @@ -229,8 +233,10 @@ class BaseLauncherController( self._emit_event("controller.refresh.actions.finished") - def get_my_tasks_entity_ids(self, project_name: str): - username = self._get_my_username() + def get_my_tasks_entity_ids( + self, project_name: str + ) -> dict[str, list[str]]: + username = self._users_model.get_current_username() assignees = [] if username: assignees.append(username) @@ -238,10 +244,5 @@ class BaseLauncherController( project_name, assignees ) - def _get_my_username(self): - if self._username is NOT_SET: - self._username = get_ayon_username() - return self._username - def _emit_event(self, topic, data=None): self.emit_event(topic, data, "controller") diff --git a/client/ayon_core/tools/launcher/ui/actions_widget.py b/client/ayon_core/tools/launcher/ui/actions_widget.py index 31b303ca2b..0e763a208a 100644 --- a/client/ayon_core/tools/launcher/ui/actions_widget.py +++ b/client/ayon_core/tools/launcher/ui/actions_widget.py @@ -1,22 +1,12 @@ import time -import uuid import collections from qtpy import QtWidgets, QtCore, QtGui from ayon_core.lib import Logger -from ayon_core.lib.attribute_definitions import ( - UILabelDef, - EnumDef, - TextDef, - BoolDef, - NumberDef, - HiddenDef, -) +from ayon_core.pipeline.actions import 
webaction_fields_to_attribute_defs from ayon_core.tools.flickcharm import FlickCharm -from ayon_core.tools.utils import ( - get_qt_icon, -) +from ayon_core.tools.utils import get_qt_icon from ayon_core.tools.attribute_defs import AttributeDefinitionsDialog from ayon_core.tools.launcher.abstract import WebactionContext @@ -1173,74 +1163,7 @@ class ActionsWidget(QtWidgets.QWidget): float - 'label', 'value', 'placeholder', 'min', 'max' """ - attr_defs = [] - for config_field in config_fields: - field_type = config_field["type"] - attr_def = None - if field_type == "label": - label = config_field.get("value") - if label is None: - label = config_field.get("text") - attr_def = UILabelDef( - label, key=uuid.uuid4().hex - ) - elif field_type == "boolean": - value = config_field["value"] - if isinstance(value, str): - value = value.lower() == "true" - - attr_def = BoolDef( - config_field["name"], - default=value, - label=config_field.get("label"), - ) - elif field_type == "text": - attr_def = TextDef( - config_field["name"], - default=config_field.get("value"), - label=config_field.get("label"), - placeholder=config_field.get("placeholder"), - multiline=config_field.get("multiline", False), - regex=config_field.get("regex"), - # syntax=config_field["syntax"], - ) - elif field_type in ("integer", "float"): - value = config_field.get("value") - if isinstance(value, str): - if field_type == "integer": - value = int(value) - else: - value = float(value) - attr_def = NumberDef( - config_field["name"], - default=value, - label=config_field.get("label"), - decimals=0 if field_type == "integer" else 5, - # placeholder=config_field.get("placeholder"), - minimum=config_field.get("min"), - maximum=config_field.get("max"), - ) - elif field_type in ("select", "multiselect"): - attr_def = EnumDef( - config_field["name"], - items=config_field["options"], - default=config_field.get("value"), - label=config_field.get("label"), - multiselection=field_type == "multiselect", - ) - elif field_type == "hidden": - attr_def = HiddenDef( - config_field["name"], - default=config_field.get("value"), - ) - - if attr_def is None: - print(f"Unknown config field type: {field_type}") - attr_def = UILabelDef( - f"Unknown field type '{field_type}", - key=uuid.uuid4().hex - ) - attr_defs.append(attr_def) + attr_defs = webaction_fields_to_attribute_defs(config_fields) dialog = AttributeDefinitionsDialog( attr_defs, diff --git a/client/ayon_core/tools/launcher/ui/hierarchy_page.py b/client/ayon_core/tools/launcher/ui/hierarchy_page.py index 47388d9685..9d5cb8e8d0 100644 --- a/client/ayon_core/tools/launcher/ui/hierarchy_page.py +++ b/client/ayon_core/tools/launcher/ui/hierarchy_page.py @@ -2,19 +2,47 @@ import qtawesome from qtpy import QtWidgets, QtCore from ayon_core.tools.utils import ( - PlaceholderLineEdit, SquareButton, RefreshButton, ProjectsCombobox, FoldersWidget, TasksWidget, - NiceCheckbox, ) -from ayon_core.tools.utils.lib import checkstate_int_to_enum +from ayon_core.tools.utils.folders_widget import FoldersFiltersWidget from .workfiles_page import WorkfilesPage +class LauncherFoldersWidget(FoldersWidget): + focused_in = QtCore.Signal() + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._folders_view.installEventFilter(self) + + def eventFilter(self, obj, event): + if event.type() == QtCore.QEvent.FocusIn: + self.focused_in.emit() + return False + + +class LauncherTasksWidget(TasksWidget): + focused_in = QtCore.Signal() + + def __init__(self, *args, **kwargs) -> None: + 
super().__init__(*args, **kwargs) + self._tasks_view.installEventFilter(self) + + def deselect(self): + sel_model = self._tasks_view.selectionModel() + sel_model.clearSelection() + + def eventFilter(self, obj, event): + if event.type() == QtCore.QEvent.FocusIn: + self.focused_in.emit() + return False + + class HierarchyPage(QtWidgets.QWidget): def __init__(self, controller, parent): super().__init__(parent) @@ -46,34 +74,15 @@ class HierarchyPage(QtWidgets.QWidget): content_body.setOrientation(QtCore.Qt.Horizontal) # - filters - filters_widget = QtWidgets.QWidget(self) - - folders_filter_text = PlaceholderLineEdit(filters_widget) - folders_filter_text.setPlaceholderText("Filter folders...") - - my_tasks_tooltip = ( - "Filter folders and task to only those you are assigned to." - ) - my_tasks_label = QtWidgets.QLabel("My tasks", filters_widget) - my_tasks_label.setToolTip(my_tasks_tooltip) - - my_tasks_checkbox = NiceCheckbox(filters_widget) - my_tasks_checkbox.setChecked(False) - my_tasks_checkbox.setToolTip(my_tasks_tooltip) - - filters_layout = QtWidgets.QHBoxLayout(filters_widget) - filters_layout.setContentsMargins(0, 0, 0, 0) - filters_layout.addWidget(folders_filter_text, 1) - filters_layout.addWidget(my_tasks_label, 0) - filters_layout.addWidget(my_tasks_checkbox, 0) + filters_widget = FoldersFiltersWidget(self) # - Folders widget - folders_widget = FoldersWidget(controller, content_body) + folders_widget = LauncherFoldersWidget(controller, content_body) folders_widget.set_header_visible(True) folders_widget.set_deselectable(True) # - Tasks widget - tasks_widget = TasksWidget(controller, content_body) + tasks_widget = LauncherTasksWidget(controller, content_body) # - Third page - Workfiles workfiles_page = WorkfilesPage(controller, content_body) @@ -93,17 +102,19 @@ class HierarchyPage(QtWidgets.QWidget): btn_back.clicked.connect(self._on_back_clicked) refresh_btn.clicked.connect(self._on_refresh_clicked) - folders_filter_text.textChanged.connect(self._on_filter_text_changed) - my_tasks_checkbox.stateChanged.connect( + filters_widget.text_changed.connect(self._on_filter_text_changed) + filters_widget.my_tasks_changed.connect( self._on_my_tasks_checkbox_state_changed ) + folders_widget.focused_in.connect(self._on_folders_focus) + tasks_widget.focused_in.connect(self._on_tasks_focus) self._is_visible = False self._controller = controller + self._filters_widget = filters_widget self._btn_back = btn_back self._projects_combobox = projects_combobox - self._my_tasks_checkbox = my_tasks_checkbox self._folders_widget = folders_widget self._tasks_widget = tasks_widget self._workfiles_page = workfiles_page @@ -126,8 +137,9 @@ class HierarchyPage(QtWidgets.QWidget): self._folders_widget.refresh() self._tasks_widget.refresh() self._workfiles_page.refresh() + # Update my tasks self._on_my_tasks_checkbox_state_changed( - self._my_tasks_checkbox.checkState() + self._filters_widget.is_my_tasks_checked() ) def _on_back_clicked(self): @@ -139,15 +151,21 @@ class HierarchyPage(QtWidgets.QWidget): def _on_filter_text_changed(self, text): self._folders_widget.set_name_filter(text) - def _on_my_tasks_checkbox_state_changed(self, state): + def _on_my_tasks_checkbox_state_changed(self, enabled: bool) -> None: folder_ids = None task_ids = None - state = checkstate_int_to_enum(state) - if state == QtCore.Qt.Checked: + if enabled: entity_ids = self._controller.get_my_tasks_entity_ids( self._project_name ) folder_ids = entity_ids["folder_ids"] task_ids = entity_ids["task_ids"] + 
self._folders_widget.set_folder_ids_filter(folder_ids) self._tasks_widget.set_task_ids_filter(task_ids) + + def _on_folders_focus(self): + self._workfiles_page.deselect() + + def _on_tasks_focus(self): + self._workfiles_page.deselect() diff --git a/client/ayon_core/tools/launcher/ui/workfiles_page.py b/client/ayon_core/tools/launcher/ui/workfiles_page.py index 1ea223031e..d81221f38d 100644 --- a/client/ayon_core/tools/launcher/ui/workfiles_page.py +++ b/client/ayon_core/tools/launcher/ui/workfiles_page.py @@ -3,7 +3,7 @@ from typing import Optional import ayon_api from qtpy import QtCore, QtWidgets, QtGui -from ayon_core.tools.utils import get_qt_icon +from ayon_core.tools.utils import get_qt_icon, DeselectableTreeView from ayon_core.tools.launcher.abstract import AbstractLauncherFrontEnd VERSION_ROLE = QtCore.Qt.UserRole + 1 @@ -127,7 +127,7 @@ class WorkfilesModel(QtGui.QStandardItemModel): return icon -class WorkfilesView(QtWidgets.QTreeView): +class WorkfilesView(DeselectableTreeView): def drawBranches(self, painter, rect, index): return @@ -165,6 +165,10 @@ class WorkfilesPage(QtWidgets.QWidget): def refresh(self) -> None: self._workfiles_model.refresh() + def deselect(self): + sel_model = self._workfiles_view.selectionModel() + sel_model.clearSelection() + def _on_refresh(self) -> None: self._workfiles_proxy.sort(0, QtCore.Qt.DescendingOrder) diff --git a/client/ayon_core/tools/loader/abstract.py b/client/ayon_core/tools/loader/abstract.py index 9c7934d2db..a11663a56f 100644 --- a/client/ayon_core/tools/loader/abstract.py +++ b/client/ayon_core/tools/loader/abstract.py @@ -316,43 +316,34 @@ class ActionItem: Args: identifier (str): Action identifier. label (str): Action label. - icon (dict[str, Any]): Action icon definition. - tooltip (str): Action tooltip. + group_label (Optional[str]): Group label. + icon (Optional[dict[str, Any]]): Action icon definition. + tooltip (Optional[str]): Action tooltip. + order (int): Action order. + data (Optional[dict[str, Any]]): Additional action data. options (Union[list[AbstractAttrDef], list[qargparse.QArgument]]): Action options. Note: 'qargparse' is considered as deprecated. - order (int): Action order. - project_name (str): Project name. - folder_ids (list[str]): Folder ids. - product_ids (list[str]): Product ids. - version_ids (list[str]): Version ids. - representation_ids (list[str]): Representation ids. - """ + """ def __init__( self, - identifier, - label, - icon, - tooltip, - options, - order, - project_name, - folder_ids, - product_ids, - version_ids, - representation_ids, + identifier: str, + label: str, + group_label: Optional[str], + icon: Optional[dict[str, Any]], + tooltip: Optional[str], + order: int, + data: Optional[dict[str, Any]], + options: Optional[list], ): self.identifier = identifier self.label = label + self.group_label = group_label self.icon = icon self.tooltip = tooltip - self.options = options + self.data = data self.order = order - self.project_name = project_name - self.folder_ids = folder_ids - self.product_ids = product_ids - self.version_ids = version_ids - self.representation_ids = representation_ids + self.options = options def _options_to_data(self): options = self.options @@ -364,30 +355,26 @@ class ActionItem: # future development of detached UI tools it would be better to be # prepared for it. raise NotImplementedError( - "{}.to_data is not implemented. 
Use Attribute definitions"
-            " from 'ayon_core.lib' instead of 'qargparse'.".format(
-                self.__class__.__name__
-            )
+            f"{self.__class__.__name__}.to_data is not implemented."
+            " Use Attribute definitions from 'ayon_core.lib'"
+            " instead of 'qargparse'."
         )
 
-    def to_data(self):
+    def to_data(self) -> dict[str, Any]:
         options = self._options_to_data()
         return {
             "identifier": self.identifier,
             "label": self.label,
+            "group_label": self.group_label,
             "icon": self.icon,
             "tooltip": self.tooltip,
-            "options": options,
             "order": self.order,
-            "project_name": self.project_name,
-            "folder_ids": self.folder_ids,
-            "product_ids": self.product_ids,
-            "version_ids": self.version_ids,
-            "representation_ids": self.representation_ids,
+            "data": self.data,
+            "options": options,
         }
 
     @classmethod
-    def from_data(cls, data):
+    def from_data(cls, data) -> "ActionItem":
         options = data["options"]
         if options:
             options = deserialize_attr_defs(options)
@@ -666,6 +653,21 @@ class FrontendLoaderController(_BaseLoaderController):
         """
         pass
 
+    @abstractmethod
+    def get_my_tasks_entity_ids(
+        self, project_name: str
+    ) -> dict[str, list[str]]:
+        """Get entity ids for my tasks.
+
+        Args:
+            project_name (str): Project name.
+
+        Returns:
+            dict[str, list[str]]: Folder and task ids.
+
+        """
+        pass
+
     @abstractmethod
     def get_available_tags_by_entity_type(
         self, project_name: str
@@ -990,43 +992,35 @@ class FrontendLoaderController(_BaseLoaderController):
 
     # Load action items
     @abstractmethod
-    def get_versions_action_items(self, project_name, version_ids):
+    def get_action_items(
+        self,
+        project_name: str,
+        entity_ids: set[str],
+        entity_type: str,
+    ) -> list[ActionItem]:
         """Action items for versions selection.
 
         Args:
             project_name (str): Project name.
-            version_ids (Iterable[str]): Version ids.
+            entity_ids (set[str]): Entity ids.
+            entity_type (str): Entity type.
 
         Returns:
             list[ActionItem]: List of action items.
+
         """
-
-        pass
-
-    @abstractmethod
-    def get_representations_action_items(
-        self, project_name, representation_ids
-    ):
-        """Action items for representations selection.
-
-        Args:
-            project_name (str): Project name.
-            representation_ids (Iterable[str]): Representation ids.
-
-        Returns:
-            list[ActionItem]: List of action items.
-        """
-
         pass
 
     @abstractmethod
     def trigger_action_item(
         self,
-        identifier,
-        options,
-        project_name,
-        version_ids,
-        representation_ids
+        identifier: str,
+        project_name: str,
+        selected_ids: set[str],
+        selected_entity_type: str,
+        data: Optional[dict[str, Any]],
+        options: dict[str, Any],
+        form_values: dict[str, Any],
     ):
         """Trigger action item.
 
@@ -1044,13 +1038,15 @@ class FrontendLoaderController(_BaseLoaderController):
         }
 
         Args:
-            identifier (str): Action identifier.
-            options (dict[str, Any]): Action option values from UI.
+            identifier (str): Plugin identifier.
             project_name (str): Project name.
-            version_ids (Iterable[str]): Version ids.
-            representation_ids (Iterable[str]): Representation ids.
-        """
-
+            selected_ids (set[str]): Selected entity ids.
+            selected_entity_type (str): Selected entity type.
+            data (Optional[dict[str, Any]]): Additional action item data.
+            options (dict[str, Any]): Action option values from UI.
+            form_values (dict[str, Any]): Action form values from UI.
+
+ """ pass @abstractmethod diff --git a/client/ayon_core/tools/loader/control.py b/client/ayon_core/tools/loader/control.py index 9f159bfb21..2802ad7040 100644 --- a/client/ayon_core/tools/loader/control.py +++ b/client/ayon_core/tools/loader/control.py @@ -2,13 +2,17 @@ from __future__ import annotations import logging import uuid -from typing import Optional +from typing import Optional, Any import ayon_api from ayon_core.settings import get_project_settings from ayon_core.pipeline import get_current_host_name -from ayon_core.lib import NestedCacheItem, CacheItem, filter_profiles +from ayon_core.lib import ( + NestedCacheItem, + CacheItem, + filter_profiles, +) from ayon_core.lib.events import QueuedEventSystem from ayon_core.pipeline import Anatomy, get_current_context from ayon_core.host import ILoadHost @@ -18,12 +22,14 @@ from ayon_core.tools.common_models import ( ThumbnailsModel, TagItem, ProductTypeIconMapping, + UsersModel, ) from .abstract import ( BackendLoaderController, FrontendLoaderController, - ProductTypesFilter + ProductTypesFilter, + ActionItem, ) from .models import ( SelectionModel, @@ -32,6 +38,8 @@ from .models import ( SiteSyncModel ) +NOT_SET = object() + class ExpectedSelection: def __init__(self, controller): @@ -124,6 +132,7 @@ class LoaderController(BackendLoaderController, FrontendLoaderController): self._loader_actions_model = LoaderActionsModel(self) self._thumbnails_model = ThumbnailsModel() self._sitesync_model = SiteSyncModel(self) + self._users_model = UsersModel(self) @property def log(self): @@ -160,6 +169,7 @@ class LoaderController(BackendLoaderController, FrontendLoaderController): self._projects_model.reset() self._thumbnails_model.reset() self._sitesync_model.reset() + self._users_model.reset() self._projects_model.refresh() @@ -235,6 +245,17 @@ class LoaderController(BackendLoaderController, FrontendLoaderController): output[folder_id] = label return output + def get_my_tasks_entity_ids( + self, project_name: str + ) -> dict[str, list[str]]: + username = self._users_model.get_current_username() + assignees = [] + if username: + assignees.append(username) + return self._hierarchy_model.get_entity_ids_for_assignees( + project_name, assignees + ) + def get_available_tags_by_entity_type( self, project_name: str ) -> dict[str, list[str]]: @@ -296,45 +317,47 @@ class LoaderController(BackendLoaderController, FrontendLoaderController): project_name, product_ids, group_name ) - def get_versions_action_items(self, project_name, version_ids): - return self._loader_actions_model.get_versions_action_items( - project_name, version_ids) - - def get_representations_action_items( - self, project_name, representation_ids): - action_items = ( - self._loader_actions_model.get_representations_action_items( - project_name, representation_ids) + def get_action_items( + self, + project_name: str, + entity_ids: set[str], + entity_type: str, + ) -> list[ActionItem]: + action_items = self._loader_actions_model.get_action_items( + project_name, entity_ids, entity_type ) - action_items.extend(self._sitesync_model.get_sitesync_action_items( - project_name, representation_ids) + site_sync_items = self._sitesync_model.get_sitesync_action_items( + project_name, entity_ids, entity_type ) - + action_items.extend(site_sync_items) return action_items def trigger_action_item( self, - identifier, - options, - project_name, - version_ids, - representation_ids + identifier: str, + project_name: str, + selected_ids: set[str], + selected_entity_type: str, + data: Optional[dict[str, 
Any]], + options: dict[str, Any], + form_values: dict[str, Any], ): if self._sitesync_model.is_sitesync_action(identifier): self._sitesync_model.trigger_action_item( - identifier, project_name, - representation_ids + data, ) return self._loader_actions_model.trigger_action_item( - identifier, - options, - project_name, - version_ids, - representation_ids + identifier=identifier, + project_name=project_name, + selected_ids=selected_ids, + selected_entity_type=selected_entity_type, + data=data, + options=options, + form_values=form_values, ) # Selection model wrappers @@ -476,20 +499,6 @@ class LoaderController(BackendLoaderController, FrontendLoaderController): def is_standard_projects_filter_enabled(self): return self._host is not None - def _get_project_anatomy(self, project_name): - if not project_name: - return None - cache = self._project_anatomy_cache[project_name] - if not cache.is_valid: - cache.update_data(Anatomy(project_name)) - return cache.get_data() - - def _create_event_system(self): - return QueuedEventSystem() - - def _emit_event(self, topic, data=None): - self._event_system.emit(topic, data or {}, "controller") - def get_product_types_filter(self): output = ProductTypesFilter( is_allow_list=False, @@ -545,3 +554,17 @@ class LoaderController(BackendLoaderController, FrontendLoaderController): product_types=profile["filter_product_types"] ) return output + + def _create_event_system(self): + return QueuedEventSystem() + + def _emit_event(self, topic, data=None): + self._event_system.emit(topic, data or {}, "controller") + + def _get_project_anatomy(self, project_name): + if not project_name: + return None + cache = self._project_anatomy_cache[project_name] + if not cache.is_valid: + cache.update_data(Anatomy(project_name)) + return cache.get_data() diff --git a/client/ayon_core/tools/loader/models/actions.py b/client/ayon_core/tools/loader/models/actions.py index b792f92dfd..3db1792247 100644 --- a/client/ayon_core/tools/loader/models/actions.py +++ b/client/ayon_core/tools/loader/models/actions.py @@ -5,10 +5,16 @@ import traceback import inspect import collections import uuid +from typing import Optional, Callable, Any import ayon_api -from ayon_core.lib import NestedCacheItem +from ayon_core.lib import NestedCacheItem, Logger +from ayon_core.pipeline.actions import ( + LoaderActionsContext, + LoaderActionSelection, + SelectionEntitiesCache, +) from ayon_core.pipeline.load import ( discover_loader_plugins, ProductLoaderPlugin, @@ -23,6 +29,7 @@ from ayon_core.pipeline.load import ( from ayon_core.tools.loader.abstract import ActionItem ACTIONS_MODEL_SENDER = "actions.model" +LOADER_PLUGIN_ID = "__loader_plugin__" NOT_SET = object() @@ -44,6 +51,7 @@ class LoaderActionsModel: loaders_cache_lifetime = 30 def __init__(self, controller): + self._log = Logger.get_logger(self.__class__.__name__) self._controller = controller self._current_context_project = NOT_SET self._loaders_by_identifier = NestedCacheItem( @@ -52,6 +60,15 @@ class LoaderActionsModel: levels=1, lifetime=self.loaders_cache_lifetime) self._repre_loaders = NestedCacheItem( levels=1, lifetime=self.loaders_cache_lifetime) + self._loader_actions = LoaderActionsContext() + + self._projects_cache = NestedCacheItem(levels=1, lifetime=60) + self._folders_cache = NestedCacheItem(levels=2, lifetime=300) + self._tasks_cache = NestedCacheItem(levels=2, lifetime=300) + self._products_cache = NestedCacheItem(levels=2, lifetime=300) + self._versions_cache = NestedCacheItem(levels=2, lifetime=1200) + 
self._representations_cache = NestedCacheItem(levels=2, lifetime=1200) + self._repre_parents_cache = NestedCacheItem(levels=2, lifetime=1200) def reset(self): """Reset the model with all cached items.""" @@ -60,64 +77,58 @@ class LoaderActionsModel: self._loaders_by_identifier.reset() self._product_loaders.reset() self._repre_loaders.reset() + self._loader_actions.reset() - def get_versions_action_items(self, project_name, version_ids): - """Get action items for given version ids. + self._folders_cache.reset() + self._tasks_cache.reset() + self._products_cache.reset() + self._versions_cache.reset() + self._representations_cache.reset() + self._repre_parents_cache.reset() - Args: - project_name (str): Project name. - version_ids (Iterable[str]): Version ids. + def get_action_items( + self, + project_name: str, + entity_ids: set[str], + entity_type: str, + ) -> list[ActionItem]: + version_context_by_id = {} + repre_context_by_id = {} + if entity_type == "representation": + ( + version_context_by_id, + repre_context_by_id + ) = self._contexts_for_representations(project_name, entity_ids) - Returns: - list[ActionItem]: List of action items. - """ + if entity_type == "version": + ( + version_context_by_id, + repre_context_by_id + ) = self._contexts_for_versions(project_name, entity_ids) - ( - version_context_by_id, - repre_context_by_id - ) = self._contexts_for_versions( - project_name, - version_ids - ) - return self._get_action_items_for_contexts( + action_items = self._get_action_items_for_contexts( project_name, version_context_by_id, repre_context_by_id ) - - def get_representations_action_items( - self, project_name, representation_ids - ): - """Get action items for given representation ids. - - Args: - project_name (str): Project name. - representation_ids (Iterable[str]): Representation ids. - - Returns: - list[ActionItem]: List of action items. - """ - - ( - product_context_by_id, - repre_context_by_id - ) = self._contexts_for_representations( + action_items.extend(self._get_loader_action_items( project_name, - representation_ids - ) - return self._get_action_items_for_contexts( - project_name, - product_context_by_id, - repre_context_by_id - ) + entity_ids, + entity_type, + version_context_by_id, + repre_context_by_id, + )) + return action_items def trigger_action_item( self, - identifier, - options, - project_name, - version_ids, - representation_ids + identifier: str, + project_name: str, + selected_ids: set[str], + selected_entity_type: str, + data: Optional[dict[str, Any]], + options: dict[str, Any], + form_values: dict[str, Any], ): """Trigger action by identifier. @@ -128,15 +139,21 @@ class LoaderActionsModel: happened. Args: - identifier (str): Loader identifier. - options (dict[str, Any]): Loader option values. + identifier (str): Plugin identifier. project_name (str): Project name. - version_ids (Iterable[str]): Version ids. - representation_ids (Iterable[str]): Representation ids. - """ + selected_ids (set[str]): Selected entity ids. + selected_entity_type (str): Selected entity type. + data (Optional[dict[str, Any]]): Additional action item data. + options (dict[str, Any]): Loader option values. + form_values (dict[str, Any]): Form values. 
+ """ event_data = { "identifier": identifier, + "project_name": project_name, + "selected_ids": list(selected_ids), + "selected_entity_type": selected_entity_type, + "data": data, "id": uuid.uuid4().hex, } self._controller.emit_event( @@ -144,24 +161,60 @@ class LoaderActionsModel: event_data, ACTIONS_MODEL_SENDER, ) - loader = self._get_loader_by_identifier(project_name, identifier) - if representation_ids is not None: - error_info = self._trigger_representation_loader( - loader, - options, - project_name, - representation_ids, + if identifier != LOADER_PLUGIN_ID: + result = None + crashed = False + try: + result = self._loader_actions.execute_action( + identifier=identifier, + selection=LoaderActionSelection( + project_name, + selected_ids, + selected_entity_type, + ), + data=data, + form_values=form_values, + ) + + except Exception: + crashed = True + self._log.warning( + f"Failed to execute action '{identifier}'", + exc_info=True, + ) + + event_data["result"] = result + event_data["crashed"] = crashed + self._controller.emit_event( + "loader.action.finished", + event_data, + ACTIONS_MODEL_SENDER, ) - elif version_ids is not None: + return + + loader = self._get_loader_by_identifier( + project_name, data["loader"] + ) + entity_type = data["entity_type"] + entity_ids = data["entity_ids"] + if entity_type == "version": error_info = self._trigger_version_loader( loader, options, project_name, - version_ids, + entity_ids, + ) + elif entity_type == "representation": + error_info = self._trigger_representation_loader( + loader, + options, + project_name, + entity_ids, ) else: raise NotImplementedError( - "Invalid arguments to trigger action item") + f"Invalid entity type '{entity_type}' to trigger action item" + ) event_data["error_info"] = error_info self._controller.emit_event( @@ -276,28 +329,26 @@ class LoaderActionsModel: self, loader, contexts, - project_name, - folder_ids=None, - product_ids=None, - version_ids=None, - representation_ids=None, + entity_ids, + entity_type, repre_name=None, ): label = self._get_action_label(loader) if repre_name: - label = "{} ({})".format(label, repre_name) + label = f"{label} ({repre_name})" return ActionItem( - get_loader_identifier(loader), + LOADER_PLUGIN_ID, + data={ + "entity_ids": entity_ids, + "entity_type": entity_type, + "loader": get_loader_identifier(loader), + }, label=label, + group_label=None, icon=self._get_action_icon(loader), tooltip=self._get_action_tooltip(loader), - options=loader.get_options(contexts), order=loader.order, - project_name=project_name, - folder_ids=folder_ids, - product_ids=product_ids, - version_ids=version_ids, - representation_ids=representation_ids, + options=loader.get_options(contexts), ) def _get_loaders(self, project_name): @@ -351,15 +402,6 @@ class LoaderActionsModel: loaders_by_identifier = loaders_by_identifier_c.get_data() return loaders_by_identifier.get(identifier) - def _actions_sorter(self, action_item): - """Sort the Loaders by their order and then their name. - - Returns: - tuple[int, str]: Sort keys. - """ - - return action_item.order, action_item.label - def _contexts_for_versions(self, project_name, version_ids): """Get contexts for given version ids. 
@@ -385,8 +427,8 @@ class LoaderActionsModel: if not project_name and not version_ids: return version_context_by_id, repre_context_by_id - version_entities = ayon_api.get_versions( - project_name, version_ids=version_ids + version_entities = self._get_versions( + project_name, version_ids ) version_entities_by_id = {} version_entities_by_product_id = collections.defaultdict(list) @@ -397,18 +439,18 @@ class LoaderActionsModel: version_entities_by_product_id[product_id].append(version_entity) _product_ids = set(version_entities_by_product_id.keys()) - _product_entities = ayon_api.get_products( - project_name, product_ids=_product_ids + _product_entities = self._get_products( + project_name, _product_ids ) product_entities_by_id = {p["id"]: p for p in _product_entities} _folder_ids = {p["folderId"] for p in product_entities_by_id.values()} - _folder_entities = ayon_api.get_folders( - project_name, folder_ids=_folder_ids + _folder_entities = self._get_folders( + project_name, _folder_ids ) folder_entities_by_id = {f["id"]: f for f in _folder_entities} - project_entity = ayon_api.get_project(project_name) + project_entity = self._get_project(project_name) for version_id, version_entity in version_entities_by_id.items(): product_id = version_entity["productId"] @@ -422,8 +464,15 @@ class LoaderActionsModel: "version": version_entity, } - repre_entities = ayon_api.get_representations( - project_name, version_ids=version_ids) + all_repre_ids = set() + for repre_ids in self._get_repre_ids_by_version_ids( + project_name, version_ids + ).values(): + all_repre_ids |= repre_ids + + repre_entities = self._get_representations( + project_name, all_repre_ids + ) for repre_entity in repre_entities: version_id = repre_entity["versionId"] version_entity = version_entities_by_id[version_id] @@ -459,49 +508,54 @@ class LoaderActionsModel: Returns: tuple[list[dict[str, Any]], list[dict[str, Any]]]: Version and representation contexts. 
- """ - product_context_by_id = {} + """ + version_context_by_id = {} repre_context_by_id = {} if not project_name and not repre_ids: - return product_context_by_id, repre_context_by_id + return version_context_by_id, repre_context_by_id - repre_entities = list(ayon_api.get_representations( - project_name, representation_ids=repre_ids - )) + repre_entities = self._get_representations( + project_name, repre_ids + ) version_ids = {r["versionId"] for r in repre_entities} - version_entities = ayon_api.get_versions( - project_name, version_ids=version_ids + version_entities = self._get_versions( + project_name, version_ids ) version_entities_by_id = { v["id"]: v for v in version_entities } product_ids = {v["productId"] for v in version_entities_by_id.values()} - product_entities = ayon_api.get_products( - project_name, product_ids=product_ids + product_entities = self._get_products( + project_name, product_ids + ) product_entities_by_id = { p["id"]: p for p in product_entities } folder_ids = {p["folderId"] for p in product_entities_by_id.values()} - folder_entities = ayon_api.get_folders( - project_name, folder_ids=folder_ids + folder_entities = self._get_folders( + project_name, folder_ids ) folder_entities_by_id = { f["id"]: f for f in folder_entities } - project_entity = ayon_api.get_project(project_name) + project_entity = self._get_project(project_name) - for product_id, product_entity in product_entities_by_id.items(): + version_context_by_id = {} + for version_id, version_entity in version_entities_by_id.items(): + product_id = version_entity["productId"] + product_entity = product_entities_by_id[product_id] folder_id = product_entity["folderId"] folder_entity = folder_entities_by_id[folder_id] - product_context_by_id[product_id] = { + version_context_by_id[version_id] = { "project": project_entity, "folder": folder_entity, "product": product_entity, + "version": version_entity, } for repre_entity in repre_entities: @@ -519,7 +573,125 @@ class LoaderActionsModel: "version": version_entity, "representation": repre_entity, } - return product_context_by_id, repre_context_by_id + return version_context_by_id, repre_context_by_id + + def _get_project(self, project_name: str) -> dict[str, Any]: + cache = self._projects_cache[project_name] + if not cache.is_valid: + cache.update_data(ayon_api.get_project(project_name)) + return cache.get_data() + + def _get_folders( + self, project_name: str, folder_ids: set[str] + ) -> list[dict[str, Any]]: + """Get folders by ids.""" + return self._get_entities( + project_name, + folder_ids, + self._folders_cache, + ayon_api.get_folders, + "folder_ids", + ) + + def _get_products( + self, project_name: str, product_ids: set[str] + ) -> list[dict[str, Any]]: + """Get products by ids.""" + return self._get_entities( + project_name, + product_ids, + self._products_cache, + ayon_api.get_products, + "product_ids", + ) + + def _get_versions( + self, project_name: str, version_ids: set[str] + ) -> list[dict[str, Any]]: + """Get versions by ids.""" + return self._get_entities( + project_name, + version_ids, + self._versions_cache, + ayon_api.get_versions, + "version_ids", + ) + + def _get_representations( + self, project_name: str, representation_ids: set[str] + ) -> list[dict[str, Any]]: + """Get representations by ids.""" + return self._get_entities( + project_name, + representation_ids, + self._representations_cache, + ayon_api.get_representations, + "representation_ids", + ) + + def _get_repre_ids_by_version_ids( + self, project_name: str, version_ids: set[str] + 
) -> dict[str, set[str]]: + output = {} + if not version_ids: + return output + + project_cache = self._repre_parents_cache[project_name] + missing_ids = set() + for version_id in version_ids: + cache = project_cache[version_id] + if cache.is_valid: + output[version_id] = cache.get_data() + else: + missing_ids.add(version_id) + + if missing_ids: + repre_cache = self._representations_cache[project_name] + repres_by_parent_id = collections.defaultdict(list) + for repre in ayon_api.get_representations( + project_name, version_ids=missing_ids + ): + version_id = repre["versionId"] + repre_cache[repre["id"]].update_data(repre) + repres_by_parent_id[version_id].append(repre) + + for version_id, repres in repres_by_parent_id.items(): + repre_ids = { + repre["id"] + for repre in repres + } + output[version_id] = set(repre_ids) + project_cache[version_id].update_data(repre_ids) + + return output + + def _get_entities( + self, + project_name: str, + entity_ids: set[str], + cache: NestedCacheItem, + getter: Callable, + filter_arg: str, + ) -> list[dict[str, Any]]: + entities = [] + if not entity_ids: + return entities + + missing_ids = set() + project_cache = cache[project_name] + for entity_id in entity_ids: + entity_cache = project_cache[entity_id] + if entity_cache.is_valid: + entities.append(entity_cache.get_data()) + else: + missing_ids.add(entity_id) + + if missing_ids: + for entity in getter(project_name, **{filter_arg: missing_ids}): + entities.append(entity) + entity_id = entity["id"] + project_cache[entity_id].update_data(entity) + return entities def _get_action_items_for_contexts( self, @@ -557,51 +729,137 @@ class LoaderActionsModel: if not filtered_repre_contexts: continue - repre_ids = set() - repre_version_ids = set() - repre_product_ids = set() - repre_folder_ids = set() - for repre_context in filtered_repre_contexts: - repre_ids.add(repre_context["representation"]["id"]) - repre_product_ids.add(repre_context["product"]["id"]) - repre_version_ids.add(repre_context["version"]["id"]) - repre_folder_ids.add(repre_context["folder"]["id"]) + repre_ids = { + repre_context["representation"]["id"] + for repre_context in filtered_repre_contexts + } item = self._create_loader_action_item( loader, repre_contexts, - project_name=project_name, - folder_ids=repre_folder_ids, - product_ids=repre_product_ids, - version_ids=repre_version_ids, - representation_ids=repre_ids, + repre_ids, + "representation", repre_name=repre_name, ) action_items.append(item) # Product Loaders. - version_ids = set(version_context_by_id.keys()) product_folder_ids = set() product_ids = set() for product_context in version_context_by_id.values(): product_ids.add(product_context["product"]["id"]) product_folder_ids.add(product_context["folder"]["id"]) + version_ids = set(version_context_by_id.keys()) version_contexts = list(version_context_by_id.values()) for loader in product_loaders: item = self._create_loader_action_item( loader, version_contexts, - project_name=project_name, - folder_ids=product_folder_ids, - product_ids=product_ids, - version_ids=version_ids, + version_ids, + "version", ) action_items.append(item) - - action_items.sort(key=self._actions_sorter) return action_items + def _get_loader_action_items( + self, + project_name: str, + entity_ids: set[str], + entity_type: str, + version_context_by_id: dict[str, dict[str, Any]], + repre_context_by_id: dict[str, dict[str, Any]], + ) -> list[ActionItem]: + """ + + Args: + project_name (str): Project name. + entity_ids (set[str]): Selected entity ids. 
+ entity_type (str): Selected entity type. + version_context_by_id (dict[str, dict[str, Any]]): Version context + by id. + repre_context_by_id (dict[str, dict[str, Any]]): Representation + context by id. + + Returns: + list[ActionItem]: List of action items. + + """ + entities_cache = self._prepare_entities_cache( + project_name, + entity_type, + version_context_by_id, + repre_context_by_id, + ) + selection = LoaderActionSelection( + project_name, + entity_ids, + entity_type, + entities_cache=entities_cache + ) + items = [] + for action in self._loader_actions.get_action_items(selection): + items.append(ActionItem( + action.identifier, + label=action.label, + group_label=action.group_label, + icon=action.icon, + tooltip=None, # action.tooltip, + order=action.order, + data=action.data, + options=None, # action.options, + )) + return items + + def _prepare_entities_cache( + self, + project_name: str, + entity_type: str, + version_context_by_id: dict[str, dict[str, Any]], + repre_context_by_id: dict[str, dict[str, Any]], + ): + project_entity = None + folders_by_id = {} + products_by_id = {} + versions_by_id = {} + representations_by_id = {} + for context in version_context_by_id.values(): + if project_entity is None: + project_entity = context["project"] + folder_entity = context["folder"] + product_entity = context["product"] + version_entity = context["version"] + folders_by_id[folder_entity["id"]] = folder_entity + products_by_id[product_entity["id"]] = product_entity + versions_by_id[version_entity["id"]] = version_entity + + for context in repre_context_by_id.values(): + repre_entity = context["representation"] + representations_by_id[repre_entity["id"]] = repre_entity + + # Mapping has to be for all child entities which is available for + # representations only if version is selected + representation_ids_by_version_id = {} + if entity_type == "version": + representation_ids_by_version_id = { + version_id: set() + for version_id in versions_by_id + } + for context in repre_context_by_id.values(): + repre_entity = context["representation"] + v_id = repre_entity["versionId"] + representation_ids_by_version_id[v_id].add(repre_entity["id"]) + + return SelectionEntitiesCache( + project_name, + project_entity=project_entity, + folders_by_id=folders_by_id, + products_by_id=products_by_id, + versions_by_id=versions_by_id, + representations_by_id=representations_by_id, + representation_ids_by_version_id=representation_ids_by_version_id, + ) + def _trigger_version_loader( self, loader, @@ -634,12 +892,12 @@ class LoaderActionsModel: project_name, version_ids=version_ids )) product_ids = {v["productId"] for v in version_entities} - product_entities = ayon_api.get_products( - project_name, product_ids=product_ids + product_entities = self._get_products( + project_name, product_ids ) product_entities_by_id = {p["id"]: p for p in product_entities} folder_ids = {p["folderId"] for p in product_entities_by_id.values()} - folder_entities = ayon_api.get_folders( + folder_entities = self._get_folders( project_name, folder_ids=folder_ids ) folder_entities_by_id = {f["id"]: f for f in folder_entities} diff --git a/client/ayon_core/tools/loader/models/products.py b/client/ayon_core/tools/loader/models/products.py index 7915a75bcf..83a017613d 100644 --- a/client/ayon_core/tools/loader/models/products.py +++ b/client/ayon_core/tools/loader/models/products.py @@ -7,6 +7,7 @@ from typing import TYPE_CHECKING, Iterable, Optional import arrow import ayon_api +from ayon_api.graphql_queries import 
project_graphql_query from ayon_api.operations import OperationsSession from ayon_core.lib import NestedCacheItem @@ -202,7 +203,7 @@ class ProductsModel: cache = self._product_type_items_cache[project_name] if not cache.is_valid: icons_mapping = self._get_product_type_icons(project_name) - product_types = ayon_api.get_project_product_types(project_name) + product_types = self._get_project_product_types(project_name) cache.update_data([ ProductTypeItem( product_type["name"], @@ -462,6 +463,24 @@ class ProductsModel: PRODUCTS_MODEL_SENDER ) + def _get_project_product_types(self, project_name: str) -> list[dict]: + """This is a temporary solution for product types fetching. + + There was a bug in ayon_api.get_project(...) which did not use GraphQl + but REST instead. That is fixed in ayon-python-api 1.2.6 that will + be as part of ayon launcher 1.4.3 release. + + """ + if not project_name: + return [] + query = project_graphql_query({"productTypes.name"}) + query.set_variable_value("projectName", project_name) + parsed_data = query.query(ayon_api.get_server_api_connection()) + project = parsed_data["project"] + if project is None: + return [] + return project["productTypes"] + def _get_product_type_icons( self, project_name: Optional[str] ) -> ProductTypeIconMapping: diff --git a/client/ayon_core/tools/loader/models/sitesync.py b/client/ayon_core/tools/loader/models/sitesync.py index 3a54a1b5f8..a7bbda18a3 100644 --- a/client/ayon_core/tools/loader/models/sitesync.py +++ b/client/ayon_core/tools/loader/models/sitesync.py @@ -1,6 +1,7 @@ from __future__ import annotations import collections +from typing import Any from ayon_api import ( get_representations, @@ -246,26 +247,32 @@ class SiteSyncModel: output[repre_id] = repre_cache.get_data() return output - def get_sitesync_action_items(self, project_name, representation_ids): + def get_sitesync_action_items( + self, project_name, entity_ids, entity_type + ): """ Args: project_name (str): Project name. - representation_ids (Iterable[str]): Representation ids. + entity_ids (set[str]): Selected entity ids. + entity_type (str): Selected entity type. Returns: list[ActionItem]: Actions that can be shown in loader. + """ + if entity_type != "representation": + return [] if not self.is_sitesync_enabled(project_name): return [] repres_status = self.get_representations_sync_status( - project_name, representation_ids + project_name, entity_ids ) repre_ids_per_identifier = collections.defaultdict(set) - for repre_id in representation_ids: + for repre_id in entity_ids: repre_status = repres_status[repre_id] local_status, remote_status = repre_status @@ -293,36 +300,32 @@ class SiteSyncModel: return action_items - def is_sitesync_action(self, identifier): + def is_sitesync_action(self, identifier: str) -> bool: """Should be `identifier` handled by SiteSync. Args: - identifier (str): Action identifier. + identifier (str): Plugin identifier. Returns: bool: Should action be handled by SiteSync. - """ - return identifier in { - UPLOAD_IDENTIFIER, - DOWNLOAD_IDENTIFIER, - REMOVE_IDENTIFIER, - } + """ + return identifier == "sitesync.loader.action" def trigger_action_item( self, - identifier, - project_name, - representation_ids + project_name: str, + data: dict[str, Any], ): """Resets status for site_name or remove local files. Args: - identifier (str): Action identifier. project_name (str): Project name. - representation_ids (Iterable[str]): Representation ids. - """ + data (dict[str, Any]): Action item data. 
+ """ + representation_ids = data["representation_ids"] + action_identifier = data["action_identifier"] active_site = self.get_active_site(project_name) remote_site = self.get_remote_site(project_name) @@ -346,17 +349,17 @@ class SiteSyncModel: for repre_id in representation_ids: repre_entity = repre_entities_by_id.get(repre_id) product_type = product_type_by_repre_id[repre_id] - if identifier == DOWNLOAD_IDENTIFIER: + if action_identifier == DOWNLOAD_IDENTIFIER: self._add_site( project_name, repre_entity, active_site, product_type ) - elif identifier == UPLOAD_IDENTIFIER: + elif action_identifier == UPLOAD_IDENTIFIER: self._add_site( project_name, repre_entity, remote_site, product_type ) - elif identifier == REMOVE_IDENTIFIER: + elif action_identifier == REMOVE_IDENTIFIER: self._sitesync_addon.remove_site( project_name, repre_id, @@ -476,27 +479,27 @@ class SiteSyncModel: self, project_name, representation_ids, - identifier, + action_identifier, label, tooltip, icon_name ): return ActionItem( - identifier, - label, + "sitesync.loader.action", + label=label, + group_label=None, icon={ "type": "awesome-font", "name": icon_name, "color": "#999999" }, tooltip=tooltip, - options={}, order=1, - project_name=project_name, - folder_ids=[], - product_ids=[], - version_ids=[], - representation_ids=representation_ids, + data={ + "representation_ids": representation_ids, + "action_identifier": action_identifier, + }, + options=None, ) def _add_site(self, project_name, repre_entity, site_name, product_type): diff --git a/client/ayon_core/tools/loader/ui/actions_utils.py b/client/ayon_core/tools/loader/ui/actions_utils.py index b601cd95bd..cf39bc348c 100644 --- a/client/ayon_core/tools/loader/ui/actions_utils.py +++ b/client/ayon_core/tools/loader/ui/actions_utils.py @@ -1,6 +1,7 @@ import uuid +from typing import Optional, Any -from qtpy import QtWidgets, QtGui +from qtpy import QtWidgets, QtGui, QtCore import qtawesome from ayon_core.lib.attribute_definitions import AbstractAttrDef @@ -11,9 +12,29 @@ from ayon_core.tools.utils.widgets import ( OptionDialog, ) from ayon_core.tools.utils import get_qt_icon +from ayon_core.tools.loader.abstract import ActionItem -def show_actions_menu(action_items, global_point, one_item_selected, parent): +def _actions_sorter(item: tuple[ActionItem, str, str]): + """Sort the Loaders by their order and then their name. + + Returns: + tuple[int, str]: Sort keys. 
+ + """ + action_item, group_label, label = item + if group_label is None: + group_label = label + label = "" + return action_item.order, group_label, label + + +def show_actions_menu( + action_items: list[ActionItem], + global_point: QtCore.QPoint, + one_item_selected: bool, + parent: QtWidgets.QWidget, +) -> tuple[Optional[ActionItem], Optional[dict[str, Any]]]: selected_action_item = None selected_options = None @@ -26,8 +47,16 @@ def show_actions_menu(action_items, global_point, one_item_selected, parent): menu = OptionalMenu(parent) - action_items_by_id = {} + action_items_with_labels = [] for action_item in action_items: + action_items_with_labels.append( + (action_item, action_item.group_label, action_item.label) + ) + + group_menu_by_label = {} + action_items_by_id = {} + for item in sorted(action_items_with_labels, key=_actions_sorter): + action_item, _, _ = item item_id = uuid.uuid4().hex action_items_by_id[item_id] = action_item item_options = action_item.options @@ -50,7 +79,18 @@ def show_actions_menu(action_items, global_point, one_item_selected, parent): action.setData(item_id) - menu.addAction(action) + group_label = action_item.group_label + if group_label: + group_menu = group_menu_by_label.get(group_label) + if group_menu is None: + group_menu = OptionalMenu(group_label, menu) + if icon is not None: + group_menu.setIcon(icon) + menu.addMenu(group_menu) + group_menu_by_label[group_label] = group_menu + group_menu.addAction(action) + else: + menu.addAction(action) action = menu.exec_(global_point) if action is not None: diff --git a/client/ayon_core/tools/loader/ui/folders_widget.py b/client/ayon_core/tools/loader/ui/folders_widget.py index f238eabcef..6de0b17ea2 100644 --- a/client/ayon_core/tools/loader/ui/folders_widget.py +++ b/client/ayon_core/tools/loader/ui/folders_widget.py @@ -1,11 +1,11 @@ +from typing import Optional + import qtpy from qtpy import QtWidgets, QtCore, QtGui -from ayon_core.tools.utils import ( - RecursiveSortFilterProxyModel, - DeselectableTreeView, -) from ayon_core.style import get_objected_colors +from ayon_core.tools.utils import DeselectableTreeView +from ayon_core.tools.utils.folders_widget import FoldersProxyModel from ayon_core.tools.utils import ( FoldersQtModel, @@ -260,7 +260,7 @@ class LoaderFoldersWidget(QtWidgets.QWidget): QtWidgets.QAbstractItemView.ExtendedSelection) folders_model = LoaderFoldersModel(controller) - folders_proxy_model = RecursiveSortFilterProxyModel() + folders_proxy_model = FoldersProxyModel() folders_proxy_model.setSourceModel(folders_model) folders_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) @@ -314,6 +314,15 @@ class LoaderFoldersWidget(QtWidgets.QWidget): if name: self._folders_view.expandAll() + def set_folder_ids_filter(self, folder_ids: Optional[list[str]]): + """Set filter of folder ids. + + Args: + folder_ids (list[str]): The list of folder ids. 
+ + """ + self._folders_proxy_model.set_folder_ids_filter(folder_ids) + def set_merged_products_selection(self, items): """ diff --git a/client/ayon_core/tools/loader/ui/products_widget.py b/client/ayon_core/tools/loader/ui/products_widget.py index e5bb75a208..ddd6ce8554 100644 --- a/client/ayon_core/tools/loader/ui/products_widget.py +++ b/client/ayon_core/tools/loader/ui/products_widget.py @@ -420,8 +420,9 @@ class ProductsWidget(QtWidgets.QWidget): if version_id is not None: version_ids.add(version_id) - action_items = self._controller.get_versions_action_items( - project_name, version_ids) + action_items = self._controller.get_action_items( + project_name, version_ids, "version" + ) # Prepare global point where to show the menu global_point = self._products_view.mapToGlobal(point) @@ -437,11 +438,13 @@ class ProductsWidget(QtWidgets.QWidget): return self._controller.trigger_action_item( - action_item.identifier, - options, - action_item.project_name, - version_ids=action_item.version_ids, - representation_ids=action_item.representation_ids, + identifier=action_item.identifier, + project_name=project_name, + selected_ids=version_ids, + selected_entity_type="version", + data=action_item.data, + options=options, + form_values={}, ) def _on_selection_change(self): diff --git a/client/ayon_core/tools/loader/ui/repres_widget.py b/client/ayon_core/tools/loader/ui/repres_widget.py index d19ad306a3..33bbf46b34 100644 --- a/client/ayon_core/tools/loader/ui/repres_widget.py +++ b/client/ayon_core/tools/loader/ui/repres_widget.py @@ -384,8 +384,8 @@ class RepresentationsWidget(QtWidgets.QWidget): def _on_context_menu(self, point): repre_ids = self._get_selected_repre_ids() - action_items = self._controller.get_representations_action_items( - self._selected_project_name, repre_ids + action_items = self._controller.get_action_items( + self._selected_project_name, repre_ids, "representation" ) global_point = self._repre_view.mapToGlobal(point) result = show_actions_menu( @@ -399,9 +399,11 @@ class RepresentationsWidget(QtWidgets.QWidget): return self._controller.trigger_action_item( - action_item.identifier, - options, - action_item.project_name, - version_ids=action_item.version_ids, - representation_ids=action_item.representation_ids, + identifier=action_item.identifier, + project_name=self._selected_project_name, + selected_ids=repre_ids, + selected_entity_type="representation", + data=action_item.data, + options=options, + form_values={}, ) diff --git a/client/ayon_core/tools/loader/ui/tasks_widget.py b/client/ayon_core/tools/loader/ui/tasks_widget.py index cc7e2e9c95..3a38739cf0 100644 --- a/client/ayon_core/tools/loader/ui/tasks_widget.py +++ b/client/ayon_core/tools/loader/ui/tasks_widget.py @@ -1,11 +1,11 @@ import collections import hashlib +from typing import Optional from qtpy import QtWidgets, QtCore, QtGui from ayon_core.style import get_default_entity_icon_color from ayon_core.tools.utils import ( - RecursiveSortFilterProxyModel, DeselectableTreeView, TasksQtModel, TASKS_MODEL_SENDER_NAME, @@ -15,9 +15,11 @@ from ayon_core.tools.utils.tasks_widget import ( ITEM_NAME_ROLE, PARENT_ID_ROLE, TASK_TYPE_ROLE, + TasksProxyModel, ) from ayon_core.tools.utils.lib import RefreshThread, get_qt_icon + # Role that can't clash with default 'tasks_widget' roles FOLDER_LABEL_ROLE = QtCore.Qt.UserRole + 100 NO_TASKS_ID = "--no-task--" @@ -295,7 +297,7 @@ class LoaderTasksQtModel(TasksQtModel): return super().data(index, role) -class LoaderTasksProxyModel(RecursiveSortFilterProxyModel): +class 
LoaderTasksProxyModel(TasksProxyModel): def lessThan(self, left, right): if left.data(ITEM_ID_ROLE) == NO_TASKS_ID: return False @@ -303,6 +305,12 @@ class LoaderTasksProxyModel(RecursiveSortFilterProxyModel): return True return super().lessThan(left, right) + def filterAcceptsRow(self, row, parent_index): + source_index = self.sourceModel().index(row, 0, parent_index) + if source_index.data(ITEM_ID_ROLE) == NO_TASKS_ID: + return True + return super().filterAcceptsRow(row, parent_index) + class LoaderTasksWidget(QtWidgets.QWidget): refreshed = QtCore.Signal() @@ -363,6 +371,15 @@ class LoaderTasksWidget(QtWidgets.QWidget): if name: self._tasks_view.expandAll() + def set_task_ids_filter(self, task_ids: Optional[list[str]]): + """Set filter of task ids. + + Args: + task_ids (list[str]): The list of task ids. + + """ + self._tasks_proxy_model.set_task_ids_filter(task_ids) + def refresh(self): self._tasks_model.refresh() diff --git a/client/ayon_core/tools/loader/ui/window.py b/client/ayon_core/tools/loader/ui/window.py index df5beb708f..e4677a62d9 100644 --- a/client/ayon_core/tools/loader/ui/window.py +++ b/client/ayon_core/tools/loader/ui/window.py @@ -1,18 +1,24 @@ from __future__ import annotations +from typing import Optional + from qtpy import QtWidgets, QtCore, QtGui from ayon_core.resources import get_ayon_icon_filepath from ayon_core.style import load_stylesheet +from ayon_core.pipeline.actions import LoaderActionResult from ayon_core.tools.utils import ( - PlaceholderLineEdit, + MessageOverlayObject, ErrorMessageBox, ThumbnailPainterWidget, RefreshButton, GoToCurrentButton, + ProjectsCombobox, + get_qt_icon, + FoldersFiltersWidget, ) +from ayon_core.tools.attribute_defs import AttributeDefinitionsDialog from ayon_core.tools.utils.lib import center_window -from ayon_core.tools.utils import ProjectsCombobox from ayon_core.tools.common_models import StatusItem from ayon_core.tools.loader.abstract import ProductTypeItem from ayon_core.tools.loader.control import LoaderController @@ -141,6 +147,8 @@ class LoaderWindow(QtWidgets.QWidget): if controller is None: controller = LoaderController() + overlay_object = MessageOverlayObject(self) + main_splitter = QtWidgets.QSplitter(self) context_splitter = QtWidgets.QSplitter(main_splitter) @@ -170,15 +178,14 @@ class LoaderWindow(QtWidgets.QWidget): context_top_layout.addWidget(go_to_current_btn, 0) context_top_layout.addWidget(refresh_btn, 0) - folders_filter_input = PlaceholderLineEdit(context_widget) - folders_filter_input.setPlaceholderText("Folder name filter...") + filters_widget = FoldersFiltersWidget(context_widget) folders_widget = LoaderFoldersWidget(controller, context_widget) context_layout = QtWidgets.QVBoxLayout(context_widget) context_layout.setContentsMargins(0, 0, 0, 0) context_layout.addWidget(context_top_widget, 0) - context_layout.addWidget(folders_filter_input, 0) + context_layout.addWidget(filters_widget, 0) context_layout.addWidget(folders_widget, 1) tasks_widget = LoaderTasksWidget(controller, context_widget) @@ -247,9 +254,12 @@ class LoaderWindow(QtWidgets.QWidget): projects_combobox.refreshed.connect(self._on_projects_refresh) folders_widget.refreshed.connect(self._on_folders_refresh) products_widget.refreshed.connect(self._on_products_refresh) - folders_filter_input.textChanged.connect( + filters_widget.text_changed.connect( self._on_folder_filter_change ) + filters_widget.my_tasks_changed.connect( + self._on_my_tasks_checkbox_state_changed + ) search_bar.filter_changed.connect(self._on_filter_change) 
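Below is a small, self-contained illustration (not part of the patch) of the sort key used by `_actions_sorter` in actions_utils.py above: items are ordered by `order`, then group label, then their own label, and ungrouped actions fall back to their own label as the group key. `FakeAction` is a hypothetical stand-in for the real `ActionItem`; the sample labels are made up.

    from dataclasses import dataclass
    from typing import Optional


    @dataclass
    class FakeAction:
        order: int
        group_label: Optional[str]
        label: str


    def sort_key(item):
        # Same key as `_actions_sorter`: ungrouped actions use their own
        # label as the group key and an empty sub-label.
        action, group_label, label = item
        if group_label is None:
            group_label = label
            label = ""
        return action.order, group_label, label


    actions = [
        FakeAction(0, "Load", "Reference"),
        FakeAction(0, None, "Copy path"),
        FakeAction(0, "Load", "Import"),
        FakeAction(10, None, "Delete"),
    ]
    items = [(a, a.group_label, a.label) for a in actions]
    for action, _, _ in sorted(items, key=sort_key):
        print(action.order, action.group_label, action.label)
    # -> "Copy path", "Load/Import", "Load/Reference", then "Delete"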
product_group_checkbox.stateChanged.connect( self._on_product_group_change @@ -294,6 +304,12 @@ class LoaderWindow(QtWidgets.QWidget): "controller.reset.finished", self._on_controller_reset_finish, ) + controller.register_event_callback( + "loader.action.finished", + self._on_loader_action_finished, + ) + + self._overlay_object = overlay_object self._group_dialog = ProductGroupDialog(controller, self) @@ -303,7 +319,7 @@ class LoaderWindow(QtWidgets.QWidget): self._refresh_btn = refresh_btn self._projects_combobox = projects_combobox - self._folders_filter_input = folders_filter_input + self._filters_widget = filters_widget self._folders_widget = folders_widget self._tasks_widget = tasks_widget @@ -406,6 +422,20 @@ class LoaderWindow(QtWidgets.QWidget): if self._reset_on_show: self.refresh() + def _show_toast_message( + self, + message: str, + success: bool = True, + message_id: Optional[str] = None, + ): + message_type = None + if not success: + message_type = "error" + + self._overlay_object.add_message( + message, message_type, message_id=message_id + ) + def _show_group_dialog(self): project_name = self._projects_combobox.get_selected_project_name() if not project_name: @@ -421,9 +451,21 @@ class LoaderWindow(QtWidgets.QWidget): self._group_dialog.set_product_ids(project_name, product_ids) self._group_dialog.show() - def _on_folder_filter_change(self, text): + def _on_folder_filter_change(self, text: str) -> None: self._folders_widget.set_name_filter(text) + def _on_my_tasks_checkbox_state_changed(self, enabled: bool) -> None: + folder_ids = None + task_ids = None + if enabled: + entity_ids = self._controller.get_my_tasks_entity_ids( + self._selected_project_name + ) + folder_ids = entity_ids["folder_ids"] + task_ids = entity_ids["task_ids"] + self._folders_widget.set_folder_ids_filter(folder_ids) + self._tasks_widget.set_task_ids_filter(task_ids) + def _on_product_group_change(self): self._products_widget.set_enable_grouping( self._product_group_checkbox.isChecked() @@ -485,6 +527,10 @@ class LoaderWindow(QtWidgets.QWidget): if not self._refresh_handler.project_refreshed: self._projects_combobox.refresh() self._update_filters() + # Update my tasks + self._on_my_tasks_checkbox_state_changed( + self._filters_widget.is_my_tasks_checked() + ) def _on_load_finished(self, event): error_info = event["error_info"] @@ -494,6 +540,77 @@ class LoaderWindow(QtWidgets.QWidget): box = LoadErrorMessageBox(error_info, self) box.show() + def _on_loader_action_finished(self, event): + crashed = event["crashed"] + if crashed: + self._show_toast_message( + "Action failed", + success=False, + ) + return + + result: Optional[LoaderActionResult] = event["result"] + if result is None: + return + + if result.message: + self._show_toast_message( + result.message, result.success + ) + + if result.form is None: + return + + form = result.form + dialog = AttributeDefinitionsDialog( + form.fields, + title=form.title, + parent=self, + ) + if result.form_values: + dialog.set_values(result.form_values) + submit_label = form.submit_label + submit_icon = form.submit_icon + cancel_label = form.cancel_label + cancel_icon = form.cancel_icon + + if submit_icon: + submit_icon = get_qt_icon(submit_icon) + if cancel_icon: + cancel_icon = get_qt_icon(cancel_icon) + + if submit_label: + dialog.set_submit_label(submit_label) + else: + dialog.set_submit_visible(False) + + if submit_icon: + dialog.set_submit_icon(submit_icon) + + if cancel_label: + dialog.set_cancel_label(cancel_label) + else: + dialog.set_cancel_visible(False) + 
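For reference, a minimal sketch (not part of the patch) of the payload the handler sends back to the controller a few lines below once the user submits the follow-up form: the selection from the `loader.action.finished` event is reused and only `form_values` is new. The sample event values are made up.

    def build_retrigger_kwargs(event: dict, form_values: dict) -> dict:
        # Keys mirror the event payload read by the handler; only
        # `form_values` comes from the submitted dialog.
        return {
            "identifier": event["identifier"],
            "project_name": event["project_name"],
            "selected_ids": event["selected_ids"],
            "selected_entity_type": event["selected_entity_type"],
            "options": {},
            "data": event["data"],
            "form_values": form_values,
        }


    sample_event = {
        "identifier": "example.action",
        "project_name": "demo_project",
        "selected_ids": {"1234"},
        "selected_entity_type": "representation",
        "data": {},
    }
    print(build_retrigger_kwargs(sample_event, {"comment": "Looks good"}))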
+ if cancel_icon: + dialog.set_cancel_icon(cancel_icon) + + dialog.setMinimumSize(300, 140) + result = dialog.exec_() + if result != QtWidgets.QDialog.Accepted: + return + + form_values = dialog.get_values() + self._controller.trigger_action_item( + identifier=event["identifier"], + project_name=event["project_name"], + selected_ids=event["selected_ids"], + selected_entity_type=event["selected_entity_type"], + options={}, + data=event["data"], + form_values=form_values, + ) + def _on_project_selection_changed(self, event): self._selected_project_name = event["project_name"] self._update_filters() diff --git a/client/ayon_core/tools/publisher/abstract.py b/client/ayon_core/tools/publisher/abstract.py index 14da15793d..bfd0948519 100644 --- a/client/ayon_core/tools/publisher/abstract.py +++ b/client/ayon_core/tools/publisher/abstract.py @@ -295,6 +295,21 @@ class AbstractPublisherFrontend(AbstractPublisherCommon): """Get folder id from folder path.""" pass + @abstractmethod + def get_my_tasks_entity_ids( + self, project_name: str + ) -> dict[str, list[str]]: + """Get entity ids for my tasks. + + Args: + project_name (str): Project name. + + Returns: + dict[str, list[str]]: Folder and task ids. + + """ + pass + # --- Create --- @abstractmethod def get_creator_items(self) -> Dict[str, "CreatorItem"]: diff --git a/client/ayon_core/tools/publisher/control.py b/client/ayon_core/tools/publisher/control.py index 038816c6fc..3d11131dc3 100644 --- a/client/ayon_core/tools/publisher/control.py +++ b/client/ayon_core/tools/publisher/control.py @@ -11,7 +11,11 @@ from ayon_core.pipeline import ( registered_host, get_process_id, ) -from ayon_core.tools.common_models import ProjectsModel, HierarchyModel +from ayon_core.tools.common_models import ( + ProjectsModel, + HierarchyModel, + UsersModel, +) from .models import ( PublishModel, @@ -101,6 +105,7 @@ class PublisherController( # Cacher of avalon documents self._projects_model = ProjectsModel(self) self._hierarchy_model = HierarchyModel(self) + self._users_model = UsersModel(self) @property def log(self): @@ -317,6 +322,17 @@ class PublisherController( return False return True + def get_my_tasks_entity_ids( + self, project_name: str + ) -> dict[str, list[str]]: + username = self._users_model.get_current_username() + assignees = [] + if username: + assignees.append(username) + return self._hierarchy_model.get_entity_ids_for_assignees( + project_name, assignees + ) + # --- Publish specific callbacks --- def get_context_title(self): """Get context title for artist shown at the top of main window.""" @@ -359,6 +375,7 @@ class PublisherController( self._emit_event("controller.reset.started") self._hierarchy_model.reset() + self._users_model.reset() # Publish part must be reset after plugins self._create_model.reset() diff --git a/client/ayon_core/tools/publisher/models/create.py b/client/ayon_core/tools/publisher/models/create.py index 5098826b8b..b8518a7de6 100644 --- a/client/ayon_core/tools/publisher/models/create.py +++ b/client/ayon_core/tools/publisher/models/create.py @@ -1,5 +1,6 @@ import logging import re +import copy from typing import ( Union, List, @@ -34,6 +35,7 @@ from ayon_core.pipeline.create import ( ConvertorsOperationFailed, ConvertorItem, ) + from ayon_core.tools.publisher.abstract import ( AbstractPublisherBackend, CardMessageTypes, @@ -1098,7 +1100,7 @@ class CreateModel: creator_attributes[key] = attr_def.default elif attr_def.is_value_valid(value): - creator_attributes[key] = value + creator_attributes[key] = copy.deepcopy(value) def 
_set_instances_publish_attr_values( self, instance_ids, plugin_name, key, value diff --git a/client/ayon_core/tools/publisher/models/publish.py b/client/ayon_core/tools/publisher/models/publish.py index 97070d106f..cd99a952e3 100644 --- a/client/ayon_core/tools/publisher/models/publish.py +++ b/client/ayon_core/tools/publisher/models/publish.py @@ -21,6 +21,7 @@ from ayon_core.pipeline.plugin_discover import DiscoverResult from ayon_core.pipeline.publish import ( get_publish_instance_label, PublishError, + filter_crashed_publish_paths, ) from ayon_core.tools.publisher.abstract import AbstractPublisherBackend @@ -107,11 +108,14 @@ class PublishReportMaker: creator_discover_result: Optional[DiscoverResult] = None, convertor_discover_result: Optional[DiscoverResult] = None, publish_discover_result: Optional[DiscoverResult] = None, + blocking_crashed_paths: Optional[list[str]] = None, ): self._create_discover_result: Union[DiscoverResult, None] = None self._convert_discover_result: Union[DiscoverResult, None] = None self._publish_discover_result: Union[DiscoverResult, None] = None + self._blocking_crashed_paths: list[str] = [] + self._all_instances_by_id: Dict[str, pyblish.api.Instance] = {} self._plugin_data_by_id: Dict[str, Any] = {} self._current_plugin_id: Optional[str] = None @@ -120,6 +124,7 @@ class PublishReportMaker: creator_discover_result, convertor_discover_result, publish_discover_result, + blocking_crashed_paths, ) def reset( @@ -127,12 +132,14 @@ class PublishReportMaker: creator_discover_result: Union[DiscoverResult, None], convertor_discover_result: Union[DiscoverResult, None], publish_discover_result: Union[DiscoverResult, None], + blocking_crashed_paths: list[str], ): """Reset report and clear all data.""" self._create_discover_result = creator_discover_result self._convert_discover_result = convertor_discover_result self._publish_discover_result = publish_discover_result + self._blocking_crashed_paths = blocking_crashed_paths self._all_instances_by_id = {} self._plugin_data_by_id = {} @@ -242,9 +249,10 @@ class PublishReportMaker: "instances": instances_details, "context": self._extract_context_data(publish_context), "crashed_file_paths": crashed_file_paths, + "blocking_crashed_paths": list(self._blocking_crashed_paths), "id": uuid.uuid4().hex, "created_at": now.isoformat(), - "report_version": "1.1.0", + "report_version": "1.1.1", } def _add_plugin_data_item(self, plugin: pyblish.api.Plugin): @@ -959,11 +967,16 @@ class PublishModel: self._publish_plugins_proxy = PublishPluginsProxy( publish_plugins ) - + blocking_crashed_paths = filter_crashed_publish_paths( + create_context.get_current_project_name(), + set(create_context.publish_discover_result.crashed_file_paths), + project_settings=create_context.get_current_project_settings(), + ) self._publish_report.reset( create_context.creator_discover_result, create_context.convertor_discover_result, create_context.publish_discover_result, + blocking_crashed_paths, ) for plugin in create_context.publish_plugins_mismatch_targets: self._publish_report.set_plugin_skipped(plugin.id) diff --git a/client/ayon_core/tools/publisher/publish_report_viewer/report_items.py b/client/ayon_core/tools/publisher/publish_report_viewer/report_items.py index a3c5a7a2fd..24955d18c3 100644 --- a/client/ayon_core/tools/publisher/publish_report_viewer/report_items.py +++ b/client/ayon_core/tools/publisher/publish_report_viewer/report_items.py @@ -139,3 +139,6 @@ class PublishReport: self.logs = logs self.crashed_plugin_paths = 
report_data["crashed_file_paths"] + self.blocking_crashed_paths = report_data.get( + "blocking_crashed_paths", [] + ) diff --git a/client/ayon_core/tools/publisher/publish_report_viewer/widgets.py b/client/ayon_core/tools/publisher/publish_report_viewer/widgets.py index 5fa1c04dc0..225dd15ade 100644 --- a/client/ayon_core/tools/publisher/publish_report_viewer/widgets.py +++ b/client/ayon_core/tools/publisher/publish_report_viewer/widgets.py @@ -7,6 +7,7 @@ from ayon_core.tools.utils import ( SeparatorWidget, IconButton, paint_image_with_color, + get_qt_icon, ) from ayon_core.resources import get_image_path from ayon_core.style import get_objected_colors @@ -46,10 +47,13 @@ def get_pretty_milliseconds(value): class PluginLoadReportModel(QtGui.QStandardItemModel): + _blocking_icon = None + def __init__(self): super().__init__() self._traceback_by_filepath = {} self._items_by_filepath = {} + self._blocking_crashed_paths = set() self._is_active = True self._need_refresh = False @@ -75,6 +79,7 @@ class PluginLoadReportModel(QtGui.QStandardItemModel): for filepath in to_remove: self._traceback_by_filepath.pop(filepath) + self._blocking_crashed_paths = set(report.blocking_crashed_paths) self._update_items() def _update_items(self): @@ -83,6 +88,7 @@ class PluginLoadReportModel(QtGui.QStandardItemModel): parent = self.invisibleRootItem() if not self._traceback_by_filepath: parent.removeRows(0, parent.rowCount()) + self._items_by_filepath = {} return new_items = [] @@ -91,12 +97,18 @@ class PluginLoadReportModel(QtGui.QStandardItemModel): set(self._items_by_filepath) - set(self._traceback_by_filepath) ) for filepath in self._traceback_by_filepath: - if filepath in self._items_by_filepath: - continue - item = QtGui.QStandardItem(filepath) - new_items.append(item) - new_items_by_filepath[filepath] = item - self._items_by_filepath[filepath] = item + item = self._items_by_filepath.get(filepath) + if item is None: + item = QtGui.QStandardItem(filepath) + new_items.append(item) + new_items_by_filepath[filepath] = item + self._items_by_filepath[filepath] = item + + icon = None + if filepath.replace("\\", "/") in self._blocking_crashed_paths: + icon = self._get_blocking_icon() + + item.setData(icon, QtCore.Qt.DecorationRole) if new_items: parent.appendRows(new_items) @@ -113,6 +125,16 @@ class PluginLoadReportModel(QtGui.QStandardItemModel): item = self._items_by_filepath.pop(filepath) parent.removeRow(item.row()) + @classmethod + def _get_blocking_icon(cls): + if cls._blocking_icon is None: + cls._blocking_icon = get_qt_icon({ + "type": "material-symbols", + "name": "block", + "color": "red", + }) + return cls._blocking_icon + class DetailWidget(QtWidgets.QTextEdit): def __init__(self, text, *args, **kwargs): @@ -856,7 +878,7 @@ class PublishReportViewerWidget(QtWidgets.QFrame): report = PublishReport(report_data) self.set_report(report) - def set_report(self, report): + def set_report(self, report: PublishReport) -> None: self._ignore_selection_changes = True self._report_item = report @@ -866,6 +888,10 @@ class PublishReportViewerWidget(QtWidgets.QFrame): self._logs_text_widget.set_report(report) self._plugin_load_report_widget.set_report(report) self._plugins_details_widget.set_report(report) + if report.blocking_crashed_paths: + self._details_tab_widget.setCurrentWidget( + self._plugin_load_report_widget + ) self._ignore_selection_changes = False diff --git a/client/ayon_core/tools/publisher/widgets/card_view_widgets.py b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py index 
84786a671e..a9abd56584 100644 --- a/client/ayon_core/tools/publisher/widgets/card_view_widgets.py +++ b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py @@ -202,7 +202,7 @@ class ContextCardWidget(CardWidget): Is not visually under group widget and is always at the top of card view. """ - def __init__(self, parent): + def __init__(self, parent: QtWidgets.QWidget): super().__init__(parent) self._id = CONTEXT_ID @@ -211,7 +211,12 @@ class ContextCardWidget(CardWidget): icon_widget = PublishPixmapLabel(None, self) icon_widget.setObjectName("ProductTypeIconLabel") - label_widget = QtWidgets.QLabel(CONTEXT_LABEL, self) + label_widget = QtWidgets.QLabel(f"{CONTEXT_LABEL}", self) + # HTML text will cause that label start catch mouse clicks + # - disabling with changing interaction flag + label_widget.setTextInteractionFlags( + QtCore.Qt.NoTextInteraction + ) icon_layout = QtWidgets.QHBoxLayout() icon_layout.setContentsMargins(5, 5, 5, 5) @@ -288,6 +293,8 @@ class InstanceCardWidget(CardWidget): self._last_product_name = None self._last_variant = None self._last_label = None + self._last_folder_path = None + self._last_task_name = None icon_widget = IconValuePixmapLabel(group_icon, self) icon_widget.setObjectName("ProductTypeIconLabel") @@ -383,29 +390,54 @@ class InstanceCardWidget(CardWidget): self._icon_widget.setVisible(valid) self._context_warning.setVisible(not valid) + @staticmethod + def _get_card_widget_sub_label( + folder_path: Optional[str], + task_name: Optional[str], + ) -> str: + sublabel = "" + if folder_path: + folder_name = folder_path.rsplit("/", 1)[-1] + sublabel = f"{folder_name}" + if task_name: + sublabel += f" - {task_name}" + return sublabel + def _update_product_name(self): variant = self.instance.variant product_name = self.instance.product_name label = self.instance.label + folder_path = self.instance.folder_path + task_name = self.instance.task_name if ( variant == self._last_variant and product_name == self._last_product_name and label == self._last_label + and folder_path == self._last_folder_path + and task_name == self._last_task_name ): return self._last_variant = variant self._last_product_name = product_name self._last_label = label + self._last_folder_path = folder_path + self._last_task_name = task_name + # Make `variant` bold label = html_escape(self.instance.label) found_parts = set(re.findall(variant, label, re.IGNORECASE)) if found_parts: for part in found_parts: - replacement = "{}".format(part) + replacement = f"{part}" label = label.replace(part, replacement) + label = f"{label}" + sublabel = self._get_card_widget_sub_label(folder_path, task_name) + if sublabel: + label += f"
{sublabel}" + self._label_widget.setText(label) # HTML text will cause that label start catch mouse clicks # - disabling with changing interaction flag @@ -702,11 +734,9 @@ class InstanceCardView(AbstractInstanceView): def refresh(self): """Refresh instances in view based on CreatedContext.""" - self._make_sure_context_widget_exists() self._update_convertors_group() - context_info_by_id = self._controller.get_instances_context_info() # Prepare instances by group and identifiers by group @@ -814,6 +844,8 @@ class InstanceCardView(AbstractInstanceView): widget.setVisible(False) widget.deleteLater() + sorted_group_names.insert(0, CONTEXT_GROUP) + self._parent_id_by_id = parent_id_by_id self._instance_ids_by_parent_id = instance_ids_by_parent_id self._group_name_by_instance_id = group_by_instance_id @@ -881,7 +913,7 @@ class InstanceCardView(AbstractInstanceView): context_info, is_parent_active, group_icon, - group_widget + group_widget, ) widget.selected.connect(self._on_widget_selection) widget.active_changed.connect(self._on_active_changed) diff --git a/client/ayon_core/tools/publisher/widgets/create_context_widgets.py b/client/ayon_core/tools/publisher/widgets/create_context_widgets.py index faf2248181..405445c8eb 100644 --- a/client/ayon_core/tools/publisher/widgets/create_context_widgets.py +++ b/client/ayon_core/tools/publisher/widgets/create_context_widgets.py @@ -1,10 +1,14 @@ from qtpy import QtWidgets, QtCore from ayon_core.lib.events import QueuedEventSystem -from ayon_core.tools.utils import PlaceholderLineEdit, GoToCurrentButton from ayon_core.tools.common_models import HierarchyExpectedSelection -from ayon_core.tools.utils import FoldersWidget, TasksWidget +from ayon_core.tools.utils import ( + FoldersWidget, + TasksWidget, + FoldersFiltersWidget, + GoToCurrentButton, +) from ayon_core.tools.publisher.abstract import AbstractPublisherFrontend @@ -180,8 +184,7 @@ class CreateContextWidget(QtWidgets.QWidget): headers_widget = QtWidgets.QWidget(self) - folder_filter_input = PlaceholderLineEdit(headers_widget) - folder_filter_input.setPlaceholderText("Filter folders..") + filters_widget = FoldersFiltersWidget(headers_widget) current_context_btn = GoToCurrentButton(headers_widget) current_context_btn.setToolTip("Go to current context") @@ -189,7 +192,8 @@ class CreateContextWidget(QtWidgets.QWidget): headers_layout = QtWidgets.QHBoxLayout(headers_widget) headers_layout.setContentsMargins(0, 0, 0, 0) - headers_layout.addWidget(folder_filter_input, 1) + headers_layout.setSpacing(5) + headers_layout.addWidget(filters_widget, 1) headers_layout.addWidget(current_context_btn, 0) hierarchy_controller = CreateHierarchyController(controller) @@ -207,15 +211,17 @@ class CreateContextWidget(QtWidgets.QWidget): main_layout.setContentsMargins(0, 0, 0, 0) main_layout.setSpacing(0) main_layout.addWidget(headers_widget, 0) + main_layout.addSpacing(5) main_layout.addWidget(folders_widget, 2) main_layout.addWidget(tasks_widget, 1) folders_widget.selection_changed.connect(self._on_folder_change) tasks_widget.selection_changed.connect(self._on_task_change) current_context_btn.clicked.connect(self._on_current_context_click) - folder_filter_input.textChanged.connect(self._on_folder_filter_change) + filters_widget.text_changed.connect(self._on_folder_filter_change) + filters_widget.my_tasks_changed.connect(self._on_my_tasks_change) - self._folder_filter_input = folder_filter_input + self._filters_widget = filters_widget self._current_context_btn = current_context_btn self._folders_widget = folders_widget 
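A minimal sketch (not part of the patch) of the "My tasks" resolution used by the filter handlers in this widget (below) and in the loader window (above): the controller turns the current username into folder and task id filters, where `None` means "do not filter". `StubUsersModel` and `StubHierarchyModel` are hypothetical stand-ins for the real models; only the method names and the returned key names match the code in this diff.

    from typing import Optional


    class StubUsersModel:
        def get_current_username(self) -> Optional[str]:
            return "artist01"


    class StubHierarchyModel:
        def get_entity_ids_for_assignees(self, project_name, assignees):
            # The real model queries folders/tasks assigned to the users.
            return {"folder_ids": ["folder-a"], "task_ids": ["task-x"]}


    def get_my_tasks_entity_ids(project_name: str) -> dict:
        username = StubUsersModel().get_current_username()
        assignees = [username] if username else []
        return StubHierarchyModel().get_entity_ids_for_assignees(
            project_name, assignees
        )


    def resolve_filters(project_name: str, my_tasks_enabled: bool):
        # `None` means "no filter"; empty lists would hide everything.
        folder_ids = task_ids = None
        if my_tasks_enabled:
            entity_ids = get_my_tasks_entity_ids(project_name)
            folder_ids = entity_ids["folder_ids"]
            task_ids = entity_ids["task_ids"]
        return folder_ids, task_ids


    print(resolve_filters("demo_project", True))
    print(resolve_filters("demo_project", False))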
self._tasks_widget = tasks_widget @@ -285,6 +291,10 @@ class CreateContextWidget(QtWidgets.QWidget): self._hierarchy_controller.set_expected_selection( self._last_project_name, folder_id, task_name ) + # Update my tasks + self._on_my_tasks_change( + self._filters_widget.is_my_tasks_checked() + ) def _clear_selection(self): self._folders_widget.set_selected_folder(None) @@ -303,5 +313,17 @@ class CreateContextWidget(QtWidgets.QWidget): self._last_project_name, folder_id, task_name ) - def _on_folder_filter_change(self, text): + def _on_folder_filter_change(self, text: str) -> None: self._folders_widget.set_name_filter(text) + + def _on_my_tasks_change(self, enabled: bool) -> None: + folder_ids = None + task_ids = None + if enabled: + entity_ids = self._controller.get_my_tasks_entity_ids( + self._last_project_name + ) + folder_ids = entity_ids["folder_ids"] + task_ids = entity_ids["task_ids"] + self._folders_widget.set_folder_ids_filter(folder_ids) + self._tasks_widget.set_task_ids_filter(task_ids) diff --git a/client/ayon_core/tools/publisher/widgets/create_widget.py b/client/ayon_core/tools/publisher/widgets/create_widget.py index b9b3afd895..db93632471 100644 --- a/client/ayon_core/tools/publisher/widgets/create_widget.py +++ b/client/ayon_core/tools/publisher/widgets/create_widget.py @@ -310,9 +310,6 @@ class CreateWidget(QtWidgets.QWidget): folder_path = None if self._context_change_is_enabled(): folder_path = self._context_widget.get_selected_folder_path() - - if folder_path is None: - folder_path = self.get_current_folder_path() return folder_path or None def _get_folder_id(self): @@ -328,9 +325,6 @@ class CreateWidget(QtWidgets.QWidget): folder_path = self._context_widget.get_selected_folder_path() if folder_path: task_name = self._context_widget.get_selected_task_name() - - if not task_name: - task_name = self.get_current_task_name() return task_name def _set_context_enabled(self, enabled): @@ -710,11 +704,13 @@ class CreateWidget(QtWidgets.QWidget): def _on_first_show(self): width = self.width() - part = int(width / 4) - rem_width = width - part - self._main_splitter_widget.setSizes([part, rem_width]) - rem_width = rem_width - part - self._creators_splitter.setSizes([part, rem_width]) + part = int(width / 9) + context_width = part * 3 + create_sel_width = part * 2 + rem_width = width - context_width + self._main_splitter_widget.setSizes([context_width, rem_width]) + rem_width -= create_sel_width + self._creators_splitter.setSizes([create_sel_width, rem_width]) def showEvent(self, event): super().showEvent(event) diff --git a/client/ayon_core/tools/publisher/widgets/folders_dialog.py b/client/ayon_core/tools/publisher/widgets/folders_dialog.py index d2eb68310e..824ed728c9 100644 --- a/client/ayon_core/tools/publisher/widgets/folders_dialog.py +++ b/client/ayon_core/tools/publisher/widgets/folders_dialog.py @@ -1,7 +1,10 @@ from qtpy import QtWidgets from ayon_core.lib.events import QueuedEventSystem -from ayon_core.tools.utils import PlaceholderLineEdit, FoldersWidget +from ayon_core.tools.utils import ( + FoldersWidget, + FoldersFiltersWidget, +) from ayon_core.tools.publisher.abstract import AbstractPublisherFrontend @@ -43,8 +46,7 @@ class FoldersDialog(QtWidgets.QDialog): super().__init__(parent) self.setWindowTitle("Select folder") - filter_input = PlaceholderLineEdit(self) - filter_input.setPlaceholderText("Filter folders..") + filters_widget = FoldersFiltersWidget(self) folders_controller = FoldersDialogController(controller) folders_widget = FoldersWidget(folders_controller, 
self) @@ -59,7 +61,8 @@ class FoldersDialog(QtWidgets.QDialog): btns_layout.addWidget(cancel_btn) layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(filter_input, 0) + layout.setSpacing(5) + layout.addWidget(filters_widget, 0) layout.addWidget(folders_widget, 1) layout.addLayout(btns_layout, 0) @@ -68,12 +71,13 @@ class FoldersDialog(QtWidgets.QDialog): ) folders_widget.double_clicked.connect(self._on_ok_clicked) - filter_input.textChanged.connect(self._on_filter_change) + filters_widget.text_changed.connect(self._on_filter_change) + filters_widget.my_tasks_changed.connect(self._on_my_tasks_change) ok_btn.clicked.connect(self._on_ok_clicked) cancel_btn.clicked.connect(self._on_cancel_clicked) self._controller = controller - self._filter_input = filter_input + self._filters_widget = filters_widget self._ok_btn = ok_btn self._cancel_btn = cancel_btn @@ -88,6 +92,50 @@ class FoldersDialog(QtWidgets.QDialog): self._first_show = True self._default_height = 500 + self._project_name = None + + def showEvent(self, event): + """Refresh folders widget on show.""" + super().showEvent(event) + if self._first_show: + self._first_show = False + self._on_first_show() + # Refresh on show + self.reset(False) + + def reset(self, force=True): + """Reset widget.""" + if not force and not self._soft_reset_enabled: + return + + self._project_name = self._controller.get_current_project_name() + if self._soft_reset_enabled: + self._soft_reset_enabled = False + + self._folders_widget.set_project_name(self._project_name) + self._on_my_tasks_change(self._filters_widget.is_my_tasks_checked()) + + def get_selected_folder_path(self): + """Get selected folder path.""" + return self._selected_folder_path + + def set_selected_folders(self, folder_paths: list[str]) -> None: + """Change preselected folder before showing the dialog. + + This also resets model and clean filter. + """ + self.reset(False) + self._filters_widget.set_text("") + self._filters_widget.set_my_tasks_checked(False) + + folder_id = None + for folder_path in folder_paths: + folder_id = self._controller.get_folder_id_from_path(folder_path) + if folder_id: + break + if folder_id: + self._folders_widget.set_selected_folder(folder_id) + def _on_first_show(self): center = self.rect().center() size = self.size() @@ -103,27 +151,6 @@ class FoldersDialog(QtWidgets.QDialog): # Change reset enabled so model is reset on show event self._soft_reset_enabled = True - def showEvent(self, event): - """Refresh folders widget on show.""" - super().showEvent(event) - if self._first_show: - self._first_show = False - self._on_first_show() - # Refresh on show - self.reset(False) - - def reset(self, force=True): - """Reset widget.""" - if not force and not self._soft_reset_enabled: - return - - if self._soft_reset_enabled: - self._soft_reset_enabled = False - - self._folders_widget.set_project_name( - self._controller.get_current_project_name() - ) - def _on_filter_change(self, text): """Trigger change of filter of folders.""" self._folders_widget.set_name_filter(text) @@ -137,22 +164,11 @@ class FoldersDialog(QtWidgets.QDialog): ) self.done(1) - def set_selected_folders(self, folder_paths): - """Change preselected folder before showing the dialog. - - This also resets model and clean filter. 
- """ - self.reset(False) - self._filter_input.setText("") - - folder_id = None - for folder_path in folder_paths: - folder_id = self._controller.get_folder_id_from_path(folder_path) - if folder_id: - break - if folder_id: - self._folders_widget.set_selected_folder(folder_id) - - def get_selected_folder_path(self): - """Get selected folder path.""" - return self._selected_folder_path + def _on_my_tasks_change(self, enabled: bool) -> None: + folder_ids = None + if enabled: + entity_ids = self._controller.get_my_tasks_entity_ids( + self._project_name + ) + folder_ids = entity_ids["folder_ids"] + self._folders_widget.set_folder_ids_filter(folder_ids) diff --git a/client/ayon_core/tools/publisher/window.py b/client/ayon_core/tools/publisher/window.py index dc086a3b48..6f7444ed04 100644 --- a/client/ayon_core/tools/publisher/window.py +++ b/client/ayon_core/tools/publisher/window.py @@ -1,9 +1,11 @@ +from __future__ import annotations + import os import json import time import collections import copy -from typing import Optional +from typing import Optional, Any from qtpy import QtWidgets, QtCore, QtGui @@ -393,6 +395,9 @@ class PublisherWindow(QtWidgets.QDialog): self._publish_frame_visible = None self._tab_on_reset = None + self._create_context_valid: bool = True + self._blocked_by_crashed_paths: bool = False + self._error_messages_to_show = collections.deque() self._errors_dialog_message_timer = errors_dialog_message_timer @@ -406,6 +411,8 @@ class PublisherWindow(QtWidgets.QDialog): self._show_counter = 0 self._window_is_visible = False + self._update_footer_state() + @property def controller(self) -> AbstractPublisherFrontend: """Kept for compatibility with traypublisher.""" @@ -664,11 +671,33 @@ class PublisherWindow(QtWidgets.QDialog): self._tab_on_reset = tab - def _update_publish_details_widget(self, force=False): - if not force and not self._is_on_details_tab(): + def set_current_tab(self, tab): + if tab == "create": + self._go_to_create_tab() + elif tab == "publish": + self._go_to_publish_tab() + elif tab == "report": + self._go_to_report_tab() + elif tab == "details": + self._go_to_details_tab() + + if not self._window_is_visible: + self.set_tab_on_reset(tab) + + def _update_publish_details_widget( + self, + force: bool = False, + report_data: Optional[dict[str, Any]] = None, + ) -> None: + if ( + report_data is None + and not force + and not self._is_on_details_tab() + ): return - report_data = self._controller.get_publish_report() + if report_data is None: + report_data = self._controller.get_publish_report() self._publish_details_widget.set_report_data(report_data) def _on_help_click(self): @@ -678,13 +707,8 @@ class PublisherWindow(QtWidgets.QDialog): self._help_dialog.show() window = self.window() - if hasattr(QtWidgets.QApplication, "desktop"): - desktop = QtWidgets.QApplication.desktop() - screen_idx = desktop.screenNumber(window) - screen_geo = desktop.screenGeometry(screen_idx) - else: - screen = window.screen() - screen_geo = screen.geometry() + screen = window.screen() + screen_geo = screen.geometry() window_geo = window.geometry() dialog_x = window_geo.x() + window_geo.width() @@ -757,19 +781,6 @@ class PublisherWindow(QtWidgets.QDialog): def _set_current_tab(self, identifier): self._tabs_widget.set_current_tab(identifier) - def set_current_tab(self, tab): - if tab == "create": - self._go_to_create_tab() - elif tab == "publish": - self._go_to_publish_tab() - elif tab == "report": - self._go_to_report_tab() - elif tab == "details": - self._go_to_details_tab() - - if not 
self._window_is_visible: - self.set_tab_on_reset(tab) - def _is_current_tab(self, identifier): return self._tabs_widget.is_current_tab(identifier) @@ -870,26 +881,56 @@ # Reset style self._comment_input.setStyleSheet("") - def _set_footer_enabled(self, enabled): - self._save_btn.setEnabled(True) + def _set_create_context_valid(self, valid: bool) -> None: + self._create_context_valid = valid + self._update_footer_state() + + def _set_blocked(self, blocked: bool) -> None: + self._blocked_by_crashed_paths = blocked + self._overview_widget.setEnabled(not blocked) + self._update_footer_state() + if not blocked: + return + + self.set_tab_on_reset("details") + self._go_to_details_tab() + + QtWidgets.QMessageBox.critical( + self, + "Failed to load plugins", + ( + "Failed to load plugins that prevent you from" + " using the publish tool.\n" + "Please contact your TD or administrator." + ) + ) + + def _update_footer_state(self) -> None: + enabled = ( + not self._blocked_by_crashed_paths + and self._create_context_valid + ) + save_enabled = not self._blocked_by_crashed_paths + + self._save_btn.setEnabled(save_enabled) self._reset_btn.setEnabled(True) - if enabled: - self._stop_btn.setEnabled(False) - self._validate_btn.setEnabled(True) - self._publish_btn.setEnabled(True) - else: - self._stop_btn.setEnabled(enabled) - self._validate_btn.setEnabled(enabled) - self._publish_btn.setEnabled(enabled) + self._stop_btn.setEnabled(False) + self._validate_btn.setEnabled(enabled) + self._publish_btn.setEnabled(enabled) def _on_publish_reset(self): self._create_tab.setEnabled(True) self._set_comment_input_visiblity(True) self._set_publish_overlay_visibility(False) self._set_publish_visibility(False) - self._update_publish_details_widget() + + report_data = self._controller.get_publish_report() + blocked = bool(report_data["blocking_crashed_paths"]) + self._set_blocked(blocked) + self._update_publish_details_widget(report_data=report_data) def _on_controller_reset(self): + self._update_publish_details_widget(force=True) self._first_reset, first_reset = False, self._first_reset if self._tab_on_reset is not None: self._tab_on_reset, new_tab = None, self._tab_on_reset @@ -957,7 +998,7 @@ def _validate_create_instances(self): if not self._controller.is_host_valid(): - self._set_footer_enabled(True) + self._set_create_context_valid(True) return active_instances_by_id = { @@ -978,7 +1019,7 @@ if all_valid is None: all_valid = True - self._set_footer_enabled(bool(all_valid)) + self._set_create_context_valid(bool(all_valid)) def _on_create_model_reset(self): self._validate_create_instances() diff --git a/client/ayon_core/tools/push_to_project/control.py b/client/ayon_core/tools/push_to_project/control.py index b4e0d56dfd..a24cedf455 100644 --- a/client/ayon_core/tools/push_to_project/control.py +++ b/client/ayon_core/tools/push_to_project/control.py @@ -41,6 +41,7 @@ class PushToContextController: self._process_item_id = None self._use_original_name = False + self._version_up = False self.set_source(project_name, version_ids) @@ -212,7 +213,7 @@ class PushToContextController: self._user_values.variant, comment=self._user_values.comment, new_folder_name=self._user_values.new_folder_name, - dst_version=1, + version_up=self._version_up, use_original_name=self._use_original_name, ) item_ids.append(item_id) @@ -229,6 +230,9 @@ thread.start() return item_ids
+ def set_version_up(self, state): + self._version_up = state + def wait_for_process_thread(self): if self._process_thread is None: return diff --git a/client/ayon_core/tools/push_to_project/models/integrate.py b/client/ayon_core/tools/push_to_project/models/integrate.py index ef49838152..d0e191a412 100644 --- a/client/ayon_core/tools/push_to_project/models/integrate.py +++ b/client/ayon_core/tools/push_to_project/models/integrate.py @@ -3,9 +3,10 @@ import re import copy import itertools import sys +import tempfile import traceback import uuid -from typing import Optional, Dict +from typing import Optional, Any import ayon_api from ayon_api.utils import create_entity_id @@ -88,7 +89,7 @@ class ProjectPushItem: variant, comment, new_folder_name, - dst_version, + version_up, item_id=None, use_original_name=False ): @@ -99,7 +100,7 @@ self.dst_project_name = dst_project_name self.dst_folder_id = dst_folder_id self.dst_task_name = dst_task_name - self.dst_version = dst_version + self.version_up = version_up self.variant = variant self.new_folder_name = new_folder_name self.comment = comment or "" @@ -117,7 +118,7 @@ str(self.dst_folder_id), str(self.new_folder_name), str(self.dst_task_name), - str(self.dst_version), + str(self.version_up), self.use_original_name ]) return self._repr_value @@ -132,7 +133,7 @@ "dst_project_name": self.dst_project_name, "dst_folder_id": self.dst_folder_id, "dst_task_name": self.dst_task_name, - "dst_version": self.dst_version, + "version_up": self.version_up, "variant": self.variant, "comment": self.comment, "new_folder_name": self.new_folder_name, @@ -225,8 +226,8 @@ but filenames are not template based. Args: - repre_entity (Dict[str, Ant]): Representation entity. - roots (Dict[str, str]): Project roots (based on project anatomy). + repre_entity (dict[str, Any]): Representation entity. + roots (dict[str, str]): Project roots (based on project anatomy). 
""" def __init__(self, repre_entity, roots): @@ -482,6 +483,8 @@ class ProjectPushItemProcess: self._log_info("Destination project was found") self._fill_or_create_destination_folder() self._log_info("Destination folder was determined") + self._fill_or_create_destination_task() + self._log_info("Destination task was determined") self._determine_product_type() self._determine_publish_template_name() self._determine_product_name() @@ -650,10 +653,10 @@ class ProjectPushItemProcess: def _create_folder( self, - src_folder_entity, - project_entity, - parent_folder_entity, - folder_name + src_folder_entity: dict[str, Any], + project_entity: dict[str, Any], + parent_folder_entity: dict[str, Any], + folder_name: str ): parent_id = None if parent_folder_entity: @@ -702,12 +705,19 @@ class ProjectPushItemProcess: if new_folder_name != folder_name: folder_label = folder_name - # TODO find out how to define folder type + src_folder_type = src_folder_entity["folderType"] + dst_folder_type = self._get_dst_folder_type( + project_entity, + src_folder_type + ) + new_thumbnail_id = self._create_new_folder_thumbnail( + project_entity, src_folder_entity) folder_entity = new_folder_entity( folder_name, - "Folder", + dst_folder_type, parent_id=parent_id, - attribs=new_folder_attrib + attribs=new_folder_attrib, + thumbnail_id=new_thumbnail_id ) if folder_label: folder_entity["label"] = folder_label @@ -727,10 +737,59 @@ class ProjectPushItemProcess: folder_entity["path"] = "/".join([parent_path, folder_name]) return folder_entity + def _create_new_folder_thumbnail( + self, + project_entity: dict[str, Any], + src_folder_entity: dict[str, Any] + ) -> Optional[str]: + """Copy thumbnail possibly set on folder. + + Could be different from representation thumbnails, and it is only shown + when folder is selected. + """ + if not src_folder_entity["thumbnailId"]: + return None + + thumbnail = ayon_api.get_folder_thumbnail( + self._item.src_project_name, + src_folder_entity["id"], + src_folder_entity["thumbnailId"] + ) + if not thumbnail.id: + return None + + with tempfile.NamedTemporaryFile(delete=False) as tmp_file: + tmp_file.write(thumbnail.content) + temp_file_path = tmp_file.name + + new_thumbnail_id = None + try: + new_thumbnail_id = ayon_api.create_thumbnail( + project_entity["name"], temp_file_path) + finally: + if os.path.exists(temp_file_path): + os.remove(temp_file_path) + return new_thumbnail_id + + def _get_dst_folder_type( + self, + project_entity: dict[str, Any], + src_folder_type: str + ) -> str: + """Get new folder type.""" + for folder_type in project_entity["folderTypes"]: + if folder_type["name"].lower() == src_folder_type.lower(): + return folder_type["name"] + + self._status.set_failed( + f"'{src_folder_type}' folder type is not configured in " + f"project Anatomy." 
+ ) + raise PushToProjectError(self._status.fail_reason) + def _fill_or_create_destination_folder(self): dst_project_name = self._item.dst_project_name dst_folder_id = self._item.dst_folder_id - dst_task_name = self._item.dst_task_name new_folder_name = self._item.new_folder_name if not dst_folder_id and not new_folder_name: self._status.set_failed( @@ -761,9 +820,11 @@ class ProjectPushItemProcess: new_folder_name ) self._folder_entity = folder_entity - if not dst_task_name: - self._task_info = {} - return + + def _fill_or_create_destination_task(self): + folder_entity = self._folder_entity + dst_task_name = self._item.dst_task_name + dst_project_name = self._item.dst_project_name folder_path = folder_entity["path"] folder_tasks = { @@ -772,6 +833,20 @@ class ProjectPushItemProcess: dst_project_name, folder_ids=[folder_entity["id"]] ) } + + if not dst_task_name: + src_task_info = self._get_src_task_info() + if not src_task_info: # really no task selected nor on source + self._task_info = {} + return + + dst_task_name = src_task_info["name"] + if dst_task_name.lower() not in folder_tasks: + task_info = self._make_sure_task_exists( + folder_entity, src_task_info + ) + folder_tasks[dst_task_name.lower()] = task_info + task_info = folder_tasks.get(dst_task_name.lower()) if not task_info: self._status.set_failed( @@ -790,7 +865,10 @@ class ProjectPushItemProcess: task_type["name"]: task_type for task_type in self._project_entity["taskTypes"] } - task_type_info = task_types_by_name.get(task_type_name, {}) + task_type_info = copy.deepcopy( + task_types_by_name.get(task_type_name, {}) + ) + task_type_info.pop("name") # do not overwrite real task name task_info.update(task_type_info) self._task_info = task_info @@ -870,10 +948,22 @@ class ProjectPushItemProcess: self._product_entity = product_entity return product_entity + src_attrib = self._src_product_entity["attrib"] + + dst_attrib = {} + for key in { + "description", + "productGroup", + }: + value = src_attrib.get(key) + if value: + dst_attrib[key] = value + product_entity = new_product_entity( product_name, product_type, folder_id, + attribs=dst_attrib ) self._operations.create_entity( project_name, "product", product_entity @@ -884,7 +974,7 @@ class ProjectPushItemProcess: """Make sure version document exits in database.""" project_name = self._item.dst_project_name - version = self._item.dst_version + version_up = self._item.version_up src_version_entity = self._src_version_entity product_entity = self._product_entity product_id = product_entity["id"] @@ -912,27 +1002,29 @@ class ProjectPushItemProcess: "description", "intent", }: - if key in src_attrib: - dst_attrib[key] = src_attrib[key] + value = src_attrib.get(key) + if value: + dst_attrib[key] = value - if version is None: - last_version_entity = ayon_api.get_last_version_by_product_id( - project_name, product_id + last_version_entity = ayon_api.get_last_version_by_product_id( + project_name, product_id + ) + if last_version_entity is None: + dst_version = get_versioning_start( + project_name, + self.host_name, + task_name=self._task_info.get("name"), + task_type=self._task_info.get("taskType"), + product_type=product_type, + product_name=product_entity["name"], ) - if last_version_entity: - version = int(last_version_entity["version"]) + 1 - else: - version = get_versioning_start( - project_name, - self.host_name, - task_name=self._task_info["name"], - task_type=self._task_info["taskType"], - product_type=product_type, - product_name=product_entity["name"], - ) + else: + dst_version = 
int(last_version_entity["version"]) + if version_up: + dst_version += 1 existing_version_entity = ayon_api.get_version_by_name( - project_name, version, product_id + project_name, dst_version, product_id ) thumbnail_id = self._copy_version_thumbnail() @@ -950,10 +1042,29 @@ class ProjectPushItemProcess: existing_version_entity["attrib"].update(dst_attrib) self._version_entity = existing_version_entity return + copied_tags = self._get_transferable_tags(src_version_entity) + copied_status = self._get_transferable_status(src_version_entity) + + description_parts = [] + dst_attr_description = dst_attrib.get("description") + if dst_attr_description: + description_parts.append(dst_attr_description) + + description = self._create_src_version_description( + self._item.src_project_name, + src_version_entity + ) + if description: + description_parts.append(description) + + dst_attrib["description"] = "\n\n".join(description_parts) version_entity = new_version_entity( - version, + dst_version, product_id, + status=copied_status, + tags=copied_tags, + task_id=self._task_info.get("id"), attribs=dst_attrib, thumbnail_id=thumbnail_id, ) @@ -962,6 +1073,47 @@ class ProjectPushItemProcess: ) self._version_entity = version_entity + def _make_sure_task_exists( + self, + folder_entity: dict[str, Any], + task_info: dict[str, Any], + ) -> dict[str, Any]: + """Creates destination task from source task information""" + project_name = self._item.dst_project_name + found_task_type = False + src_task_type = task_info["taskType"] + for task_type in self._project_entity["taskTypes"]: + if task_type["name"].lower() == src_task_type.lower(): + found_task_type = True + break + + if not found_task_type: + self._status.set_failed( + f"'{src_task_type}' task type is not configured in " + "project Anatomy." 
+ ) + + raise PushToProjectError(self._status.fail_reason) + + task_info = self._operations.create_task( + project_name, + task_info["name"], + folder_id=folder_entity["id"], + task_type=src_task_type, + attrib=task_info["attrib"], + ) + self._task_info = task_info.data + return self._task_info + + def _get_src_task_info(self): + src_version_entity = self._src_version_entity + if not src_version_entity["taskId"]: + return None + src_task = ayon_api.get_task_by_id( + self._item.src_project_name, src_version_entity["taskId"] + ) + return src_task + def _integrate_representations(self): try: self._real_integrate_representations() @@ -990,8 +1142,6 @@ class ProjectPushItemProcess: self.host_name ) formatting_data.update({ - "subset": self._product_name, - "family": self._product_type, "product": { "name": self._product_name, "type": self._product_type, @@ -1197,18 +1347,66 @@ class ProjectPushItemProcess: if context_value and isinstance(context_value, dict): for context_sub_key in context_value.keys(): value_to_update = formatting_data.get(context_key, {}).get( - context_sub_key) + context_sub_key + ) if value_to_update: - repre_context[context_key][ - context_sub_key] = value_to_update + repre_context[context_key][context_sub_key] = ( + value_to_update + ) else: value_to_update = formatting_data.get(context_key) if value_to_update: repre_context[context_key] = value_to_update if "task" not in formatting_data: - repre_context.pop("task") + repre_context.pop("task", None) return repre_context + def _get_transferable_tags(self, src_version_entity): + """Copy over only tags present in destination project""" + dst_project_tags = [ + tag["name"] for tag in self._project_entity["tags"] + ] + copied_tags = [] + for src_tag in src_version_entity["tags"]: + if src_tag in dst_project_tags: + copied_tags.append(src_tag) + return copied_tags + + def _get_transferable_status(self, src_version_entity): + """Copy over status, first status if not matching found""" + dst_project_statuses = { + status["name"]: status + for status in self._project_entity["statuses"] + } + copied_status = dst_project_statuses.get(src_version_entity["status"]) + if copied_status: + return copied_status["name"] + return None + + def _create_src_version_description( + self, + src_project_name: str, + src_version_entity: dict[str, Any] + ) -> str: + """Creates description text about source version.""" + src_version_id = src_version_entity["id"] + src_author = src_version_entity["author"] + query = "&".join([ + f"project={src_project_name}", + "type=version", + f"id={src_version_id}" + ]) + version_url = ( + f"{ayon_api.get_base_url()}" + f"/projects/{src_project_name}/products?{query}" + ) + description = ( + f"Version copied from from {version_url} " + f"created by '{src_author}', " + ) + + return description + class IntegrateModel: def __init__(self, controller): @@ -1231,7 +1429,7 @@ class IntegrateModel: variant, comment, new_folder_name, - dst_version, + version_up, use_original_name ): """Create new item for integration. @@ -1245,7 +1443,7 @@ class IntegrateModel: variant (str): Variant name. comment (Union[str, None]): Comment. new_folder_name (Union[str, None]): New folder name. - dst_version (int): Destination version number. 
+ version_up (bool): Should destination product be versioned up use_original_name (bool): If original product names should be used Returns: @@ -1262,7 +1460,7 @@ class IntegrateModel: variant, comment=comment, new_folder_name=new_folder_name, - dst_version=dst_version, + version_up=version_up, use_original_name=use_original_name ) process_item = ProjectPushItemProcess(self, item) @@ -1281,6 +1479,6 @@ class IntegrateModel: return item.integrate() - def get_items(self) -> Dict[str, ProjectPushItemProcess]: + def get_items(self) -> dict[str, ProjectPushItemProcess]: """Returns dict of all ProjectPushItemProcess items """ return self._process_items diff --git a/client/ayon_core/tools/push_to_project/ui/window.py b/client/ayon_core/tools/push_to_project/ui/window.py index f382ccce64..b77cca0e09 100644 --- a/client/ayon_core/tools/push_to_project/ui/window.py +++ b/client/ayon_core/tools/push_to_project/ui/window.py @@ -144,6 +144,8 @@ class PushToContextSelectWindow(QtWidgets.QWidget): variant_input.setPlaceholderText("< Variant >") variant_input.setObjectName("ValidatedLineEdit") + version_up_checkbox = NiceCheckbox(True, parent=inputs_widget) + comment_input = PlaceholderLineEdit(inputs_widget) comment_input.setPlaceholderText("< Publish comment >") @@ -153,7 +155,11 @@ class PushToContextSelectWindow(QtWidgets.QWidget): inputs_layout.addRow("New folder name", folder_name_input) inputs_layout.addRow("Variant", variant_input) inputs_layout.addRow( - "Use original product names", original_names_checkbox) + "Use original product names", original_names_checkbox + ) + inputs_layout.addRow( + "Version up existing Product", version_up_checkbox + ) inputs_layout.addRow("Comment", comment_input) main_splitter.addWidget(context_widget) @@ -209,8 +215,11 @@ class PushToContextSelectWindow(QtWidgets.QWidget): "Show error detail dialog to copy full error." ) original_names_checkbox.setToolTip( - "Required for multi copy, doesn't allow changes " - "variant values." + "Required for multi copy, doesn't allow changes variant values." + ) + version_up_checkbox.setToolTip( + "Version up existing product. If not selected version will be " + "updated." 
) overlay_close_btn = QtWidgets.QPushButton( @@ -259,6 +268,8 @@ class PushToContextSelectWindow(QtWidgets.QWidget): library_only_checkbox.stateChanged.connect(self._on_library_only_change) original_names_checkbox.stateChanged.connect( self._on_original_names_change) + version_up_checkbox.stateChanged.connect( + self._on_version_up_checkbox_change) publish_btn.clicked.connect(self._on_select_click) cancel_btn.clicked.connect(self._on_close_click) @@ -308,6 +319,7 @@ class PushToContextSelectWindow(QtWidgets.QWidget): self._folder_name_input = folder_name_input self._comment_input = comment_input self._use_original_names_checkbox = original_names_checkbox + self._library_only_checkbox = library_only_checkbox self._publish_btn = publish_btn @@ -328,6 +340,7 @@ class PushToContextSelectWindow(QtWidgets.QWidget): self._new_folder_name_input_text = None self._variant_input_text = None self._comment_input_text = None + self._version_up_checkbox = version_up_checkbox self._first_show = True self._show_timer = show_timer @@ -344,6 +357,7 @@ class PushToContextSelectWindow(QtWidgets.QWidget): show_detail_btn.setVisible(False) overlay_close_btn.setVisible(False) overlay_try_btn.setVisible(False) + version_up_checkbox.setChecked(False) # Support of public api function of controller def set_source(self, project_name, version_ids): @@ -376,7 +390,6 @@ class PushToContextSelectWindow(QtWidgets.QWidget): self._invalidate_new_folder_name( new_folder_name, user_values["is_new_folder_name_valid"] ) - self._controller._invalidate() self._projects_combobox.refresh() def _on_first_show(self): @@ -415,14 +428,18 @@ class PushToContextSelectWindow(QtWidgets.QWidget): self._comment_input_text = text self._user_input_changed_timer.start() - def _on_library_only_change(self, state: int) -> None: + def _on_library_only_change(self) -> None: """Change toggle state, reset filter, recalculate dropdown""" - state = bool(state) - self._projects_combobox.set_standard_filter_enabled(state) + is_checked = self._library_only_checkbox.isChecked() + self._projects_combobox.set_standard_filter_enabled(is_checked) - def _on_original_names_change(self, state: int) -> None: - use_original_name = bool(state) - self._invalidate_use_original_names(use_original_name) + def _on_original_names_change(self) -> None: + is_checked = self._use_original_names_checkbox.isChecked() + self._invalidate_use_original_names(is_checked) + + def _on_version_up_checkbox_change(self) -> None: + is_checked = self._version_up_checkbox.isChecked() + self._controller.set_version_up(is_checked) def _on_user_input_timer(self): folder_name_enabled = self._new_folder_name_enabled diff --git a/client/ayon_core/tools/sceneinventory/view.py b/client/ayon_core/tools/sceneinventory/view.py index 22bc170230..eb12fe5e06 100644 --- a/client/ayon_core/tools/sceneinventory/view.py +++ b/client/ayon_core/tools/sceneinventory/view.py @@ -1114,6 +1114,8 @@ class SceneInventoryView(QtWidgets.QTreeView): try: for item_id, item_version in zip(item_ids, versions): container = containers_by_id[item_id] + if container.get("version_locked"): + continue try: update_container(container, item_version) except Exception as exc: diff --git a/client/ayon_core/tools/texture_copy/app.py b/client/ayon_core/tools/texture_copy/app.py index c288187aac..1013020185 100644 --- a/client/ayon_core/tools/texture_copy/app.py +++ b/client/ayon_core/tools/texture_copy/app.py @@ -32,8 +32,6 @@ class TextureCopy: product_type = "texture" template_data = get_template_data(project_entity, folder_entity) 
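Note on the "Version up existing Product" checkbox wired up above: its state reaches the controller through `set_version_up()`, and the resulting `version_up` flag replaces the old explicit `dst_version` input, so the destination version is now derived from the last existing version of the destination product (see the `ProjectPushItemProcess` hunks earlier in this patch). A rough sketch of that resolution logic, for illustration only — the wrapper function below is invented, while the API calls mirror the ones used in the patch:

    import ayon_api

    from ayon_core.pipeline.version_start import get_versioning_start


    def resolve_dst_version(
        project_name, product_id, product_type, product_name,
        task_info, host_name, version_up,
    ):
        # Illustrative helper, not part of the patch.
        last_version_entity = ayon_api.get_last_version_by_product_id(
            project_name, product_id
        )
        if last_version_entity is None:
            # No version exists yet: start at the configured first version.
            return get_versioning_start(
                project_name,
                host_name,
                task_name=task_info.get("name"),
                task_type=task_info.get("taskType"),
                product_type=product_type,
                product_name=product_name,
            )
        dst_version = int(last_version_entity["version"])
        if version_up:
            # Checkbox checked: publish as a new version on top.
            dst_version += 1
        # Checkbox unchecked: the latest existing version is updated in place.
        return dst_version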
template_data.update({ - "family": product_type, - "subset": product_name, "product": { "name": product_name, "type": product_type, diff --git a/client/ayon_core/tools/utils/__init__.py b/client/ayon_core/tools/utils/__init__.py index 111b7c614b..56989927ee 100644 --- a/client/ayon_core/tools/utils/__init__.py +++ b/client/ayon_core/tools/utils/__init__.py @@ -76,6 +76,7 @@ from .folders_widget import ( FoldersQtModel, FOLDERS_MODEL_SENDER_NAME, SimpleFoldersWidget, + FoldersFiltersWidget, ) from .tasks_widget import ( @@ -160,6 +161,7 @@ __all__ = ( "FoldersQtModel", "FOLDERS_MODEL_SENDER_NAME", "SimpleFoldersWidget", + "FoldersFiltersWidget", "TasksWidget", "TasksQtModel", diff --git a/client/ayon_core/tools/utils/color_widgets/color_screen_pick.py b/client/ayon_core/tools/utils/color_widgets/color_screen_pick.py index 542db2831a..c900ad1f48 100644 --- a/client/ayon_core/tools/utils/color_widgets/color_screen_pick.py +++ b/client/ayon_core/tools/utils/color_widgets/color_screen_pick.py @@ -1,4 +1,3 @@ -import qtpy from qtpy import QtWidgets, QtCore, QtGui @@ -6,7 +5,7 @@ class PickScreenColorWidget(QtWidgets.QWidget): color_selected = QtCore.Signal(QtGui.QColor) def __init__(self, parent=None): - super(PickScreenColorWidget, self).__init__(parent) + super().__init__(parent) self.labels = [] self.magnification = 2 @@ -53,7 +52,7 @@ class PickLabel(QtWidgets.QLabel): close_session = QtCore.Signal() def __init__(self, pick_widget): - super(PickLabel, self).__init__() + super().__init__() self.setMouseTracking(True) self.pick_widget = pick_widget @@ -74,14 +73,10 @@ class PickLabel(QtWidgets.QLabel): self.show() self.windowHandle().setScreen(screen_obj) geo = screen_obj.geometry() - args = ( - QtWidgets.QApplication.desktop().winId(), + pix = screen_obj.grabWindow( + self.winId(), geo.x(), geo.y(), geo.width(), geo.height() ) - if qtpy.API in ("pyqt4", "pyside"): - pix = QtGui.QPixmap.grabWindow(*args) - else: - pix = screen_obj.grabWindow(*args) if pix.width() > pix.height(): size = pix.height() diff --git a/client/ayon_core/tools/utils/folders_widget.py b/client/ayon_core/tools/utils/folders_widget.py index 7b71dd087c..ea278da6cb 100644 --- a/client/ayon_core/tools/utils/folders_widget.py +++ b/client/ayon_core/tools/utils/folders_widget.py @@ -15,6 +15,8 @@ from ayon_core.tools.common_models import ( from .models import RecursiveSortFilterProxyModel from .views import TreeView from .lib import RefreshThread, get_qt_icon +from .widgets import PlaceholderLineEdit +from .nice_checkbox import NiceCheckbox FOLDERS_MODEL_SENDER_NAME = "qt_folders_model" @@ -343,6 +345,8 @@ class FoldersProxyModel(RecursiveSortFilterProxyModel): def __init__(self): super().__init__() + self.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) + self._folder_ids_filter = None def set_folder_ids_filter(self, folder_ids: Optional[list[str]]): @@ -794,3 +798,53 @@ class SimpleFoldersWidget(FoldersWidget): event (Event): Triggered event. """ pass + + +class FoldersFiltersWidget(QtWidgets.QWidget): + """Helper widget for most commonly used filters in context selection.""" + text_changed = QtCore.Signal(str) + my_tasks_changed = QtCore.Signal(bool) + + def __init__(self, parent: QtWidgets.QWidget) -> None: + super().__init__(parent) + + folders_filter_input = PlaceholderLineEdit(self) + folders_filter_input.setPlaceholderText("Folder name filter...") + + my_tasks_tooltip = ( + "Filter folders and task to only those you are assigned to." 
+ ) + my_tasks_label = QtWidgets.QLabel("My tasks", self) + my_tasks_label.setToolTip(my_tasks_tooltip) + + my_tasks_checkbox = NiceCheckbox(self) + my_tasks_checkbox.setChecked(False) + my_tasks_checkbox.setToolTip(my_tasks_tooltip) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(5) + layout.addWidget(folders_filter_input, 1) + layout.addWidget(my_tasks_label, 0) + layout.addWidget(my_tasks_checkbox, 0) + + folders_filter_input.textChanged.connect(self.text_changed) + my_tasks_checkbox.stateChanged.connect(self._on_my_tasks_change) + + self._folders_filter_input = folders_filter_input + self._my_tasks_checkbox = my_tasks_checkbox + + def is_my_tasks_checked(self) -> bool: + return self._my_tasks_checkbox.isChecked() + + def text(self) -> str: + return self._folders_filter_input.text() + + def set_text(self, text: str) -> None: + self._folders_filter_input.setText(text) + + def set_my_tasks_checked(self, checked: bool) -> None: + self._my_tasks_checkbox.setChecked(checked) + + def _on_my_tasks_change(self, _state: int) -> None: + self.my_tasks_changed.emit(self._my_tasks_checkbox.isChecked()) diff --git a/client/ayon_core/tools/utils/lib.py b/client/ayon_core/tools/utils/lib.py index a99c46199b..3308b943f0 100644 --- a/client/ayon_core/tools/utils/lib.py +++ b/client/ayon_core/tools/utils/lib.py @@ -53,14 +53,8 @@ def checkstate_enum_to_int(state): def center_window(window): """Move window to center of it's screen.""" - - if hasattr(QtWidgets.QApplication, "desktop"): - desktop = QtWidgets.QApplication.desktop() - screen_idx = desktop.screenNumber(window) - screen_geo = desktop.screenGeometry(screen_idx) - else: - screen = window.screen() - screen_geo = screen.geometry() + screen = window.screen() + screen_geo = screen.geometry() geo = window.frameGeometry() geo.moveCenter(screen_geo.center()) @@ -554,11 +548,17 @@ class _IconsCache: elif icon_type == "ayon_url": url = icon_def["url"].lstrip("/") url = f"{ayon_api.get_base_url()}/{url}" - stream = io.BytesIO() - ayon_api.download_file_to_stream(url, stream) - pix = QtGui.QPixmap() - pix.loadFromData(stream.getvalue()) - icon = QtGui.QIcon(pix) + try: + stream = io.BytesIO() + ayon_api.download_file_to_stream(url, stream) + pix = QtGui.QPixmap() + pix.loadFromData(stream.getvalue()) + icon = QtGui.QIcon(pix) + except Exception: + log.warning( + "Failed to download image '%s'", url, exc_info=True + ) + icon = None elif icon_type == "transparent": size = icon_def.get("size") diff --git a/client/ayon_core/tools/utils/widgets.py b/client/ayon_core/tools/utils/widgets.py index 4b787ff830..9341e665bc 100644 --- a/client/ayon_core/tools/utils/widgets.py +++ b/client/ayon_core/tools/utils/widgets.py @@ -865,24 +865,26 @@ class OptionalMenu(QtWidgets.QMenu): def mouseReleaseEvent(self, event): """Emit option clicked signal if mouse released on it""" active = self.actionAt(event.pos()) - if active and active.use_option: + if isinstance(active, OptionalAction) and active.use_option: option = active.widget.option if option.is_hovered(event.globalPos()): option.clicked.emit() - super(OptionalMenu, self).mouseReleaseEvent(event) + super().mouseReleaseEvent(event) def mouseMoveEvent(self, event): """Add highlight to active action""" active = self.actionAt(event.pos()) for action in self.actions(): - action.set_highlight(action is active, event.globalPos()) - super(OptionalMenu, self).mouseMoveEvent(event) + if isinstance(action, OptionalAction): + action.set_highlight(action is active, 
event.globalPos()) + super().mouseMoveEvent(event) def leaveEvent(self, event): """Remove highlight from all actions""" for action in self.actions(): - action.set_highlight(False) - super(OptionalMenu, self).leaveEvent(event) + if isinstance(action, OptionalAction): + action.set_highlight(False) + super().leaveEvent(event) class OptionalAction(QtWidgets.QWidgetAction): @@ -894,7 +896,7 @@ class OptionalAction(QtWidgets.QWidgetAction): """ def __init__(self, label, icon, use_option, parent): - super(OptionalAction, self).__init__(parent) + super().__init__(parent) self.label = label self.icon = icon self.use_option = use_option @@ -955,7 +957,7 @@ class OptionalActionWidget(QtWidgets.QWidget): """Main widget class for `OptionalAction`""" def __init__(self, label, parent=None): - super(OptionalActionWidget, self).__init__(parent) + super().__init__(parent) body_widget = QtWidgets.QWidget(self) body_widget.setObjectName("OptionalActionBody") diff --git a/client/ayon_core/tools/workfiles/abstract.py b/client/ayon_core/tools/workfiles/abstract.py index 863d6bb9bc..1b92c0d334 100644 --- a/client/ayon_core/tools/workfiles/abstract.py +++ b/client/ayon_core/tools/workfiles/abstract.py @@ -1,8 +1,15 @@ +from __future__ import annotations + import os from abc import ABC, abstractmethod +import typing +from typing import Optional from ayon_core.style import get_default_entity_icon_color +if typing.TYPE_CHECKING: + from ayon_core.host import PublishedWorkfileInfo + class FolderItem: """Item representing folder entity on a server. @@ -159,6 +166,17 @@ class WorkareaFilepathResult: self.filepath = filepath +class PublishedWorkfileWrap: + """Wrapper for workfile info that also contains version comment.""" + def __init__( + self, + info: Optional[PublishedWorkfileInfo] = None, + comment: Optional[str] = None, + ) -> None: + self.info = info + self.comment = comment + + class AbstractWorkfilesCommon(ABC): @abstractmethod def is_host_valid(self): @@ -787,6 +805,25 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon): """ pass + @abstractmethod + def get_published_workfile_info( + self, + folder_id: Optional[str], + representation_id: Optional[str], + ) -> PublishedWorkfileWrap: + """Get published workfile info by representation ID. + + Args: + folder_id (Optional[str]): Folder id. + representation_id (Optional[str]): Representation id. + + Returns: + PublishedWorkfileWrap: Published workfile info or None + if not found. + + """ + pass + @abstractmethod def get_workfile_info(self, folder_id, task_id, rootless_path): """Workfile info from database. 
diff --git a/client/ayon_core/tools/workfiles/control.py b/client/ayon_core/tools/workfiles/control.py index f0e0f0e416..c399a1bf33 100644 --- a/client/ayon_core/tools/workfiles/control.py +++ b/client/ayon_core/tools/workfiles/control.py @@ -1,4 +1,7 @@ +from __future__ import annotations + import os +from typing import Optional import ayon_api @@ -18,6 +21,7 @@ from ayon_core.tools.common_models import ( from .abstract import ( AbstractWorkfilesBackend, AbstractWorkfilesFrontend, + PublishedWorkfileWrap, ) from .models import SelectionModel, WorkfilesModel @@ -432,6 +436,15 @@ class BaseWorkfileController( folder_id, task_id ) + def get_published_workfile_info( + self, + folder_id: Optional[str], + representation_id: Optional[str], + ) -> PublishedWorkfileWrap: + return self._workfiles_model.get_published_workfile_info( + folder_id, representation_id + ) + def get_workfile_info(self, folder_id, task_id, rootless_path): return self._workfiles_model.get_workfile_info( folder_id, task_id, rootless_path diff --git a/client/ayon_core/tools/workfiles/models/selection.py b/client/ayon_core/tools/workfiles/models/selection.py index 9a6440b2a1..65caa287d1 100644 --- a/client/ayon_core/tools/workfiles/models/selection.py +++ b/client/ayon_core/tools/workfiles/models/selection.py @@ -17,6 +17,8 @@ class SelectionModel(object): self._task_name = None self._task_id = None self._workfile_path = None + self._rootless_workfile_path = None + self._workfile_entity_id = None self._representation_id = None def get_selected_folder_id(self): @@ -62,39 +64,49 @@ class SelectionModel(object): def get_selected_workfile_path(self): return self._workfile_path + def get_selected_workfile_data(self): + return { + "project_name": self._controller.get_current_project_name(), + "path": self._workfile_path, + "rootless_path": self._rootless_workfile_path, + "folder_id": self._folder_id, + "task_name": self._task_name, + "task_id": self._task_id, + "workfile_entity_id": self._workfile_entity_id, + } + def set_selected_workfile_path( self, rootless_path, path, workfile_entity_id ): if path == self._workfile_path: return + self._rootless_workfile_path = rootless_path self._workfile_path = path + self._workfile_entity_id = workfile_entity_id self._controller.emit_event( "selection.workarea.changed", - { - "project_name": self._controller.get_current_project_name(), - "path": path, - "rootless_path": rootless_path, - "folder_id": self._folder_id, - "task_name": self._task_name, - "task_id": self._task_id, - "workfile_entity_id": workfile_entity_id, - }, + self.get_selected_workfile_data(), self.event_source ) def get_selected_representation_id(self): return self._representation_id + def get_selected_representation_data(self): + return { + "project_name": self._controller.get_current_project_name(), + "folder_id": self._folder_id, + "task_id": self._task_id, + "representation_id": self._representation_id, + } + def set_selected_representation_id(self, representation_id): if representation_id == self._representation_id: return self._representation_id = representation_id self._controller.emit_event( "selection.representation.changed", - { - "project_name": self._controller.get_current_project_name(), - "representation_id": representation_id, - }, + self.get_selected_representation_data(), self.event_source ) diff --git a/client/ayon_core/tools/workfiles/models/workfiles.py b/client/ayon_core/tools/workfiles/models/workfiles.py index 5b5591fe43..c15dda2b4f 100644 --- a/client/ayon_core/tools/workfiles/models/workfiles.py +++ 
b/client/ayon_core/tools/workfiles/models/workfiles.py @@ -39,6 +39,7 @@ from ayon_core.pipeline.workfile import ( from ayon_core.pipeline.version_start import get_versioning_start from ayon_core.tools.workfiles.abstract import ( WorkareaFilepathResult, + PublishedWorkfileWrap, AbstractWorkfilesBackend, ) @@ -79,6 +80,7 @@ class WorkfilesModel: # Published workfiles self._repre_by_id = {} + self._version_comment_by_id = {} self._published_workfile_items_cache = NestedCacheItem( levels=1, default_factory=list ) @@ -95,6 +97,7 @@ class WorkfilesModel: self._workarea_file_items_cache.reset() self._repre_by_id = {} + self._version_comment_by_id = {} self._published_workfile_items_cache.reset() self._workfile_entities_by_task_id = {} @@ -552,13 +555,13 @@ class WorkfilesModel: ) def get_published_file_items( - self, folder_id: str, task_id: str + self, folder_id: Optional[str], task_id: Optional[str] ) -> list[PublishedWorkfileInfo]: """Published workfiles for passed context. Args: - folder_id (str): Folder id. - task_id (str): Task id. + folder_id (Optional[str]): Folder id. + task_id (Optional[str]): Task id. Returns: list[PublishedWorkfileInfo]: List of files for published workfiles. @@ -586,7 +589,7 @@ class WorkfilesModel: version_entities = list(ayon_api.get_versions( project_name, product_ids=product_ids, - fields={"id", "author", "taskId"}, + fields={"id", "author", "taskId", "attrib.comment"}, )) repre_entities = [] @@ -600,6 +603,13 @@ class WorkfilesModel: repre_entity["id"]: repre_entity for repre_entity in repre_entities }) + + # Map versions by representation ID for easy lookup + self._version_comment_by_id.update({ + version_entity["id"]: version_entity["attrib"].get("comment") + for version_entity in version_entities + }) + project_entity = self._controller.get_project_entity(project_name) prepared_data = ListPublishedWorkfilesOptionalData( @@ -626,6 +636,34 @@ class WorkfilesModel: ] return items + def get_published_workfile_info( + self, + folder_id: Optional[str], + representation_id: Optional[str], + ) -> PublishedWorkfileWrap: + """Get published workfile info by representation ID. + + Args: + folder_id (Optional[str]): Folder id. + representation_id (Optional[str]): Representation id. + + Returns: + PublishedWorkfileWrap: Published workfile info or None + if not found. + + """ + if not representation_id: + return PublishedWorkfileWrap() + + # Search through all cached published workfile items + for item in self.get_published_file_items(folder_id, None): + if item.representation_id == representation_id: + comment = self._get_published_workfile_version_comment( + representation_id + ) + return PublishedWorkfileWrap(item, comment) + return PublishedWorkfileWrap() + @property def _project_name(self) -> str: return self._controller.get_current_project_name() @@ -642,6 +680,25 @@ class WorkfilesModel: self._current_username = get_ayon_username() return self._current_username + def _get_published_workfile_version_comment( + self, representation_id: str + ) -> Optional[str]: + """Get version comment for published workfile. + + Args: + representation_id (str): Representation id. + + Returns: + Optional[str]: Version comment or None. 
+ + """ + if not representation_id: + return None + repre = self._repre_by_id.get(representation_id) + if not repre: + return None + return self._version_comment_by_id.get(repre["versionId"]) + # --- Host --- def _open_workfile(self, folder_id: str, task_id: str, filepath: str): # TODO move to workfiles pipeline diff --git a/client/ayon_core/tools/workfiles/widgets/side_panel.py b/client/ayon_core/tools/workfiles/widgets/side_panel.py index b1b91d9721..2929ac780d 100644 --- a/client/ayon_core/tools/workfiles/widgets/side_panel.py +++ b/client/ayon_core/tools/workfiles/widgets/side_panel.py @@ -1,6 +1,7 @@ import datetime +from typing import Optional -from qtpy import QtWidgets, QtCore +from qtpy import QtCore, QtWidgets def file_size_to_string(file_size): @@ -8,9 +9,9 @@ def file_size_to_string(file_size): return "N/A" size = 0 size_ending_mapping = { - "KB": 1024 ** 1, - "MB": 1024 ** 2, - "GB": 1024 ** 3 + "KB": 1024**1, + "MB": 1024**2, + "GB": 1024**3, } ending = "B" for _ending, _size in size_ending_mapping.items(): @@ -70,7 +71,12 @@ class SidePanelWidget(QtWidgets.QWidget): btn_description_save.clicked.connect(self._on_save_click) controller.register_event_callback( - "selection.workarea.changed", self._on_selection_change + "selection.workarea.changed", + self._on_workarea_selection_change + ) + controller.register_event_callback( + "selection.representation.changed", + self._on_representation_selection_change, ) self._details_input = details_input @@ -82,12 +88,13 @@ class SidePanelWidget(QtWidgets.QWidget): self._task_id = None self._filepath = None self._rootless_path = None + self._representation_id = None self._orig_description = "" self._controller = controller - self._set_context(None, None, None, None) + self._set_context(False, None, None) - def set_published_mode(self, published_mode): + def set_published_mode(self, published_mode: bool) -> None: """Change published mode. 
Args: @@ -95,14 +102,37 @@ class SidePanelWidget(QtWidgets.QWidget): """ self._description_widget.setVisible(not published_mode) + # Clear the context when switching modes to avoid showing stale data + if published_mode: + self._set_publish_context( + self._folder_id, + self._task_id, + self._representation_id, + ) + else: + self._set_workarea_context( + self._folder_id, + self._task_id, + self._rootless_path, + self._filepath, + ) - def _on_selection_change(self, event): + def _on_workarea_selection_change(self, event): folder_id = event["folder_id"] task_id = event["task_id"] filepath = event["path"] rootless_path = event["rootless_path"] - self._set_context(folder_id, task_id, rootless_path, filepath) + self._set_workarea_context( + folder_id, task_id, rootless_path, filepath + ) + + def _on_representation_selection_change(self, event): + folder_id = event["folder_id"] + task_id = event["task_id"] + representation_id = event["representation_id"] + + self._set_publish_context(folder_id, task_id, representation_id) def _on_description_change(self): text = self._description_input.toPlainText() @@ -118,85 +148,134 @@ class SidePanelWidget(QtWidgets.QWidget): self._orig_description = description self._btn_description_save.setEnabled(False) - def _set_context(self, folder_id, task_id, rootless_path, filepath): + def _set_workarea_context( + self, + folder_id: Optional[str], + task_id: Optional[str], + rootless_path: Optional[str], + filepath: Optional[str], + ) -> None: + self._rootless_path = rootless_path + self._filepath = filepath + workfile_info = None # Check if folder, task and file are selected if folder_id and task_id and rootless_path: workfile_info = self._controller.get_workfile_info( folder_id, task_id, rootless_path ) - enabled = workfile_info is not None - self._details_input.setEnabled(enabled) - self._description_input.setEnabled(enabled) - self._btn_description_save.setEnabled(enabled) - - self._folder_id = folder_id - self._task_id = task_id - self._filepath = filepath - self._rootless_path = rootless_path - - # Disable inputs and remove texts if any required arguments are - # missing - if not enabled: + if workfile_info is None: self._orig_description = "" - self._details_input.setPlainText("") self._description_input.setPlainText("") + self._set_context(False, folder_id, task_id) return - description = workfile_info.description - size_value = file_size_to_string(workfile_info.file_size) + self._set_context( + True, + folder_id, + task_id, + file_created=workfile_info.file_created, + file_modified=workfile_info.file_modified, + size_value=workfile_info.file_size, + created_by=workfile_info.created_by, + updated_by=workfile_info.updated_by, + ) + + description = workfile_info.description + self._orig_description = description + self._description_input.setPlainText(description) + + def _set_publish_context( + self, + folder_id: Optional[str], + task_id: Optional[str], + representation_id: Optional[str], + ) -> None: + self._representation_id = representation_id + published_workfile_wrap = self._controller.get_published_workfile_info( + folder_id, + representation_id, + ) + info = published_workfile_wrap.info + comment = published_workfile_wrap.comment + if info is None: + self._set_context(False, folder_id, task_id) + return + + self._set_context( + True, + folder_id, + task_id, + file_created=info.file_created, + file_modified=info.file_modified, + size_value=info.file_size, + created_by=info.author, + comment=comment, + ) + + def _set_context( + self, + is_valid: bool, + 
folder_id: Optional[str], + task_id: Optional[str], + *, + file_created: Optional[int] = None, + file_modified: Optional[int] = None, + size_value: Optional[int] = None, + created_by: Optional[str] = None, + updated_by: Optional[str] = None, + comment: Optional[str] = None, + ) -> None: + self._folder_id = folder_id + self._task_id = task_id + + self._details_input.setEnabled(is_valid) + self._description_input.setEnabled(is_valid) + self._btn_description_save.setEnabled(is_valid) + if not is_valid: + self._details_input.setPlainText("") + return - # Append html string datetime_format = "%b %d %Y %H:%M:%S" - file_created = workfile_info.file_created - modification_time = workfile_info.file_modified if file_created: file_created = datetime.datetime.fromtimestamp(file_created) - if modification_time: - modification_time = datetime.datetime.fromtimestamp( - modification_time) + if file_modified: + file_modified = datetime.datetime.fromtimestamp( + file_modified + ) user_items_by_name = self._controller.get_user_items_by_name() - def convert_username(username): - user_item = user_items_by_name.get(username) + def convert_username(username_v): + user_item = user_items_by_name.get(username_v) if user_item is not None and user_item.full_name: return user_item.full_name - return username + return username_v - created_lines = [] - if workfile_info.created_by: - created_lines.append( - convert_username(workfile_info.created_by) - ) - if file_created: - created_lines.append(file_created.strftime(datetime_format)) + lines = [] + if size_value is not None: + size_value = file_size_to_string(size_value) + lines.append(f"Size:
{size_value}") - if created_lines: - created_lines.insert(0, "Created:") + # Add version comment for published workfiles + if comment: + lines.append(f"Comment:
{comment}") - modified_lines = [] - if workfile_info.updated_by: - modified_lines.append( - convert_username(workfile_info.updated_by) - ) - if modification_time: - modified_lines.append( - modification_time.strftime(datetime_format) - ) - if modified_lines: - modified_lines.insert(0, "Modified:") + if created_by or file_created: + lines.append("Created:") + if created_by: + lines.append(convert_username(created_by)) + if file_created: + lines.append(file_created.strftime(datetime_format)) - lines = ( - "Size:", - size_value, - "
".join(created_lines), - "
".join(modified_lines), - ) - self._orig_description = description - self._description_input.setPlainText(description) + if updated_by or file_modified: + lines.append("Modified:") + if updated_by: + lines.append(convert_username(updated_by)) + if file_modified: + lines.append(file_modified.strftime(datetime_format)) # Set as empty string self._details_input.setPlainText("") diff --git a/client/ayon_core/tools/workfiles/widgets/window.py b/client/ayon_core/tools/workfiles/widgets/window.py index 3f96f0bb15..bb3fd19ae1 100644 --- a/client/ayon_core/tools/workfiles/widgets/window.py +++ b/client/ayon_core/tools/workfiles/widgets/window.py @@ -6,12 +6,11 @@ from ayon_core.tools.utils import ( FoldersWidget, GoToCurrentButton, MessageOverlayObject, - NiceCheckbox, PlaceholderLineEdit, RefreshButton, TasksWidget, + FoldersFiltersWidget, ) -from ayon_core.tools.utils.lib import checkstate_int_to_enum from ayon_core.tools.workfiles.control import BaseWorkfileController from .files_widget import FilesWidget @@ -69,7 +68,6 @@ class WorkfilesToolWindow(QtWidgets.QWidget): self._default_window_flags = flags self._folders_widget = None - self._folder_filter_input = None self._files_widget = None @@ -178,50 +176,37 @@ class WorkfilesToolWindow(QtWidgets.QWidget): col_widget = QtWidgets.QWidget(parent) header_widget = QtWidgets.QWidget(col_widget) - folder_filter_input = PlaceholderLineEdit(header_widget) - folder_filter_input.setPlaceholderText("Filter folders..") + filters_widget = FoldersFiltersWidget(header_widget) go_to_current_btn = GoToCurrentButton(header_widget) refresh_btn = RefreshButton(header_widget) + header_layout = QtWidgets.QHBoxLayout(header_widget) + header_layout.setContentsMargins(0, 0, 0, 0) + header_layout.addWidget(filters_widget, 1) + header_layout.addWidget(go_to_current_btn, 0) + header_layout.addWidget(refresh_btn, 0) + folder_widget = FoldersWidget( controller, col_widget, handle_expected_selection=True ) - my_tasks_tooltip = ( - "Filter folders and task to only those you are assigned to." 
- ) - - my_tasks_label = QtWidgets.QLabel("My tasks") - my_tasks_label.setToolTip(my_tasks_tooltip) - - my_tasks_checkbox = NiceCheckbox(folder_widget) - my_tasks_checkbox.setChecked(False) - my_tasks_checkbox.setToolTip(my_tasks_tooltip) - - header_layout = QtWidgets.QHBoxLayout(header_widget) - header_layout.setContentsMargins(0, 0, 0, 0) - header_layout.addWidget(folder_filter_input, 1) - header_layout.addWidget(go_to_current_btn, 0) - header_layout.addWidget(refresh_btn, 0) - header_layout.addWidget(my_tasks_label, 0) - header_layout.addWidget(my_tasks_checkbox, 0) - col_layout = QtWidgets.QVBoxLayout(col_widget) col_layout.setContentsMargins(0, 0, 0, 0) col_layout.addWidget(header_widget, 0) col_layout.addWidget(folder_widget, 1) - folder_filter_input.textChanged.connect(self._on_folder_filter_change) - go_to_current_btn.clicked.connect(self._on_go_to_current_clicked) - refresh_btn.clicked.connect(self._on_refresh_clicked) - my_tasks_checkbox.stateChanged.connect( + filters_widget.text_changed.connect(self._on_folder_filter_change) + filters_widget.my_tasks_changed.connect( self._on_my_tasks_checkbox_state_changed ) + go_to_current_btn.clicked.connect(self._on_go_to_current_clicked) + refresh_btn.clicked.connect(self._on_refresh_clicked) - self._folder_filter_input = folder_filter_input self._folders_widget = folder_widget + self._filters_widget = filters_widget + return col_widget def _create_col_3_widget(self, controller, parent): @@ -358,8 +343,11 @@ class WorkfilesToolWindow(QtWidgets.QWidget): if not self._host_is_valid: return - self._folders_widget.set_project_name( - self._controller.get_current_project_name() + self._project_name = self._controller.get_current_project_name() + self._folders_widget.set_project_name(self._project_name) + # Update my tasks + self._on_my_tasks_checkbox_state_changed( + self._filters_widget.is_my_tasks_checked() ) def _on_save_as_finished(self, event): @@ -404,11 +392,10 @@ class WorkfilesToolWindow(QtWidgets.QWidget): else: self.close() - def _on_my_tasks_checkbox_state_changed(self, state): + def _on_my_tasks_checkbox_state_changed(self, enabled: bool) -> None: folder_ids = None task_ids = None - state = checkstate_int_to_enum(state) - if state == QtCore.Qt.Checked: + if enabled: entity_ids = self._controller.get_my_tasks_entity_ids( self._project_name ) diff --git a/client/ayon_core/version.py b/client/ayon_core/version.py index 4aeeb94ea8..7ba13a0b63 100644 --- a/client/ayon_core/version.py +++ b/client/ayon_core/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring AYON addon 'core' version.""" -__version__ = "1.6.6" +__version__ = "1.7.0+dev" diff --git a/client/pyproject.toml b/client/pyproject.toml index 6416d9b8e1..5ae71de18b 100644 --- a/client/pyproject.toml +++ b/client/pyproject.toml @@ -3,7 +3,6 @@ name="core" description="AYON core addon." 
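The workfiles window changes above compose the folder-name filter and "My tasks" toggle from the shared `FoldersFiltersWidget` now exported from `ayon_core.tools.utils`. A minimal embedding sketch for other tools, using only the widget API added in this patch (`text_changed`, `my_tasks_changed`, `is_my_tasks_checked`); the surrounding `ExampleHeader` class and its slots are hypothetical placeholders for tool-specific filtering:

    from qtpy import QtWidgets

    from ayon_core.tools.utils import FoldersFiltersWidget


    class ExampleHeader(QtWidgets.QWidget):
        """Hypothetical host widget; only FoldersFiltersWidget comes from the patch."""

        def __init__(self, parent=None):
            super().__init__(parent)

            filters_widget = FoldersFiltersWidget(self)

            layout = QtWidgets.QHBoxLayout(self)
            layout.setContentsMargins(0, 0, 0, 0)
            layout.addWidget(filters_widget, 1)

            # Signals provided by the new widget.
            filters_widget.text_changed.connect(self._apply_name_filter)
            filters_widget.my_tasks_changed.connect(self._apply_my_tasks_filter)

            self._filters_widget = filters_widget

        def _apply_name_filter(self, text: str) -> None:
            # Forward the text to the tool's folders proxy model filter.
            print("folder name filter:", text)

        def _apply_my_tasks_filter(self, enabled: bool) -> None:
            # Restrict folders/tasks to the user's assignments when enabled.
            print("my tasks only:", enabled, self._filters_widget.is_my_tasks_checked())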
[tool.poetry.dependencies] -python = ">=3.9.1,<3.10" markdown = "^3.4.1" clique = "1.6.*" jsonschema = "^2.6.0" @@ -19,3 +18,6 @@ OpenTimelineIO = "0.16.0" opencolorio = "^2.3.2,<2.4.0" Pillow = "9.5.0" websocket-client = ">=0.40.0,<2" + +[ayon.runtimeDependencies.darwin] +pyobjc-core = "^11.1" diff --git a/package.py b/package.py index ced8763100..795131463b 100644 --- a/package.py +++ b/package.py @@ -1,6 +1,6 @@ name = "core" title = "Core" -version = "1.6.6" +version = "1.7.0+dev" client_dir = "ayon_core" @@ -12,6 +12,7 @@ ayon_server_version = ">=1.8.4,<2.0.0" ayon_launcher_version = ">=1.0.2" ayon_required_addons = {} ayon_compatible_addons = { + "ayon_third_party": ">=1.3.0", "ayon_ocio": ">=1.2.1", "applications": ">=1.1.2", "harmony": ">0.4.0", diff --git a/pyproject.toml b/pyproject.toml index 7460ddc831..64c884bd37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ [tool.poetry] name = "ayon-core" -version = "1.6.6" +version = "1.7.0+dev" description = "" authors = ["Ynput Team "] readme = "README.md" @@ -37,7 +37,7 @@ opentimelineio = "^0.17.0" speedcopy = "^2.1" qtpy="^2.4.3" pyside6 = "^6.5.2" -pytest-ayon = { git = "https://github.com/ynput/pytest-ayon.git", branch = "chore/align-dependencies" } +pytest-ayon = { git = "https://github.com/ynput/pytest-ayon.git", branch = "develop" } [tool.codespell] # Ignore words that are not in the dictionary. diff --git a/server/settings/conversion.py b/server/settings/conversion.py index 34820b5b32..8eb42d8e6b 100644 --- a/server/settings/conversion.py +++ b/server/settings/conversion.py @@ -1,8 +1,57 @@ +import re import copy from typing import Any from .publish_plugins import DEFAULT_PUBLISH_VALUES +PRODUCT_NAME_REPL_REGEX = re.compile(r"[^<>{}\[\]a-zA-Z0-9_.]") + + +def _convert_product_name_templates_1_7_0(overrides): + product_name_profiles = ( + overrides + .get("tools", {}) + .get("creator", {}) + .get("product_name_profiles") + ) + if ( + not product_name_profiles + or not isinstance(product_name_profiles, list) + ): + return + + # Already converted + item = product_name_profiles[0] + if "product_base_types" in item or "product_types" not in item: + return + + # Move product base types to product types + for item in product_name_profiles: + item["product_base_types"] = item["product_types"] + item["product_types"] = [] + + +def _convert_product_name_templates_1_6_5(overrides): + product_name_profiles = ( + overrides + .get("tools", {}) + .get("creator", {}) + .get("product_name_profiles") + ) + if isinstance(product_name_profiles, list): + for item in product_name_profiles: + # Remove unsupported product name characters + template = item.get("template") + if isinstance(template, str): + item["template"] = PRODUCT_NAME_REPL_REGEX.sub("", template) + + for new_key, old_key in ( + ("host_names", "hosts"), + ("task_names", "tasks"), + ): + if old_key in item: + item[new_key] = item.get(old_key) + def _convert_imageio_configs_0_4_5(overrides): """Imageio config settings did change to profiles since 0.4.5.""" @@ -133,11 +182,54 @@ def _convert_publish_plugins(overrides): _convert_oiio_transcode_0_4_5(overrides["publish"]) +def _convert_extract_thumbnail(overrides): + """ExtractThumbnail config settings did change to profiles.""" + extract_thumbnail_overrides = ( + overrides.get("publish", {}).get("ExtractThumbnail") + ) + if extract_thumbnail_overrides is None: + return + + base_value = { + "product_types": [], + "host_names": [], + "task_types": [], + "task_names": [], + "product_names": [], + "integrate_thumbnail": True, 
+ "target_size": {"type": "source"}, + "duration_split": 0.5, + "oiiotool_defaults": { + "type": "colorspace", + "colorspace": "color_picking", + }, + "ffmpeg_args": {"input": ["-apply_trc gamma22"], "output": []}, + } + for key in ( + "product_names", + "integrate_thumbnail", + "target_size", + "duration_split", + "oiiotool_defaults", + "ffmpeg_args", + ): + if key in extract_thumbnail_overrides: + base_value[key] = extract_thumbnail_overrides.pop(key) + + extract_thumbnail_profiles = extract_thumbnail_overrides.setdefault( + "profiles", [] + ) + extract_thumbnail_profiles.append(base_value) + + def convert_settings_overrides( source_version: str, overrides: dict[str, Any], ) -> dict[str, Any]: _convert_imageio_configs_0_3_1(overrides) _convert_imageio_configs_0_4_5(overrides) + _convert_product_name_templates_1_6_5(overrides) + _convert_product_name_templates_1_7_0(overrides) _convert_publish_plugins(overrides) + _convert_extract_thumbnail(overrides) return overrides diff --git a/server/settings/main.py b/server/settings/main.py index cca885303f..3bd9549116 100644 --- a/server/settings/main.py +++ b/server/settings/main.py @@ -59,6 +59,7 @@ def _ocio_config_profile_types(): {"value": "builtin_path", "label": "AYON built-in OCIO config"}, {"value": "custom_path", "label": "Path to OCIO config"}, {"value": "published_product", "label": "Published product"}, + {"value": "disabled", "label": "Disable OCIO management"}, ] diff --git a/server/settings/publish_plugins.py b/server/settings/publish_plugins.py index ee422a0acf..eb41c75699 100644 --- a/server/settings/publish_plugins.py +++ b/server/settings/publish_plugins.py @@ -74,13 +74,35 @@ class CollectFramesFixDefModel(BaseSettingsModel): ) +def usd_contribution_layer_types(): + return [ + {"value": "asset", "label": "Asset"}, + {"value": "shot", "label": "Shot"}, + ] + + class ContributionLayersModel(BaseSettingsModel): _layout = "compact" - name: str = SettingsField(title="Name") - order: str = SettingsField( + name: str = SettingsField( + default="", + regex="[A-Za-z0-9_-]+", + title="Name") + scope: list[str] = SettingsField( + # This should actually be returned from a callable to `default_factory` + # because lists are mutable. However, the frontend can't interpret + # the callable. It will fail to apply it as the default. Specifying + # this default directly did not show any ill side effects. + default=["asset", "shot"], + title="Scope", + min_items=1, + enum_resolver=usd_contribution_layer_types) + order: int = SettingsField( + default=0, title="Order", - description="Higher order means a higher strength and stacks the " - "layer on top.") + description=( + "Higher order means a higher strength and stacks the layer on top." + ) + ) class CollectUSDLayerContributionsProfileModel(BaseSettingsModel): @@ -251,6 +273,19 @@ class AyonEntityURIModel(BaseSettingsModel): ) +class ExtractUSDLayerContributionModel(AyonEntityURIModel): + enforce_default_prim: bool = SettingsField( + title="Always set default prim to folder name.", + description=( + "When enabled ignore any default prim specified on older " + "published versions of a layer and always override it to the " + "AYON standard default prim. When disabled, preserve default prim " + "on the layer and then only the initial version would be setting " + "the AYON standard default prim." 
+ ) + ) + + class PluginStateByHostModelProfile(BaseSettingsModel): _layout = "expanded" # Filtering @@ -387,24 +422,30 @@ class ExtractThumbnailOIIODefaultsModel(BaseSettingsModel): ) -class ExtractThumbnailModel(BaseSettingsModel): - _isGroup = True - enabled: bool = SettingsField(True) +class ExtractThumbnailProfileModel(BaseSettingsModel): + product_types: list[str] = SettingsField( + default_factory=list, title="Product types" + ) + host_names: list[str] = SettingsField( + default_factory=list, title="Host names" + ) + task_types: list[str] = SettingsField( + default_factory=list, title="Task types", enum_resolver=task_types_enum + ) + task_names: list[str] = SettingsField( + default_factory=list, title="Task names" + ) product_names: list[str] = SettingsField( - default_factory=list, - title="Product names" + default_factory=list, title="Product names" ) integrate_thumbnail: bool = SettingsField( - True, - title="Integrate Thumbnail Representation" + True, title="Integrate Thumbnail Representation" ) target_size: ResizeModel = SettingsField( - default_factory=ResizeModel, - title="Target size" + default_factory=ResizeModel, title="Target size" ) background_color: ColorRGBA_uint8 = SettingsField( - (0, 0, 0, 0.0), - title="Background color" + (0, 0, 0, 0.0), title="Background color" ) duration_split: float = SettingsField( 0.5, @@ -421,6 +462,15 @@ class ExtractThumbnailModel(BaseSettingsModel): ) +class ExtractThumbnailModel(BaseSettingsModel): + _isGroup = True + enabled: bool = SettingsField(True) + + profiles: list[ExtractThumbnailProfileModel] = SettingsField( + default_factory=list, title="Profiles" + ) + + def _extract_oiio_transcoding_type(): return [ {"value": "colorspace", "label": "Use Colorspace"}, @@ -443,7 +493,7 @@ class UseDisplayViewModel(BaseSettingsModel): title="Target Display", description=( "Display of the target transform. If left empty, the" - " source Display value will be used." + " scene Display value will be used." ) ) view: str = SettingsField( @@ -451,11 +501,23 @@ class UseDisplayViewModel(BaseSettingsModel): title="Target View", description=( "View of the target transform. If left empty, the" - " source View value will be used." + " scene View value will be used." ) ) +class ExtractThumbnailFromSourceModel(BaseSettingsModel): + """Thumbnail extraction from source files using ffmpeg and oiiotool.""" + enabled: bool = SettingsField(True) + + target_size: ResizeModel = SettingsField( + default_factory=ResizeModel, title="Target size" + ) + background_color: ColorRGBA_uint8 = SettingsField( + (0, 0, 0, 0.0), title="Background color" + ) + + class ExtractOIIOTranscodeOutputModel(BaseSettingsModel): _layout = "expanded" name: str = SettingsField( @@ -565,12 +627,125 @@ class ExtractOIIOTranscodeProfileModel(BaseSettingsModel): class ExtractOIIOTranscodeModel(BaseSettingsModel): + """Color conversion transcoding using OIIO for images mostly aimed at + transcoding for reviewables (it'll process and output only RGBA channels). + """ enabled: bool = SettingsField(True) profiles: list[ExtractOIIOTranscodeProfileModel] = SettingsField( default_factory=list, title="Profiles" ) +class ExtractOIIOPostProcessOutputModel(BaseSettingsModel): + _layout = "expanded" + name: str = SettingsField( + "", + title="Name", + description="Output name (no space)", + regex=r"[a-zA-Z0-9_]([a-zA-Z0-9_\.\-]*[a-zA-Z0-9_])?$", + ) + extension: str = SettingsField( + "", + title="Extension", + description=( + "Target extension. If left empty, original" + " extension is used." 
+ ), + ) + input_arguments: list[str] = SettingsField( + default_factory=list, + title="Input arguments", + description="Arguments passed prior to the input file argument.", + ) + output_arguments: list[str] = SettingsField( + default_factory=list, + title="Output arguments", + description="Arguments passed prior to the -o argument.", + ) + tags: list[str] = SettingsField( + default_factory=list, + title="Tags", + description=( + "Additional tags that will be added to the created representation." + "\nAdd *review* tag to create review from the transcoded" + " representation instead of the original." + ) + ) + custom_tags: list[str] = SettingsField( + default_factory=list, + title="Custom Tags", + description=( + "Additional custom tags that will be added" + " to the created representation." + ) + ) + + +class ExtractOIIOPostProcessProfileModel(BaseSettingsModel): + host_names: list[str] = SettingsField( + section="Profile", + default_factory=list, + title="Host names" + ) + task_types: list[str] = SettingsField( + default_factory=list, + title="Task types", + enum_resolver=task_types_enum + ) + task_names: list[str] = SettingsField( + default_factory=list, + title="Task names" + ) + product_types: list[str] = SettingsField( + default_factory=list, + title="Product types" + ) + product_names: list[str] = SettingsField( + default_factory=list, + title="Product names" + ) + representation_names: list[str] = SettingsField( + default_factory=list, + title="Representation names", + ) + representation_exts: list[str] = SettingsField( + default_factory=list, + title="Representation extensions", + ) + delete_original: bool = SettingsField( + True, + title="Delete Original Representation", + description=( + "Choose to preserve or remove the original representation.\n" + "Keep in mind that if the transcoded representation includes" + " a `review` tag, it will take precedence over" + " the original for creating reviews." + ), + section="Conversion Outputs", + ) + outputs: list[ExtractOIIOPostProcessOutputModel] = SettingsField( + default_factory=list, + title="Output Definitions", + ) + + @validator("outputs") + def validate_unique_outputs(cls, value): + ensure_unique_names(value) + return value + + +class ExtractOIIOPostProcessModel(BaseSettingsModel): + """Process representation images with `oiiotool` on publish. + + This could be used to convert images to different formats, convert to + scanline images or flatten deep images. + """ + enabled: bool = SettingsField(True) + profiles: list[ExtractOIIOPostProcessProfileModel] = SettingsField( + default_factory=list, title="Profiles" + ) + + # --- [START] Extract Review --- class ExtractReviewFFmpegModel(BaseSettingsModel): video_filters: list[str] = SettingsField( @@ -1118,10 +1293,24 @@ class PublishPuginsModel(BaseSettingsModel): default_factory=ExtractThumbnailModel, title="Extract Thumbnail" ) + ExtractThumbnailFromSource: ExtractThumbnailFromSourceModel = SettingsField( # noqa: E501 + default_factory=ExtractThumbnailFromSourceModel, + title="Extract Thumbnail from source", + description=( + "Extract thumbnails from explicit file set in " + "instance.data['thumbnailSource'] using oiiotool" + " or ffmpeg." + "Used when artist provided thumbnail source." 
+ ) + ) ExtractOIIOTranscode: ExtractOIIOTranscodeModel = SettingsField( default_factory=ExtractOIIOTranscodeModel, title="Extract OIIO Transcode" ) + ExtractOIIOPostProcess: ExtractOIIOPostProcessModel = SettingsField( + default_factory=ExtractOIIOPostProcessModel, + title="Extract OIIO Post Process" + ) ExtractReview: ExtractReviewModel = SettingsField( default_factory=ExtractReviewModel, title="Extract Review" @@ -1134,9 +1323,11 @@ class PublishPuginsModel(BaseSettingsModel): default_factory=AyonEntityURIModel, title="Extract USD Asset Contribution", ) - ExtractUSDLayerContribution: AyonEntityURIModel = SettingsField( - default_factory=AyonEntityURIModel, - title="Extract USD Layer Contribution", + ExtractUSDLayerContribution: ExtractUSDLayerContributionModel = ( + SettingsField( + default_factory=ExtractUSDLayerContributionModel, + title="Extract USD Layer Contribution", + ) ) PreIntegrateThumbnails: PreIntegrateThumbnailsModel = SettingsField( default_factory=PreIntegrateThumbnailsModel, @@ -1213,17 +1404,17 @@ DEFAULT_PUBLISH_VALUES = { "enabled": True, "contribution_layers": [ # Asset layers - {"name": "model", "order": 100}, - {"name": "assembly", "order": 150}, - {"name": "groom", "order": 175}, - {"name": "look", "order": 200}, - {"name": "rig", "order": 300}, + {"name": "model", "order": 100, "scope": ["asset"]}, + {"name": "assembly", "order": 150, "scope": ["asset"]}, + {"name": "groom", "order": 175, "scope": ["asset"]}, + {"name": "look", "order": 200, "scope": ["asset"]}, + {"name": "rig", "order": 300, "scope": ["asset"]}, # Shot layers - {"name": "layout", "order": 200}, - {"name": "animation", "order": 300}, - {"name": "simulation", "order": 400}, - {"name": "fx", "order": 500}, - {"name": "lighting", "order": 600}, + {"name": "layout", "order": 200, "scope": ["shot"]}, + {"name": "animation", "order": 300, "scope": ["shot"]}, + {"name": "simulation", "order": 400, "scope": ["shot"]}, + {"name": "fx", "order": 500, "scope": ["shot"]}, + {"name": "lighting", "order": 600, "scope": ["shot"]}, ], "profiles": [ { @@ -1326,27 +1517,49 @@ DEFAULT_PUBLISH_VALUES = { }, "ExtractThumbnail": { "enabled": True, - "product_names": [], - "integrate_thumbnail": True, + "profiles": [ + { + "product_types": [], + "host_names": [], + "task_types": [], + "task_names": [], + "product_names": [], + "integrate_thumbnail": True, + "target_size": { + "type": "source" + }, + "duration_split": 0.5, + "oiiotool_defaults": { + "type": "colorspace", + "colorspace": "color_picking" + }, + "ffmpeg_args": { + "input": [ + "-apply_trc gamma22" + ], + "output": [] + } + } + ] + }, + "ExtractThumbnailFromSource": { + "enabled": True, "target_size": { - "type": "source" + "type": "resize", + "resize": { + "width": 300, + "height": 170 + } }, - "duration_split": 0.5, - "oiiotool_defaults": { - "type": "colorspace", - "colorspace": "color_picking" - }, - "ffmpeg_args": { - "input": [ - "-apply_trc gamma22" - ], - "output": [] - } }, "ExtractOIIOTranscode": { "enabled": True, "profiles": [] }, + "ExtractOIIOPostProcess": { + "enabled": True, + "profiles": [] + }, "ExtractReview": { "enabled": True, "profiles": [ @@ -1448,6 +1661,105 @@ DEFAULT_PUBLISH_VALUES = { "fill_missing_frames": "closest_existing" } ] + }, + { + "product_types": [], + "hosts": ["substancepainter"], + "task_types": [], + "outputs": [ + { + "name": "png", + "ext": "png", + "tags": [ + "ftrackreview", + "kitsureview", + "webreview" + ], + "burnins": [], + "ffmpeg_args": { + "video_filters": [], + "audio_filters": [], + "input": [], + 
"output": [] + }, + "filter": { + "families": [ + "render", + "review", + "ftrack" + ], + "product_names": [], + "custom_tags": [], + "single_frame_filter": "single_frame" + }, + "overscan_crop": "", + # "overscan_color": [0, 0, 0], + "overscan_color": [0, 0, 0, 0.0], + "width": 1920, + "height": 1080, + "scale_pixel_aspect": True, + "bg_color": [0, 0, 0, 0.0], + "letter_box": { + "enabled": False, + "ratio": 0.0, + "fill_color": [0, 0, 0, 1.0], + "line_thickness": 0, + "line_color": [255, 0, 0, 1.0] + }, + "fill_missing_frames": "only_rendered" + }, + { + "name": "h264", + "ext": "mp4", + "tags": [ + "burnin", + "ftrackreview", + "kitsureview", + "webreview" + ], + "burnins": [], + "ffmpeg_args": { + "video_filters": [], + "audio_filters": [], + "input": [ + "-apply_trc gamma22" + ], + "output": [ + "-pix_fmt yuv420p", + "-crf 18", + "-c:a aac", + "-b:a 192k", + "-g 1", + "-movflags faststart" + ] + }, + "filter": { + "families": [ + "render", + "review", + "ftrack" + ], + "product_names": [], + "custom_tags": [], + "single_frame_filter": "multi_frame" + }, + "overscan_crop": "", + # "overscan_color": [0, 0, 0], + "overscan_color": [0, 0, 0, 0.0], + "width": 0, + "height": 0, + "scale_pixel_aspect": True, + "bg_color": [0, 0, 0, 0.0], + "letter_box": { + "enabled": False, + "ratio": 0.0, + "fill_color": [0, 0, 0, 1.0], + "line_thickness": 0, + "line_color": [255, 0, 0, 1.0] + }, + "fill_missing_frames": "only_rendered" + } + ] } ] }, @@ -1526,6 +1838,7 @@ DEFAULT_PUBLISH_VALUES = { }, "ExtractUSDLayerContribution": { "use_ayon_entity_uri": False, + "enforce_default_prim": False, }, "PreIntegrateThumbnails": { "enabled": True, diff --git a/server/settings/tools.py b/server/settings/tools.py index f40c7c3627..19a1ff01be 100644 --- a/server/settings/tools.py +++ b/server/settings/tools.py @@ -24,17 +24,32 @@ class ProductTypeSmartSelectModel(BaseSettingsModel): class ProductNameProfile(BaseSettingsModel): _layout = "expanded" - product_types: list[str] = SettingsField( - default_factory=list, title="Product types" + product_base_types: list[str] = SettingsField( + default_factory=list, + title="Product base types", + ) + product_types: list[str] = SettingsField( + default_factory=list, + title="Product types", + ) + host_names: list[str] = SettingsField( + default_factory=list, + title="Host names", ) - hosts: list[str] = SettingsField(default_factory=list, title="Hosts") task_types: list[str] = SettingsField( default_factory=list, title="Task types", - enum_resolver=task_types_enum + enum_resolver=task_types_enum, + ) + task_names: list[str] = SettingsField( + default_factory=list, + title="Task names", + ) + template: str = SettingsField( + "", + title="Template", + regex=r"^[<>{}\[\]a-zA-Z0-9_.]+$", ) - tasks: list[str] = SettingsField(default_factory=list, title="Task names") - template: str = SettingsField("", title="Template") class FilterCreatorProfile(BaseSettingsModel): @@ -341,6 +356,27 @@ class CustomStagingDirProfileModel(BaseSettingsModel): ) +class DiscoverValidationModel(BaseSettingsModel): + """Strictly validate publish plugins discovery. + + Artist won't be able to publish if path to publish plugin fails to be + imported. + + """ + _isGroup = True + enabled: bool = SettingsField( + False, + description="Enable strict mode of plugins discovery", + ) + ignore_paths: list[str] = SettingsField( + default_factory=list, + title="Ignored paths (regex)", + description=( + "Paths that do match regex will be skipped in validation." 
+ ), + ) + + class PublishToolModel(BaseSettingsModel): template_name_profiles: list[PublishTemplateNameProfile] = SettingsField( default_factory=list, @@ -358,6 +394,10 @@ class PublishToolModel(BaseSettingsModel): title="Custom Staging Dir Profiles" ) ) + discover_validation: DiscoverValidationModel = SettingsField( + default_factory=DiscoverValidationModel, + title="Validate plugins discovery", + ) comment_minimum_required_chars: int = SettingsField( 0, title="Publish comment minimum required characters", @@ -432,108 +472,118 @@ DEFAULT_TOOLS_VALUES = { ], "product_name_profiles": [ { + "product_base_types": [], "product_types": [], - "hosts": [], + "host_names": [], "task_types": [], - "tasks": [], + "task_names": [], "template": "{product[type]}{variant}" }, { - "product_types": [ + "product_base_types": [ "workfile" ], - "hosts": [], + "product_types": [], + "host_names": [], "task_types": [], - "tasks": [], + "task_names": [], "template": "{product[type]}{Task[name]}" }, { - "product_types": [ + "product_base_types": [ "render" ], - "hosts": [], + "product_types": [], + "host_names": [], "task_types": [], - "tasks": [], + "task_names": [], "template": "{product[type]}{Task[name]}{Variant}<_{Aov}>" }, { - "product_types": [ + "product_base_types": [ "renderLayer", "renderPass" ], - "hosts": [ + "product_types": [], + "host_names": [ "tvpaint" ], "task_types": [], - "tasks": [], + "task_names": [], "template": ( "{product[type]}{Task[name]}_{Renderlayer}_{Renderpass}" ) }, { - "product_types": [ + "product_base_types": [ "review", "workfile" ], - "hosts": [ + "product_types": [], + "host_names": [ "aftereffects", "tvpaint" ], "task_types": [], - "tasks": [], + "task_names": [], "template": "{product[type]}{Task[name]}" }, { - "product_types": ["render"], - "hosts": [ + "product_base_types": ["render"], + "product_types": [], + "host_names": [ "aftereffects" ], "task_types": [], - "tasks": [], + "task_names": [], "template": "{product[type]}{Task[name]}{Composition}{Variant}" }, { - "product_types": [ + "product_base_types": [ "staticMesh" ], - "hosts": [ + "product_types": [], + "host_names": [ "maya" ], "task_types": [], - "tasks": [], + "task_names": [], "template": "S_{folder[name]}{variant}" }, { - "product_types": [ + "product_base_types": [ "skeletalMesh" ], - "hosts": [ + "product_types": [], + "host_names": [ "maya" ], "task_types": [], - "tasks": [], + "task_names": [], "template": "SK_{folder[name]}{variant}" }, { - "product_types": [ + "product_base_types": [ "hda" ], - "hosts": [ + "product_types": [], + "host_names": [ "houdini" ], "task_types": [], - "tasks": [], + "task_names": [], "template": "{folder[name]}_{variant}" }, { - "product_types": [ + "product_base_types": [ "textureSet" ], - "hosts": [ + "product_types": [], + "host_names": [ "substancedesigner" ], "task_types": [], - "tasks": [], + "task_names": [], "template": "T_{folder[name]}{variant}" } ], @@ -680,6 +730,10 @@ DEFAULT_TOOLS_VALUES = { "template_name": "simpleUnrealTextureHero" } ], + "discover_validation": { + "enabled": False, + "ignore_paths": [], + }, "comment_minimum_required_chars": 0, } } diff --git a/tests/client/ayon_core/lib/test_transcoding.py b/tests/client/ayon_core/lib/test_transcoding.py new file mode 100644 index 0000000000..b9959e2958 --- /dev/null +++ b/tests/client/ayon_core/lib/test_transcoding.py @@ -0,0 +1,158 @@ +import unittest + +from ayon_core.lib.transcoding import ( + get_review_info_by_layer_name +) + + +class GetReviewInfoByLayerName(unittest.TestCase): + """Test responses 
from `get_review_info_by_layer_name`""" + def test_rgba_channels(self): + + # RGB is supported + info = get_review_info_by_layer_name(["R", "G", "B"]) + self.assertEqual(info, [{ + "name": "", + "review_channels": { + "R": "R", + "G": "G", + "B": "B", + "A": None, + } + }]) + + # rgb is supported + info = get_review_info_by_layer_name(["r", "g", "b"]) + self.assertEqual(info, [{ + "name": "", + "review_channels": { + "R": "r", + "G": "g", + "B": "b", + "A": None, + } + }]) + + # diffuse.[RGB] is supported + info = get_review_info_by_layer_name( + ["diffuse.R", "diffuse.G", "diffuse.B"] + ) + self.assertEqual(info, [{ + "name": "diffuse", + "review_channels": { + "R": "diffuse.R", + "G": "diffuse.G", + "B": "diffuse.B", + "A": None, + } + }]) + + info = get_review_info_by_layer_name(["R", "G", "B", "A"]) + self.assertEqual(info, [{ + "name": "", + "review_channels": { + "R": "R", + "G": "G", + "B": "B", + "A": "A", + } + }]) + + def test_z_channel(self): + + info = get_review_info_by_layer_name(["Z"]) + self.assertEqual(info, [{ + "name": "", + "review_channels": { + "R": "Z", + "G": "Z", + "B": "Z", + "A": None, + } + }]) + + info = get_review_info_by_layer_name(["Z", "A"]) + self.assertEqual(info, [{ + "name": "", + "review_channels": { + "R": "Z", + "G": "Z", + "B": "Z", + "A": "A", + } + }]) + + def test_ar_ag_ab_channels(self): + + info = get_review_info_by_layer_name(["AR", "AG", "AB"]) + self.assertEqual(info, [{ + "name": "", + "review_channels": { + "R": "AR", + "G": "AG", + "B": "AB", + "A": None, + } + }]) + + info = get_review_info_by_layer_name(["AR", "AG", "AB", "A"]) + self.assertEqual(info, [{ + "name": "", + "review_channels": { + "R": "AR", + "G": "AG", + "B": "AB", + "A": "A", + } + }]) + + def test_unknown_channels(self): + info = get_review_info_by_layer_name(["hello", "world"]) + self.assertEqual(info, []) + + def test_rgba_priority(self): + """Ensure main layer, and RGB channels are prioritized + + If both Z and RGB channels are present for a layer name, then RGB + should be prioritized and the Z channel should be ignored. + + Also, the alpha channel from another "layer name" is not used. Note + how the diffuse response does not take A channel from the main layer. 
+ + """ + + info = get_review_info_by_layer_name([ + "Z", + "diffuse.R", "diffuse.G", "diffuse.B", + "R", "G", "B", "A", + "specular.R", "specular.G", "specular.B", "specular.A", + ]) + self.assertEqual(info, [ + { + "name": "", + "review_channels": { + "R": "R", + "G": "G", + "B": "B", + "A": "A", + }, + }, + { + "name": "diffuse", + "review_channels": { + "R": "diffuse.R", + "G": "diffuse.G", + "B": "diffuse.B", + "A": None, + }, + }, + { + "name": "specular", + "review_channels": { + "R": "specular.R", + "G": "specular.G", + "B": "specular.B", + "A": "specular.A", + }, + }, + ]) diff --git a/tests/client/ayon_core/pipeline/create/test_product_name.py b/tests/client/ayon_core/pipeline/create/test_product_name.py new file mode 100644 index 0000000000..7181e18b43 --- /dev/null +++ b/tests/client/ayon_core/pipeline/create/test_product_name.py @@ -0,0 +1,333 @@ +"""Tests for product_name helpers.""" +import pytest +from unittest.mock import patch + +from ayon_core.pipeline.create.product_name import ( + get_product_name_template, + get_product_name, +) +from ayon_core.pipeline.create.constants import DEFAULT_PRODUCT_TEMPLATE +from ayon_core.pipeline.create.exceptions import ( + TaskNotSetError, + TemplateFillError, +) + + +class TestGetProductNameTemplate: + @patch("ayon_core.pipeline.create.product_name.get_project_settings") + @patch("ayon_core.pipeline.create.product_name.filter_profiles") + def test_matching_profile_with_replacements( + self, + mock_filter_profiles, + mock_get_settings, + ): + """Matching profile applies legacy replacement tokens.""" + mock_get_settings.return_value = { + "core": {"tools": {"creator": {"product_name_profiles": []}}} + } + # The function should replace {task}/{family}/{asset} variants + mock_filter_profiles.return_value = { + "template": ("{task}-{Task}-{TASK}-{family}-{Family}" + "-{FAMILY}-{asset}-{Asset}-{ASSET}") + } + + result = get_product_name_template( + project_name="proj", + product_type="model", + task_name="modeling", + task_type="Modeling", + host_name="maya", + ) + assert result == ( + "{task[name]}-{Task[name]}-{TASK[NAME]}-" + "{product[type]}-{Product[type]}-{PRODUCT[TYPE]}-" + "{folder[name]}-{Folder[name]}-{FOLDER[NAME]}" + ) + + @patch("ayon_core.pipeline.create.product_name.get_project_settings") + @patch("ayon_core.pipeline.create.product_name.filter_profiles") + def test_no_matching_profile_uses_default( + self, + mock_filter_profiles, + mock_get_settings, + ): + mock_get_settings.return_value = { + "core": {"tools": {"creator": {"product_name_profiles": []}}} + } + mock_filter_profiles.return_value = None + + assert ( + get_product_name_template( + project_name="proj", + product_type="model", + task_name="modeling", + task_type="Modeling", + host_name="maya", + ) + == DEFAULT_PRODUCT_TEMPLATE + ) + + @patch("ayon_core.pipeline.create.product_name.get_project_settings") + @patch("ayon_core.pipeline.create.product_name.filter_profiles") + def test_custom_default_template_used( + self, + mock_filter_profiles, + mock_get_settings, + ): + mock_get_settings.return_value = { + "core": {"tools": {"creator": {"product_name_profiles": []}}} + } + mock_filter_profiles.return_value = None + + custom_default = "{variant}_{family}" + assert ( + get_product_name_template( + project_name="proj", + product_type="model", + task_name="modeling", + task_type="Modeling", + host_name="maya", + default_template=custom_default, + ) + == custom_default + ) + + @patch("ayon_core.pipeline.create.product_name.get_project_settings") + 
@patch("ayon_core.pipeline.create.product_name.filter_profiles") + def test_product_base_type_added_to_filtering_when_provided( + self, + mock_filter_profiles, + mock_get_settings, + ): + mock_get_settings.return_value = { + "core": {"tools": {"creator": {"product_name_profiles": []}}} + } + mock_filter_profiles.return_value = None + + get_product_name_template( + project_name="proj", + product_type="model", + task_name="modeling", + task_type="Modeling", + host_name="maya", + product_base_type="asset", + ) + args, kwargs = mock_filter_profiles.call_args + # args[1] is filtering_criteria + assert args[1]["product_base_types"] == "asset" + + +class TestGetProductName: + @patch("ayon_core.pipeline.create.product_name.get_product_name_template") + @patch("ayon_core.pipeline.create.product_name." + "StringTemplate.format_strict_template") + @patch("ayon_core.pipeline.create.product_name.prepare_template_data") + def test_empty_product_type_returns_empty( + self, mock_prepare, mock_format, mock_get_tmpl + ): + assert ( + get_product_name( + project_name="proj", + task_name="modeling", + task_type="Modeling", + host_name="maya", + product_type="", + variant="Main", + ) + == "" + ) + mock_get_tmpl.assert_not_called() + mock_format.assert_not_called() + mock_prepare.assert_not_called() + + @patch("ayon_core.pipeline.create.product_name.get_product_name_template") + @patch("ayon_core.pipeline.create.product_name." + "StringTemplate.format_strict_template") + @patch("ayon_core.pipeline.create.product_name.prepare_template_data") + def test_happy_path( + self, mock_prepare, mock_format, mock_get_tmpl + ): + mock_get_tmpl.return_value = "{task[name]}_{product[type]}_{variant}" + mock_prepare.return_value = { + "task": {"name": "modeling"}, + "product": {"type": "model"}, + "variant": "Main", + "family": "model", + } + mock_format.return_value = "modeling_model_Main" + + result = get_product_name( + project_name="proj", + task_name="modeling", + task_type="Modeling", + host_name="maya", + product_type="model", + variant="Main", + ) + assert result == "modeling_model_Main" + mock_get_tmpl.assert_called_once() + mock_prepare.assert_called_once() + mock_format.assert_called_once() + + @patch("ayon_core.pipeline.create.product_name.get_product_name_template") + @patch("ayon_core.pipeline.create.product_name." 
+ "StringTemplate.format_strict_template") + @patch("ayon_core.pipeline.create.product_name.prepare_template_data") + def test_product_name_with_base_type( + self, mock_prepare, mock_format, mock_get_tmpl + ): + mock_get_tmpl.return_value = ( + "{task[name]}_{product[basetype]}_{variant}" + ) + mock_prepare.return_value = { + "task": {"name": "modeling"}, + "product": {"type": "model"}, + "variant": "Main", + "family": "model", + } + mock_format.return_value = "modeling_modelBase_Main" + + result = get_product_name( + project_name="proj", + task_name="modeling", + task_type="Modeling", + host_name="maya", + product_type="model", + product_base_type="modelBase", + variant="Main", + ) + assert result == "modeling_modelBase_Main" + mock_get_tmpl.assert_called_once() + mock_prepare.assert_called_once() + mock_format.assert_called_once() + + @patch("ayon_core.pipeline.create.product_name.get_product_name_template") + def test_task_required_but_missing_raises(self, mock_get_tmpl): + mock_get_tmpl.return_value = "{task[name]}_{variant}" + with pytest.raises(TaskNotSetError): + get_product_name( + project_name="proj", + task_name="", + task_type="Modeling", + host_name="maya", + product_type="model", + variant="Main", + ) + + @patch("ayon_core.pipeline.create.product_name.get_product_name_template") + @patch("ayon_core.pipeline.create.product_name.ayon_api.get_project") + @patch("ayon_core.pipeline.create.product_name.StringTemplate." + "format_strict_template") + @patch("ayon_core.pipeline.create.product_name.prepare_template_data") + def test_task_short_name_is_used( + self, mock_prepare, mock_format, mock_get_project, mock_get_tmpl + ): + mock_get_tmpl.return_value = "{task[short]}_{variant}" + mock_get_project.return_value = { + "taskTypes": [{"name": "Modeling", "shortName": "mdl"}] + } + mock_prepare.return_value = { + "task": { + "short": "mdl" + }, + "variant": "Main" + } + mock_format.return_value = "mdl_Main" + + result = get_product_name( + project_name="proj", + task_name="modeling", + task_type="Modeling", + host_name="maya", + product_type="model", + variant="Main", + ) + assert result == "mdl_Main" + + @patch("ayon_core.pipeline.create.product_name.get_product_name_template") + @patch("ayon_core.pipeline.create.product_name.StringTemplate." + "format_strict_template") + @patch("ayon_core.pipeline.create.product_name.prepare_template_data") + def test_template_fill_error_translated( + self, mock_prepare, mock_format, mock_get_tmpl + ): + mock_get_tmpl.return_value = "{missing_key}_{variant}" + mock_prepare.return_value = {"variant": "Main"} + mock_format.side_effect = KeyError("missing_key") + with pytest.raises(TemplateFillError): + get_product_name( + project_name="proj", + task_name="modeling", + task_type="Modeling", + host_name="maya", + product_type="model", + variant="Main", + ) + + @patch("ayon_core.pipeline.create.product_name.warn") + @patch("ayon_core.pipeline.create.product_name.get_product_name_template") + @patch("ayon_core.pipeline.create.product_name." 
+ "StringTemplate.format_strict_template") + @patch("ayon_core.pipeline.create.product_name.prepare_template_data") + def test_warns_when_template_needs_base_type_but_missing( + self, + mock_prepare, + mock_format, + mock_get_tmpl, + mock_warn, + ): + mock_get_tmpl.return_value = "{product[basetype]}_{variant}" + + mock_prepare.return_value = { + "product": {"type": "model"}, + "variant": "Main", + "family": "model", + } + mock_format.return_value = "asset_Main" + + _ = get_product_name( + project_name="proj", + task_name="modeling", + task_type="Modeling", + host_name="maya", + product_type="model", + variant="Main", + ) + mock_warn.assert_called_once() + + @patch("ayon_core.pipeline.create.product_name.get_product_name_template") + @patch("ayon_core.pipeline.create.product_name." + "StringTemplate.format_strict_template") + @patch("ayon_core.pipeline.create.product_name.prepare_template_data") + def test_dynamic_data_overrides_defaults( + self, mock_prepare, mock_format, mock_get_tmpl + ): + mock_get_tmpl.return_value = "{custom}_{variant}" + mock_prepare.return_value = {"custom": "overridden", "variant": "Main"} + mock_format.return_value = "overridden_Main" + + result = get_product_name( + project_name="proj", + task_name="modeling", + task_type="Modeling", + host_name="maya", + product_type="model", + variant="Main", + dynamic_data={"custom": "overridden"}, + ) + assert result == "overridden_Main" + + @patch("ayon_core.pipeline.create.product_name.get_product_name_template") + def test_product_type_filter_is_used(self, mock_get_tmpl): + mock_get_tmpl.return_value = DEFAULT_PRODUCT_TEMPLATE + _ = get_product_name( + project_name="proj", + task_name="modeling", + task_type="Modeling", + host_name="maya", + product_type="model", + variant="Main", + product_type_filter="look", + ) + args, kwargs = mock_get_tmpl.call_args + assert kwargs["product_type"] == "look" diff --git a/tests/client/ayon_core/pipeline/editorial/test_extract_otio_review.py b/tests/client/ayon_core/pipeline/editorial/test_extract_otio_review.py index 6a74df7f43..ed441edc63 100644 --- a/tests/client/ayon_core/pipeline/editorial/test_extract_otio_review.py +++ b/tests/client/ayon_core/pipeline/editorial/test_extract_otio_review.py @@ -246,75 +246,75 @@ def test_multiple_review_clips_no_gap(): expected = [ # 10 head black frames generated from gap (991-1000) '/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi' - ' -i color=c=black:s=1280x720 -tune ' + ' -i color=c=black:s=1920x1080 -tune ' 'stillimage -start_number 991 -pix_fmt rgba C:/result/output.%04d.png', # Alternance 25fps tiff sequence and 24fps exr sequence # for 100 frames each '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1001 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i ' f'C:\\with_tc{os.sep}output.%04d.exr ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1102 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1198 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i ' 
f'C:\\with_tc{os.sep}output.%04d.exr ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1299 -pix_fmt rgba C:/result/output.%04d.png', # Repeated 25fps tiff sequence multiple times till the end '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1395 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1496 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1597 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1698 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1799 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1900 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 2001 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 2102 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i ' f'C:\\no_tc{os.sep}output.%04d.tif ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 2203 -pix_fmt rgba C:/result/output.%04d.png' ] @@ -348,12 +348,12 @@ def test_multiple_review_clips_with_gap(): '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i ' f'C:\\with_tc{os.sep}output.%04d.exr ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1003 -pix_fmt rgba C:/result/output.%04d.png', '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i ' f'C:\\with_tc{os.sep}output.%04d.exr ' - '-vf scale=1280:720:flags=lanczos -compression_level 5 ' + '-vf scale=1920:1080:flags=lanczos -compression_level 5 ' '-start_number 1091 -pix_fmt rgba C:/result/output.%04d.png' ]
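
A minimal usage sketch summarizing the behaviour asserted by the new `GetReviewInfoByLayerName` tests added above; the import path and the returned structure (a list of dicts with "name" and "review_channels" keys) are taken directly from that test module, and the expected result described in the comments mirrors its assertions rather than any additional documentation:

    from ayon_core.lib.transcoding import get_review_info_by_layer_name

    # Channel names as they might appear in a multi-layer EXR.
    channel_names = [
        "Z",
        "diffuse.R", "diffuse.G", "diffuse.B",
        "R", "G", "B", "A",
    ]

    # Each returned item maps a layer name to the channels that should be
    # used for R, G, B and (optionally) A when building a review image.
    for layer_info in get_review_info_by_layer_name(channel_names):
        print(layer_info["name"], layer_info["review_channels"])

    # Per the tests above: the main (unnamed) layer uses R/G/B/A and the
    # lone "Z" channel is ignored because RGB channels are present, while
    # "diffuse" uses its own RGB channels with no alpha assigned.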