Merge branch 'develop' into bugfix/1245-default-render-product-name-profile-is-not-unique-for-each-aov

Commit 9438c5bb3f by Mustafa Zaky Jafar, 2025-06-25 14:24:46 +03:00 (committed by GitHub)
153 changed files with 12757 additions and 2190 deletions


@ -35,6 +35,20 @@ body:
label: Version
description: What version are you running? Look to AYON Tray
options:
- 1.3.2
- 1.3.1
- 1.3.0
- 1.2.0
- 1.1.9
- 1.1.8
- 1.1.7
- 1.1.6
- 1.1.5
- 1.1.4
- 1.1.3
- 1.1.2
- 1.1.1
- 1.1.0
- 1.0.14
- 1.0.13
- 1.0.12


@ -1,10 +1,11 @@
name: 🐞 Update Bug Report
on:
workflow_run:
workflows: ["🚀 Release Trigger"]
types:
- completed
workflow_dispatch:
release:
# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#release
types: [published]
jobs:
update-bug-report:

.gitignore

@ -82,6 +82,7 @@ poetry.lock
.editorconfig
.pre-commit-config.yaml
mypy.ini
poetry.lock
.github_changelog_generator


@ -1,42 +1,38 @@
# -*- coding: utf-8 -*-
"""Addons for AYON."""
from . import click_wrap
from .interfaces import (
IPluginPaths,
ITrayAddon,
ITrayAction,
ITrayService,
IHostAddon,
)
from .base import (
ProcessPreparationError,
ProcessContext,
AYONAddon,
AddonsManager,
AYONAddon,
ProcessContext,
ProcessPreparationError,
load_addons,
)
from .interfaces import (
IHostAddon,
IPluginPaths,
ITraits,
ITrayAction,
ITrayAddon,
ITrayService,
)
from .utils import (
ensure_addons_are_process_context_ready,
ensure_addons_are_process_ready,
)
__all__ = (
"click_wrap",
"IPluginPaths",
"ITrayAddon",
"ITrayAction",
"ITrayService",
"IHostAddon",
"ProcessPreparationError",
"ProcessContext",
"AYONAddon",
"AddonsManager",
"load_addons",
"IHostAddon",
"IPluginPaths",
"ITraits",
"ITrayAction",
"ITrayAddon",
"ITrayService",
"ProcessContext",
"ProcessPreparationError",
"click_wrap",
"ensure_addons_are_process_context_ready",
"ensure_addons_are_process_ready",
"load_addons",
)


@ -1,16 +1,27 @@
"""Addon interfaces for AYON."""
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Callable, Optional, Type
from ayon_core import resources
if TYPE_CHECKING:
from qtpy import QtWidgets
from ayon_core.addon.base import AddonsManager
from ayon_core.pipeline.traits import TraitBase
from ayon_core.tools.tray.ui.tray import TrayManager
class _AYONInterfaceMeta(ABCMeta):
"""AYONInterface meta class to print proper string."""
"""AYONInterface metaclass to print proper string."""
def __str__(self):
return "<'AYONInterface.{}'>".format(self.__name__)
def __str__(cls):
return f"<'AYONInterface.{cls.__name__}'>"
def __repr__(self):
return str(self)
def __repr__(cls):
return str(cls)
class AYONInterface(metaclass=_AYONInterfaceMeta):
@ -24,7 +35,7 @@ class AYONInterface(metaclass=_AYONInterfaceMeta):
in the interface. By default, interface does not have any abstract parts.
"""
pass
log = None
class IPluginPaths(AYONInterface):
@ -38,10 +49,25 @@ class IPluginPaths(AYONInterface):
"""
@abstractmethod
def get_plugin_paths(self):
pass
def get_plugin_paths(self) -> dict[str, list[str]]:
"""Return plugin paths for addon.
def _get_plugin_paths_by_type(self, plugin_type):
Returns:
dict[str, list[str]]: Plugin paths for addon.
"""
def _get_plugin_paths_by_type(
self, plugin_type: str) -> list[str]:
"""Get plugin paths by type.
Args:
plugin_type (str): Type of plugin paths to get.
Returns:
list[str]: List of plugin paths.
"""
paths = self.get_plugin_paths()
if not paths or plugin_type not in paths:
return []
@ -54,14 +80,18 @@ class IPluginPaths(AYONInterface):
paths = [paths]
return paths
def get_launcher_action_paths(self):
def get_launcher_action_paths(self) -> list[str]:
"""Receive launcher actions paths.
Give addons ability to add launcher actions paths.
Returns:
list[str]: List of launcher action paths.
"""
return self._get_plugin_paths_by_type("actions")
def get_create_plugin_paths(self, host_name):
def get_create_plugin_paths(self, host_name: str) -> list[str]:
"""Receive create plugin paths.
Give addons ability to add create plugin paths based on host name.
@ -72,11 +102,14 @@ class IPluginPaths(AYONInterface):
Args:
host_name (str): For which host are the plugins meant.
"""
Returns:
list[str]: List of create plugin paths.
"""
return self._get_plugin_paths_by_type("create")
def get_load_plugin_paths(self, host_name):
def get_load_plugin_paths(self, host_name: str) -> list[str]:
"""Receive load plugin paths.
Give addons ability to add load plugin paths based on host name.
@ -87,11 +120,14 @@ class IPluginPaths(AYONInterface):
Args:
host_name (str): For which host are the plugins meant.
"""
Returns:
list[str]: List of load plugin paths.
"""
return self._get_plugin_paths_by_type("load")
def get_publish_plugin_paths(self, host_name):
def get_publish_plugin_paths(self, host_name: str) -> list[str]:
"""Receive publish plugin paths.
Give addons ability to add publish plugin paths based on host name.
@ -102,11 +138,14 @@ class IPluginPaths(AYONInterface):
Args:
host_name (str): For which host are the plugins meant.
"""
Returns:
list[str]: List of publish plugin paths.
"""
return self._get_plugin_paths_by_type("publish")
def get_inventory_action_paths(self, host_name):
def get_inventory_action_paths(self, host_name: str) -> list[str]:
"""Receive inventory action paths.
Give addons ability to add inventory action plugin paths.
@ -117,77 +156,84 @@ class IPluginPaths(AYONInterface):
Args:
host_name (str): For which host are the plugins meant.
"""
Returns:
list[str]: List of inventory action plugin paths.
"""
return self._get_plugin_paths_by_type("inventory")
class ITrayAddon(AYONInterface):
"""Addon has special procedures when used in Tray tool.
IMPORTANT:
The addon. still must be usable if is not used in tray even if
would do nothing.
"""
Important:
The addon still must be usable if it is not used in the tray, even if it
would do nothing.
"""
manager: AddonsManager
tray_initialized = False
_tray_manager = None
_tray_manager: TrayManager = None
_admin_submenu = None
@abstractmethod
def tray_init(self):
def tray_init(self) -> None:
"""Initialization part of tray implementation.
Triggered between `initialization` and `connect_with_addons`.
This is where GUIs should be loaded or tray specific parts should be
prepared.
prepared
"""
pass
@abstractmethod
def tray_menu(self, tray_menu):
def tray_menu(self, tray_menu: QtWidgets.QMenu) -> None:
"""Add addon's action to tray menu."""
pass
@abstractmethod
def tray_start(self):
def tray_start(self) -> None:
"""Start procedure in tray tool."""
pass
@abstractmethod
def tray_exit(self):
def tray_exit(self) -> None:
"""Cleanup method which is executed on tray shutdown.
This is the place where all threads should be shut down.
"""
pass
def execute_in_main_thread(self, callback: Callable) -> None:
"""Pushes callback to the queue or process 'callback' on a main thread.
def execute_in_main_thread(self, callback):
""" Pushes callback to the queue or process 'callback' on a main thread
Some callbacks need to be processed on main thread (menu actions
must be added on main thread else they won't get triggered etc.)
Args:
callback (Callable): Function to be executed on main thread
Some callbacks need to be processed on main thread (menu actions
must be added on main thread or they won't get triggered etc.)
"""
if not self.tray_initialized:
# TODO Called without initialized tray, still main thread needed
# TODO (Illicit): Called without initialized tray, still
# main thread needed.
try:
callback()
except Exception:
except Exception: # noqa: BLE001
self.log.warning(
"Failed to execute {} in main thread".format(callback),
exc_info=True)
"Failed to execute %s callback in main thread",
str(callback), exc_info=True)
return
self.manager.tray_manager.execute_in_main_thread(callback)
self._tray_manager.tray_manager.execute_in_main_thread(callback)
def show_tray_message(self, title, message, icon=None, msecs=None):
def show_tray_message(
self,
title: str,
message: str,
icon: Optional[QtWidgets.QSystemTrayIcon] = None,
msecs: Optional[int] = None) -> None:
"""Show tray message.
Args:
@ -198,16 +244,22 @@ class ITrayAddon(AYONInterface):
msecs (int): Duration of message visibility in milliseconds.
Default is 10000 msecs, may differ by Qt version.
"""
if self._tray_manager:
self._tray_manager.show_tray_message(title, message, icon, msecs)
def add_doubleclick_callback(self, callback):
def add_doubleclick_callback(self, callback: Callable) -> None:
"""Add callback to be triggered on tray icon double click."""
if hasattr(self.manager, "add_doubleclick_callback"):
self.manager.add_doubleclick_callback(self, callback)
@staticmethod
def admin_submenu(tray_menu):
def admin_submenu(tray_menu: QtWidgets.QMenu) -> QtWidgets.QMenu:
"""Get or create admin submenu.
Returns:
QtWidgets.QMenu: Admin submenu.
"""
if ITrayAddon._admin_submenu is None:
from qtpy import QtWidgets
@ -217,7 +269,18 @@ class ITrayAddon(AYONInterface):
return ITrayAddon._admin_submenu
@staticmethod
def add_action_to_admin_submenu(label, tray_menu):
def add_action_to_admin_submenu(
label: str, tray_menu: QtWidgets.QMenu) -> QtWidgets.QAction:
"""Add action to admin submenu.
Args:
label (str): Label of action.
tray_menu (QtWidgets.QMenu): Tray menu to add action to.
Returns:
QtWidgets.QAction: Action added to admin submenu
"""
from qtpy import QtWidgets
menu = ITrayAddon.admin_submenu(tray_menu)
@ -244,16 +307,15 @@ class ITrayAction(ITrayAddon):
@property
@abstractmethod
def label(self):
def label(self) -> str:
"""Service label showed in menu."""
pass
@abstractmethod
def on_action_trigger(self):
def on_action_trigger(self) -> None:
"""What happens on actions click."""
pass
def tray_menu(self, tray_menu):
def tray_menu(self, tray_menu: QtWidgets.QMenu) -> None:
"""Add action to tray menu."""
from qtpy import QtWidgets
if self.admin_action:
@ -265,36 +327,44 @@ class ITrayAction(ITrayAddon):
action.triggered.connect(self.on_action_trigger)
self._action_item = action
def tray_start(self):
def tray_start(self) -> None: # noqa: PLR6301
"""Start procedure in tray tool."""
return
def tray_exit(self):
def tray_exit(self) -> None: # noqa: PLR6301
"""Cleanup method which is executed on tray shutdown."""
return
class ITrayService(ITrayAddon):
"""Tray service Interface."""
# Module's property
menu_action = None
menu_action: QtWidgets.QAction = None
# Class properties
_services_submenu = None
_icon_failed = None
_icon_running = None
_icon_idle = None
_services_submenu: QtWidgets.QMenu = None
_icon_failed: QtWidgets.QIcon = None
_icon_running: QtWidgets.QIcon = None
_icon_idle: QtWidgets.QIcon = None
@property
@abstractmethod
def label(self):
def label(self) -> str:
"""Service label showed in menu."""
pass
# TODO be able to get any sort of information to show/print
# TODO (Illicit): be able to get any sort of information to show/print
# @abstractmethod
# def get_service_info(self):
# pass
@staticmethod
def services_submenu(tray_menu):
def services_submenu(tray_menu: QtWidgets.QMenu) -> QtWidgets.QMenu:
"""Get or create services submenu.
Returns:
QtWidgets.QMenu: Services submenu.
"""
if ITrayService._services_submenu is None:
from qtpy import QtWidgets
@ -304,13 +374,15 @@ class ITrayService(ITrayAddon):
return ITrayService._services_submenu
@staticmethod
def add_service_action(action):
def add_service_action(action: QtWidgets.QAction) -> None:
"""Add service action to services submenu."""
ITrayService._services_submenu.addAction(action)
if not ITrayService._services_submenu.menuAction().isVisible():
ITrayService._services_submenu.menuAction().setVisible(True)
@staticmethod
def _load_service_icons():
def _load_service_icons() -> None:
"""Load service icons."""
from qtpy import QtGui
ITrayService._failed_icon = QtGui.QIcon(
@ -324,24 +396,43 @@ class ITrayService(ITrayAddon):
)
@staticmethod
def get_icon_running():
def get_icon_running() -> QtWidgets.QIcon:
"""Get running icon.
Returns:
QtWidgets.QIcon: Returns "running" icon.
"""
if ITrayService._icon_running is None:
ITrayService._load_service_icons()
return ITrayService._icon_running
@staticmethod
def get_icon_idle():
def get_icon_idle() -> QtWidgets.QIcon:
"""Get idle icon.
Returns:
QtWidgets.QIcon: Returns "idle" icon.
"""
if ITrayService._icon_idle is None:
ITrayService._load_service_icons()
return ITrayService._icon_idle
@staticmethod
def get_icon_failed():
if ITrayService._failed_icon is None:
ITrayService._load_service_icons()
return ITrayService._failed_icon
def get_icon_failed() -> QtWidgets.QIcon:
"""Get failed icon.
def tray_menu(self, tray_menu):
Returns:
QtWidgets.QIcon: Returns "failed" icon.
"""
if ITrayService._icon_failed is None:
ITrayService._load_service_icons()
return ITrayService._icon_failed
def tray_menu(self, tray_menu: QtWidgets.QMenu) -> None:
"""Add service to tray menu."""
from qtpy import QtWidgets
action = QtWidgets.QAction(
@ -354,21 +445,18 @@ class ITrayService(ITrayAddon):
self.set_service_running_icon()
def set_service_running_icon(self):
def set_service_running_icon(self) -> None:
"""Change icon of an QAction to green circle."""
if self.menu_action:
self.menu_action.setIcon(self.get_icon_running())
def set_service_failed_icon(self):
def set_service_failed_icon(self) -> None:
"""Change icon of an QAction to red circle."""
if self.menu_action:
self.menu_action.setIcon(self.get_icon_failed())
def set_service_idle_icon(self):
def set_service_idle_icon(self) -> None:
"""Change icon of an QAction to orange circle."""
if self.menu_action:
self.menu_action.setIcon(self.get_icon_idle())
@ -378,18 +466,29 @@ class IHostAddon(AYONInterface):
@property
@abstractmethod
def host_name(self):
def host_name(self) -> str:
"""Name of host which addon represents."""
pass
def get_workfile_extensions(self):
def get_workfile_extensions(self) -> list[str]: # noqa: PLR6301
"""Define workfile extensions for host.
Not all hosts support workfiles, thus this is an optional implementation.
Returns:
List[str]: Extensions used for workfiles with dot.
"""
"""
return []
class ITraits(AYONInterface):
"""Interface for traits."""
@abstractmethod
def get_addon_traits(self) -> list[Type[TraitBase]]:
"""Get trait classes for the addon.
Returns:
list[Type[TraitBase]]: Traits for the addon.
"""


@ -37,7 +37,7 @@ def _handle_error(
if process_context.headless:
if detail:
print(detail)
print(f"{10*'*'}\n{message}\n{10*'*'}")
print(f"{10 * '*'}\n{message}\n{10 * '*'}")
return
current_dir = os.path.dirname(os.path.abspath(__file__))


@ -24,7 +24,6 @@ from ayon_core.lib.env_tools import (
)
@click.group(invoke_without_command=True)
@click.pass_context
@click.option("--use-staging", is_flag=True,
@ -173,7 +172,6 @@ def contextselection(
main(output_path, project, folder, strict)
@main_cli.command(
context_settings=dict(
ignore_unknown_options=True,
@ -237,6 +235,30 @@ def version(build):
print(os.environ["AYON_VERSION"])
@main_cli.command()
@click.option(
"--project",
type=str,
help="Project name",
required=True)
def create_project_structure(
project,
):
"""Create project folder structure as defined in setting
`ayon+settings://core/project_folder_structure`
Args:
project (str): The name of the project for which you
want to create its additional folder structure.
"""
from ayon_core.pipeline.project_folders import create_project_folders
print(f">>> Creating project folder structure for project '{project}'.")
create_project_folders(project)
def _set_global_environments() -> None:
"""Set global AYON environments."""
# First resolve general environment
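A minimal sketch of what the new `create_project_structure` command does under the hood; `"my_project"` is a placeholder project name that must exist on the AYON server:

```python
# Equivalent Python call to the CLI command above.
# "my_project" is a placeholder project name.
from ayon_core.pipeline.project_folders import create_project_folders

create_project_folders("my_project")
```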


@ -32,8 +32,8 @@ class GlobalHostDataHook(PreLaunchHook):
"app": app,
"project_entity": self.data["project_entity"],
"folder_entity": self.data["folder_entity"],
"task_entity": self.data["task_entity"],
"folder_entity": self.data.get("folder_entity"),
"task_entity": self.data.get("task_entity"),
"anatomy": self.data["anatomy"],


@ -29,6 +29,15 @@ class OCIOEnvHook(PreLaunchHook):
def execute(self):
"""Hook entry method."""
task_entity = self.data.get("task_entity")
if not task_entity:
self.log.info(
"Skipping OCIO Environment preparation."
"Task Entity is not available."
)
return
folder_entity = self.data["folder_entity"]
template_data = get_template_data(


@ -0,0 +1,30 @@
""""Pre launch hook to remove launcher paths from the system."""
import os
from ayon_applications import PreLaunchHook
class PreRemoveLauncherPaths(PreLaunchHook):
"""Remove launcher paths from the system.
This hook removes launcher paths from the environment before launching
an application, ensuring the application is launched with the correct
environment variables. This matters especially on Windows, where paths
in `PATH` are used to load DLLs, and it avoids conflicts with other
applications that may ship the same DLLs on their paths.
"""
order = 1
def execute(self) -> None:
"""Execute the hook."""
# Remove launcher paths from the system
ayon_root = os.path.normpath(os.environ["AYON_ROOT"])
paths = [
path
for path in self.launch_context.env.get(
"PATH", "").split(os.pathsep)
if not os.path.normpath(path).startswith(ayon_root)
]
self.launch_context.env["PATH"] = os.pathsep.join(paths)
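A standalone sketch of the `PATH` filtering performed by the hook above; `/opt/ayon-launcher` stands in for the real `AYON_ROOT` value:

```python
# Illustration of the filtering logic; paths are placeholders.
import os

ayon_root = os.path.normpath("/opt/ayon-launcher")
env_path = os.pathsep.join([
    "/opt/ayon-launcher/vendor/bin",
    "/usr/local/bin",
])
kept = [
    path for path in env_path.split(os.pathsep)
    if not os.path.normpath(path).startswith(ayon_root)
]
print(os.pathsep.join(kept))  # only "/usr/local/bin" survives
```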


@ -62,6 +62,7 @@ from .execute import (
run_subprocess,
run_detached_process,
run_ayon_launcher_process,
run_detached_ayon_launcher_process,
path_to_subprocess_arg,
CREATE_NO_WINDOW
)
@ -98,7 +99,6 @@ from .profiles_filtering import (
from .transcoding import (
get_transcode_temp_directory,
should_convert_for_ffmpeg,
convert_for_ffmpeg,
convert_input_paths_for_ffmpeg,
get_ffprobe_data,
get_ffprobe_streams,
@ -132,6 +132,7 @@ from .ayon_info import (
is_staging_enabled,
is_dev_mode_enabled,
is_in_tests,
get_settings_variant,
)
terminal = Terminal
@ -161,6 +162,7 @@ __all__ = [
"run_subprocess",
"run_detached_process",
"run_ayon_launcher_process",
"run_detached_ayon_launcher_process",
"path_to_subprocess_arg",
"CREATE_NO_WINDOW",
@ -198,7 +200,6 @@ __all__ = [
"get_transcode_temp_directory",
"should_convert_for_ffmpeg",
"convert_for_ffmpeg",
"convert_input_paths_for_ffmpeg",
"get_ffprobe_data",
"get_ffprobe_streams",
@ -242,4 +243,5 @@ __all__ = [
"is_staging_enabled",
"is_dev_mode_enabled",
"is_in_tests",
"get_settings_variant",
]


@ -22,12 +22,10 @@ import clique
if typing.TYPE_CHECKING:
from typing import Self, Tuple, Union, TypedDict, Pattern
class EnumItemDict(TypedDict):
label: str
value: Any
EnumItemsInputType = Union[
Dict[Any, str],
List[Tuple[Any, str]],
@ -35,7 +33,6 @@ if typing.TYPE_CHECKING:
List[EnumItemDict]
]
class FileDefItemDict(TypedDict):
directory: str
filenames: List[str]
@ -289,6 +286,7 @@ AttrDefType = TypeVar("AttrDefType", bound=AbstractAttrDef)
# UI attribute definitions won't hold value
# -----------------------------------------
class UIDef(AbstractAttrDef):
is_value_def = False


@ -177,10 +177,12 @@ def initialize_ayon_connection(force=False):
return _new_get_last_versions(
con, *args, **kwargs
)
def _lv_by_pi_wrapper(*args, **kwargs):
return _new_get_last_version_by_product_id(
con, *args, **kwargs
)
def _lv_by_pn_wrapper(*args, **kwargs):
return _new_get_last_version_by_product_name(
con, *args, **kwargs


@ -78,15 +78,15 @@ def is_using_ayon_console():
return "ayon_console" in executable_filename
def is_headless_mode_enabled():
def is_headless_mode_enabled() -> bool:
return os.getenv("AYON_HEADLESS_MODE") == "1"
def is_staging_enabled():
def is_staging_enabled() -> bool:
return os.getenv("AYON_USE_STAGING") == "1"
def is_in_tests():
def is_in_tests() -> bool:
"""Process is running in automatic tests mode.
Returns:
@ -96,7 +96,7 @@ def is_in_tests():
return os.environ.get("AYON_IN_TESTS") == "1"
def is_dev_mode_enabled():
def is_dev_mode_enabled() -> bool:
"""Dev mode is enabled in AYON.
Returns:
@ -106,6 +106,22 @@ def is_dev_mode_enabled():
return os.getenv("AYON_USE_DEV") == "1"
def get_settings_variant() -> str:
"""Get AYON settings variant.
Returns:
str: Settings variant.
"""
if is_dev_mode_enabled():
return os.environ["AYON_BUNDLE_NAME"]
if is_staging_enabled():
return "staging"
return "production"
def get_ayon_info():
executable_args = get_ayon_launcher_args()
if is_running_from_build():
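A short usage sketch of the new `get_settings_variant()` helper; the environment values are illustrative only:

```python
# Illustrative only: force staging mode and read the resolved variant.
import os

from ayon_core.lib import get_settings_variant

os.environ.pop("AYON_USE_DEV", None)
os.environ["AYON_USE_STAGING"] = "1"
print(get_settings_variant())  # -> "staging"
```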


@ -1,3 +1,4 @@
from __future__ import annotations
import os
import sys
import subprocess
@ -201,29 +202,9 @@ def clean_envs_for_ayon_process(env=None):
return env
def run_ayon_launcher_process(*args, add_sys_paths=False, **kwargs):
"""Execute AYON process with passed arguments and wait.
Wrapper for 'run_process' which prepends AYON executable arguments
before passed arguments and define environments if are not passed.
Values from 'os.environ' are used for environments if are not passed.
They are cleaned using 'clean_envs_for_ayon_process' function.
Example:
```
run_ayon_process("run", "<path to .py script>")
```
Args:
*args (str): ayon-launcher cli arguments.
**kwargs (Any): Keyword arguments for subprocess.Popen.
Returns:
str: Full output of subprocess concatenated stdout and stderr.
"""
args = get_ayon_launcher_args(*args)
def _prepare_ayon_launcher_env(
add_sys_paths: bool, kwargs: dict
) -> dict[str, str]:
env = kwargs.pop("env", None)
# Keep env untouched if are passed and not empty
if not env:
@ -239,8 +220,7 @@ def run_ayon_launcher_process(*args, add_sys_paths=False, **kwargs):
new_pythonpath.append(path)
lookup_set.add(path)
env["PYTHONPATH"] = os.pathsep.join(new_pythonpath)
return run_subprocess(args, env=env, **kwargs)
return env
def run_detached_process(args, **kwargs):
@ -314,6 +294,67 @@ def run_detached_process(args, **kwargs):
return process
def run_ayon_launcher_process(
*args, add_sys_paths: bool = False, **kwargs
) -> str:
"""Execute AYON process with passed arguments and wait.
Wrapper for 'run_process' which prepends AYON executable arguments
before passed arguments and define environments if are not passed.
Values from 'os.environ' are used for environments if are not passed.
They are cleaned using 'clean_envs_for_ayon_process' function.
Example:
```
run_ayon_launcher_process("run", "<path to .py script>")
```
Args:
*args (str): ayon-launcher cli arguments.
add_sys_paths (bool): Add system paths to PYTHONPATH.
**kwargs (Any): Keyword arguments for subprocess.Popen.
Returns:
str: Full output of subprocess concatenated stdout and stderr.
"""
args = get_ayon_launcher_args(*args)
env = _prepare_ayon_launcher_env(add_sys_paths, kwargs)
return run_subprocess(args, env=env, **kwargs)
def run_detached_ayon_launcher_process(
*args, add_sys_paths: bool = False, **kwargs
) -> subprocess.Popen:
"""Execute AYON process with passed arguments and wait.
Wrapper for 'run_process' which prepends AYON executable arguments
before passed arguments and define environments if are not passed.
Values from 'os.environ' are used for environments if are not passed.
They are cleaned using 'clean_envs_for_ayon_process' function.
Example:
```
run_detached_ayon_launcher_process("run", "<path to .py script>")
```
Args:
*args (str): ayon-launcher cli arguments.
add_sys_paths (bool): Add system paths to PYTHONPATH.
**kwargs (Any): Keyword arguments for subprocess.Popen.
Returns:
subprocess.Popen: Pointer to launched process but it is possible that
launched process is already killed (on linux).
"""
args = get_ayon_launcher_args(*args)
env = _prepare_ayon_launcher_env(add_sys_paths, kwargs)
return run_detached_process(args, env=env, **kwargs)
def path_to_subprocess_arg(path):
"""Prepare path for subprocess arguments.


@ -1,15 +1,13 @@
import concurrent.futures
import os
import logging
import sys
import errno
from concurrent.futures import ThreadPoolExecutor, Future
from typing import List, Optional
from ayon_core.lib import create_hard_link
# this is needed until speedcopy for linux is fixed
if sys.platform == "win32":
from speedcopy import copyfile
else:
from shutil import copyfile
from speedcopy import copyfile
class DuplicateDestinationError(ValueError):
@ -109,41 +107,52 @@ class FileTransaction:
self._transfers[dst] = (src, opts)
def process(self):
# Backup any existing files
for dst, (src, _) in self._transfers.items():
self.log.debug("Checking file ... {} -> {}".format(src, dst))
path_same = self._same_paths(src, dst)
if path_same or not os.path.exists(dst):
continue
with ThreadPoolExecutor(max_workers=8) as executor:
# Submit backup tasks
backup_futures = [
executor.submit(self._backup_file, dst, src)
for dst, (src, _) in self._transfers.items()
]
wait_for_future_errors(
executor, backup_futures, logger=self.log)
# Backup original file
# todo: add timestamp or uuid to ensure unique
backup = dst + ".bak"
self._backup_to_original[backup] = dst
# Submit transfer tasks
transfer_futures = [
executor.submit(self._transfer_file, dst, src, opts)
for dst, (src, opts) in self._transfers.items()
]
wait_for_future_errors(
executor, transfer_futures, logger=self.log)
def _backup_file(self, dst, src):
self.log.debug(f"Checking file ... {src} -> {dst}")
path_same = self._same_paths(src, dst)
if path_same or not os.path.exists(dst):
return
# Backup original file
backup = dst + ".bak"
self._backup_to_original[backup] = dst
self.log.debug(f"Backup existing file: {dst} -> {backup}")
os.rename(dst, backup)
def _transfer_file(self, dst, src, opts):
path_same = self._same_paths(src, dst)
if path_same:
self.log.debug(
"Backup existing file: {} -> {}".format(dst, backup))
os.rename(dst, backup)
f"Source and destination are same files {src} -> {dst}")
return
# Copy the files to transfer
for dst, (src, opts) in self._transfers.items():
path_same = self._same_paths(src, dst)
if path_same:
self.log.debug(
"Source and destination are same files {} -> {}".format(
src, dst))
continue
self._create_folder_for_file(dst)
self._create_folder_for_file(dst)
if opts["mode"] == self.MODE_COPY:
self.log.debug(f"Copying file ... {src} -> {dst}")
copyfile(src, dst)
elif opts["mode"] == self.MODE_HARDLINK:
self.log.debug(f"Hardlinking file ... {src} -> {dst}")
create_hard_link(src, dst)
if opts["mode"] == self.MODE_COPY:
self.log.debug("Copying file ... {} -> {}".format(src, dst))
copyfile(src, dst)
elif opts["mode"] == self.MODE_HARDLINK:
self.log.debug("Hardlinking file ... {} -> {}".format(
src, dst))
create_hard_link(src, dst)
self._transferred.append(dst)
self._transferred.append(dst)
def finalize(self):
# Delete any backed up files
@ -212,3 +221,46 @@ class FileTransaction:
return os.stat(src) == os.stat(dst)
return src == dst
def wait_for_future_errors(
executor: ThreadPoolExecutor,
futures: List[Future],
logger: Optional[logging.Logger] = None):
"""For the ThreadPoolExecutor shutdown and cancel futures as soon one of
the workers raises an error as they complete.
The ThreadPoolExecutor only cancels pending futures on exception but will
still complete those that are running - each which also themselves could
fail. We log all exceptions but re-raise the last exception only.
"""
if logger is None:
logger = logging.getLogger(__name__)
for future in concurrent.futures.as_completed(futures):
exception = future.exception()
if exception:
# As soon as an error occurs, stop executing more futures.
# Running workers, however, will still run to completion, so we also
# want to log any errors that occurred in them.
executor.shutdown(wait=True, cancel_futures=True)
break
else:
# Futures are completed, no exceptions occurred
return
# An exception occurred in at least one future. Get exceptions from
# all futures that are done and ended up failing until that point.
exceptions = []
for future in futures:
if not future.cancelled() and future.done():
exception = future.exception()
if exception:
exceptions.append(exception)
# Log any exceptions that occurred in all workers
for exception in exceptions:
logger.error("Error occurred in worker", exc_info=exception)
# Raise the last exception
raise exceptions[-1]
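The helper mirrors the pattern used in `FileTransaction.process()` above; a minimal sketch with a hypothetical worker function and job list:

```python
# "copy_job" and "jobs" are hypothetical placeholders;
# wait_for_future_errors is defined above in this module.
from concurrent.futures import ThreadPoolExecutor

def copy_job(job):
    """Worker that may raise; errors are logged and the last one re-raised."""

jobs = []
with ThreadPoolExecutor(max_workers=8) as executor:
    futures = [executor.submit(copy_job, job) for job in jobs]
    wait_for_future_errors(executor, futures)
```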


@ -39,6 +39,7 @@ class Terminal:
"""
from ayon_core.lib import env_value_to_bool
log_no_colors = env_value_to_bool(
"AYON_LOG_NO_COLORS", default=None
)


@ -526,137 +526,6 @@ def should_convert_for_ffmpeg(src_filepath):
return False
# Deprecated since 2022 4 20
# - Reason - Doesn't convert sequences right way: Can't handle gaps, reuse
# first frame for all frames and changes filenames when input
# is sequence.
# - use 'convert_input_paths_for_ffmpeg' instead
def convert_for_ffmpeg(
first_input_path,
output_dir,
input_frame_start=None,
input_frame_end=None,
logger=None
):
"""Convert source file to format supported in ffmpeg.
Currently can convert only exrs.
Args:
first_input_path (str): Path to first file of a sequence or a single
file path for non-sequential input.
output_dir (str): Path to directory where output will be rendered.
Must not be same as input's directory.
input_frame_start (int): Frame start of input.
input_frame_end (int): Frame end of input.
logger (logging.Logger): Logger used for logging.
Raises:
ValueError: If input filepath has extension not supported by function.
Currently is supported only ".exr" extension.
"""
if logger is None:
logger = logging.getLogger(__name__)
logger.warning((
"DEPRECATED: 'ayon_core.lib.transcoding.convert_for_ffmpeg' is"
" deprecated function of conversion for FFMpeg. Please replace usage"
" with 'ayon_core.lib.transcoding.convert_input_paths_for_ffmpeg'"
))
ext = os.path.splitext(first_input_path)[1].lower()
if ext != ".exr":
raise ValueError((
"Function 'convert_for_ffmpeg' currently support only"
" \".exr\" extension. Got \"{}\"."
).format(ext))
is_sequence = False
if input_frame_start is not None and input_frame_end is not None:
is_sequence = int(input_frame_end) != int(input_frame_start)
input_info = get_oiio_info_for_input(first_input_path, logger=logger)
# Change compression only if source compression is "dwaa" or "dwab"
# - they're not supported in ffmpeg
compression = input_info["attribs"].get("compression")
if compression in ("dwaa", "dwab"):
compression = "none"
# Prepare subprocess arguments
oiio_cmd = get_oiio_tool_args(
"oiiotool",
# Don't add any additional attributes
"--nosoftwareattrib",
)
# Add input compression if available
if compression:
oiio_cmd.extend(["--compression", compression])
# Collect channels to export
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
oiio_cmd.extend([
input_arg, first_input_path,
# Tell oiiotool which channels should be put to top stack (and output)
"--ch", channels_arg,
# Use first subimage
"--subimage", "0"
])
# Add frame definitions to arguments
if is_sequence:
oiio_cmd.extend([
"--frames", "{}-{}".format(input_frame_start, input_frame_end)
])
for attr_name, attr_value in input_info["attribs"].items():
if not isinstance(attr_value, str):
continue
# Remove attributes that have string value longer than allowed length
# for ffmpeg or when contain prohibited symbols
erase_reason = "Missing reason"
erase_attribute = False
if len(attr_value) > MAX_FFMPEG_STRING_LEN:
erase_reason = "has too long value ({} chars).".format(
len(attr_value)
)
erase_attribute = True
if not erase_attribute:
for char in NOT_ALLOWED_FFMPEG_CHARS:
if char in attr_value:
erase_attribute = True
erase_reason = (
"contains unsupported character \"{}\"."
).format(char)
break
if erase_attribute:
# Set attribute to empty string
logger.info((
"Removed attribute \"{}\" from metadata because {}."
).format(attr_name, erase_reason))
oiio_cmd.extend(["--eraseattrib", attr_name])
# Add last argument - path to output
if is_sequence:
ext = os.path.splitext(first_input_path)[1]
base_filename = "tmp.%{:0>2}d{}".format(
len(str(input_frame_end)), ext
)
else:
base_filename = os.path.basename(first_input_path)
output_path = os.path.join(output_dir, base_filename)
oiio_cmd.extend([
"-o", output_path
])
logger.debug("Conversion command: {}".format(" ".join(oiio_cmd)))
run_subprocess(oiio_cmd, logger=logger)
def convert_input_paths_for_ffmpeg(
input_paths,
output_dir,
@ -664,7 +533,7 @@ def convert_input_paths_for_ffmpeg(
):
"""Convert source file to format supported in ffmpeg.
Currently can convert only exrs. The input filepaths should be files
Can currently convert only EXRs. The input filepaths should be files
with same type. Information about input is loaded only from first found
file.
@ -691,10 +560,10 @@ def convert_input_paths_for_ffmpeg(
ext = os.path.splitext(first_input_path)[1].lower()
if ext != ".exr":
raise ValueError((
"Function 'convert_for_ffmpeg' currently support only"
" \".exr\" extension. Got \"{}\"."
).format(ext))
raise ValueError(
"Function 'convert_input_paths_for_ffmpeg' currently supports"
f" only \".exr\" extension. Got \"{ext}\"."
)
input_info = get_oiio_info_for_input(first_input_path, logger=logger)


@ -162,7 +162,7 @@ def find_tool_in_custom_paths(paths, tool, validation_func=None):
# Handle cases when path is just an executable
# - it allows to use executable from PATH
# - basename must match 'tool' value (without extension)
extless_path, ext = os.path.splitext(path)
extless_path, _ext = os.path.splitext(path)
if extless_path == tool:
executable_path = find_executable(tool)
if executable_path and (
@ -181,7 +181,7 @@ def find_tool_in_custom_paths(paths, tool, validation_func=None):
# If path is a file validate it
if os.path.isfile(normalized):
basename, ext = os.path.splitext(os.path.basename(path))
basename, _ext = os.path.splitext(os.path.basename(path))
# Check if the filename actually has the same name as 'tool'
if basename == tool:
executable_path = find_executable(normalized)


@ -100,6 +100,10 @@ from .context_tools import (
get_current_task_name
)
from .compatibility import (
is_product_base_type_supported,
)
from .workfile import (
discover_workfile_build_plugins,
register_workfile_build_plugin,
@ -223,4 +227,7 @@ __all__ = (
# Backwards compatible function names
"install",
"uninstall",
# Feature detection
"is_product_base_type_supported",
)


@ -462,8 +462,8 @@ class Anatomy(BaseAnatomy):
Union[Dict[str, str], None]): Local root overrides.
"""
if not project_name:
return
return ayon_api.get_project_roots_for_site(
return None
return ayon_api.get_project_root_overrides_by_site_id(
project_name, get_local_site_id()
)


@ -834,7 +834,7 @@ def _get_global_config_data(
if not product_entities_by_name:
# in case no product was found we need to use fallback
fallback_type = fallback_data["type"]
fallback_type = fallback_data["fallback_type"]
return _get_config_path_from_profile_data(
fallback_data, fallback_type, template_data
)


@ -0,0 +1,16 @@
"""Package to handle compatibility checks for pipeline components."""
def is_product_base_type_supported() -> bool:
"""Check support for product base types.
This function checks if the current pipeline supports product base types.
Once this feature is implemented, it will return True. This should be used
in places where some kind of backward compatibility is needed to avoid
breaking existing functionality that relies on the current behavior.
Returns:
bool: True if product base types are supported, False otherwise.
"""
return False
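Addon code can use the new flag for feature detection; a sketch with placeholder branch bodies:

```python
# Feature-detection sketch; branch bodies are placeholders.
from ayon_core.pipeline import is_product_base_type_supported

if is_product_base_type_supported():
    # Future path: filter by product base types.
    pass
else:
    # Current path: keep filtering by product types.
    pass
```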


@ -872,7 +872,7 @@ class CreateContext:
"""
return self._event_hub.add_callback(INSTANCE_ADDED_TOPIC, callback)
def add_instances_removed_callback (self, callback):
def add_instances_removed_callback(self, callback):
"""Register callback for removed instances.
Event is triggered when instances are already removed from context.
@ -933,7 +933,7 @@ class CreateContext:
"""
self._event_hub.add_callback(VALUE_CHANGED_TOPIC, callback)
def add_pre_create_attr_defs_change_callback (self, callback):
def add_pre_create_attr_defs_change_callback(self, callback):
"""Register callback to listen pre-create attribute changes.
Create plugin can trigger refresh of pre-create attributes. Usage of
@ -961,7 +961,7 @@ class CreateContext:
PRE_CREATE_ATTR_DEFS_CHANGED_TOPIC, callback
)
def add_create_attr_defs_change_callback (self, callback):
def add_create_attr_defs_change_callback(self, callback):
"""Register callback to listen create attribute changes.
Create plugin changed attribute definitions of instance.
@ -986,7 +986,7 @@ class CreateContext:
"""
self._event_hub.add_callback(CREATE_ATTR_DEFS_CHANGED_TOPIC, callback)
def add_publish_attr_defs_change_callback (self, callback):
def add_publish_attr_defs_change_callback(self, callback):
"""Register callback to listen publish attribute changes.
Publish plugin changed attribute definitions of instance of context.


@ -52,15 +52,15 @@ def get_product_name_template(
# TODO remove formatting keys replacement
template = (
matching_profile["template"]
.replace("{task[name]}", "{task}")
.replace("{Task[name]}", "{Task}")
.replace("{TASK[NAME]}", "{TASK}")
.replace("{product[type]}", "{family}")
.replace("{Product[type]}", "{Family}")
.replace("{PRODUCT[TYPE]}", "{FAMILY}")
.replace("{folder[name]}", "{asset}")
.replace("{Folder[name]}", "{Asset}")
.replace("{FOLDER[NAME]}", "{ASSET}")
.replace("{task}", "{task[name]}")
.replace("{Task}", "{Task[name]}")
.replace("{TASK}", "{TASK[NAME]}")
.replace("{family}", "{product[type]}")
.replace("{Family}", "{Product[type]}")
.replace("{FAMILY}", "{PRODUCT[TYPE]}")
.replace("{asset}", "{folder[name]}")
.replace("{Asset}", "{Folder[name]}")
.replace("{ASSET}", "{FOLDER[NAME]}")
)
# Make sure template is set (matching may have empty string)


@ -369,7 +369,7 @@ class PublishAttributes:
return copy.deepcopy(self._origin_data)
def attribute_value_changed(self, key, changes):
self._parent.publish_attribute_value_changed(key, changes)
self._parent.publish_attribute_value_changed(key, changes)
def set_publish_plugin_attr_defs(
self,


@ -255,7 +255,7 @@ def deliver_sequence(
report_items[""].append(msg)
return report_items, 0
dir_path, file_name = os.path.split(str(src_path))
dir_path, _file_name = os.path.split(str(src_path))
context = repre["context"]
ext = context.get("ext", context.get("representation"))
@ -270,7 +270,7 @@ def deliver_sequence(
# context.representation could be .psd
ext = ext.replace("..", ".")
src_collections, remainder = clique.assemble(os.listdir(dir_path))
src_collections, _remainder = clique.assemble(os.listdir(dir_path))
src_collection = None
for col in src_collections:
if col.tail != ext:


@ -1,4 +1,4 @@
from __future__ import annotations
from __future__ import annotations
import copy
import os
import re
@ -660,14 +660,6 @@ def _get_legacy_product_name_and_group(
warnings.warn("Using legacy product name for renders",
DeprecationWarning)
if not source_product_name.startswith(product_type):
resulting_group_name = '{}{}{}{}{}'.format(
product_type,
task_name[0].upper(), task_name[1:],
source_product_name[0].upper(), source_product_name[1:])
else:
resulting_group_name = source_product_name
# create product name `<product type><Task><Product name>`
if not source_product_name.startswith(product_type):
resulting_group_name = '{}{}{}{}{}'.format(
@ -1168,7 +1160,7 @@ def prepare_cache_representations(skeleton_data, exp_files, anatomy):
"""
representations = []
collections, remainders = clique.assemble(exp_files)
collections, _remainders = clique.assemble(exp_files)
log = Logger.get_logger("farm_publishing")


@ -49,6 +49,11 @@ from .plugins import (
deregister_loader_plugin_path,
register_loader_plugin_path,
deregister_loader_plugin,
register_loader_hook_plugin,
deregister_loader_hook_plugin,
register_loader_hook_plugin_path,
deregister_loader_hook_plugin_path,
)
@ -103,4 +108,10 @@ __all__ = (
"deregister_loader_plugin_path",
"register_loader_plugin_path",
"deregister_loader_plugin",
"register_loader_hook_plugin",
"deregister_loader_hook_plugin",
"register_loader_hook_plugin_path",
"deregister_loader_hook_plugin_path",
)


@ -1,21 +1,28 @@
import os
import logging
"""Plugins for loading representations and products into host applications."""
from __future__ import annotations
from abc import abstractmethod
import logging
import os
from typing import Any, Optional, Type
from ayon_core.settings import get_project_settings
from ayon_core.pipeline.plugin_discover import (
deregister_plugin,
deregister_plugin_path,
discover,
register_plugin,
register_plugin_path,
deregister_plugin,
deregister_plugin_path
)
from ayon_core.settings import get_project_settings
from .utils import get_representation_path_from_context
class LoaderPlugin(list):
"""Load representation into host application"""
product_types = set()
product_types: set[str] = set()
product_base_types: Optional[set[str]] = None
representations = set()
extensions = {"*"}
order = 0
@ -58,12 +65,12 @@ class LoaderPlugin(list):
if not plugin_settings:
return
print(">>> We have preset for {}".format(plugin_name))
print(f">>> We have preset for {plugin_name}")
for option, value in plugin_settings.items():
if option == "enabled" and value is False:
print(" - is disabled by preset")
else:
print(" - setting `{}`: `{}`".format(option, value))
print(f" - setting `{option}`: `{value}`")
setattr(cls, option, value)
@classmethod
@ -76,7 +83,6 @@ class LoaderPlugin(list):
Returns:
bool: Representation has valid extension
"""
if "*" in cls.extensions:
return True
@ -121,18 +127,34 @@ class LoaderPlugin(list):
"""
plugin_repre_names = cls.get_representations()
plugin_product_types = cls.product_types
# If the product base type isn't defined on the loader plugin,
# then we will use the product types.
plugin_product_filter = cls.product_base_types
if plugin_product_filter is None:
plugin_product_filter = cls.product_types
if plugin_product_filter:
plugin_product_filter = set(plugin_product_filter)
repre_entity = context.get("representation")
product_entity = context["product"]
# If no representation names, product types or extensions are defined
# then loader is not compatible with any context.
if (
not plugin_repre_names
or not plugin_product_types
or not plugin_product_filter
or not cls.extensions
):
return False
repre_entity = context.get("representation")
# If no representation entity is provided then loader is not
# compatible with context.
if not repre_entity:
return False
# Check the compatibility with the representation names.
plugin_repre_names = set(plugin_repre_names)
if (
"*" not in plugin_repre_names
@ -140,17 +162,34 @@ class LoaderPlugin(list):
):
return False
# Check the compatibility with the extension of the representation.
if not cls.has_valid_extension(repre_entity):
return False
plugin_product_types = set(plugin_product_types)
if "*" in plugin_product_types:
product_type = product_entity.get("productType")
product_base_type = product_entity.get("productBaseType")
# Use product base type if defined, otherwise use product type.
product_filter = product_base_type
# If there is no product base type defined in the product entity,
# then we will use the product type.
if product_filter is None:
product_filter = product_type
# If wildcard is used in product types or base types,
# then we will consider the loader compatible with any product type.
if "*" in plugin_product_filter:
return True
product_entity = context["product"]
product_type = product_entity["productType"]
# compatibility with legacy loader
if cls.product_base_types is None and product_base_type:
cls.log.error(
f"Loader {cls.__name__} is doesn't specify "
"`product_base_types` but product entity has "
f"`productBaseType` defined as `{product_base_type}`. "
)
return product_type in plugin_product_types
return product_filter in plugin_product_filter
@classmethod
def get_representations(cls):
@ -205,35 +244,20 @@ class LoaderPlugin(list):
bool: Whether the container was deleted
"""
raise NotImplementedError("Loader.remove() must be "
"implemented by subclass")
@classmethod
def get_options(cls, contexts):
"""
Returns static (cls) options or could collect from 'contexts'.
"""Returns static (cls) options or could collect from 'contexts'.
Args:
contexts (list): of repre or product contexts
Returns:
(list)
Args:
contexts (list): of repre or product contexts
Returns:
(list)
"""
return cls.options or []
@property
def fname(self):
"""Backwards compatibility with deprecation warning"""
self.log.warning((
"DEPRECATION WARNING: Source - Loader plugin {}."
" The 'fname' property on the Loader plugin will be removed in"
" future versions of OpenPype. Planned version to drop the support"
" is 3.16.6 or 3.17.0."
).format(self.__class__.__name__))
if hasattr(self, "_fname"):
return self._fname
@classmethod
def get_representation_name_aliases(cls, representation_name: str):
"""Return representation names to which switching is allowed from
@ -264,28 +288,152 @@ class ProductLoaderPlugin(LoaderPlugin):
"""
class LoaderHookPlugin:
"""Plugin that runs before and post specific Loader in 'loaders'
Should be used as non-invasive method to enrich core loading process.
Any studio might want to modify loaded data before or after
they are loaded without need to override existing core plugins.
The post methods are called after the loader's methods and receive the
return value of the loader's method as `result` argument.
"""
order = 0
@classmethod
@abstractmethod
def is_compatible(cls, Loader: Type[LoaderPlugin]) -> bool:
pass
@abstractmethod
def pre_load(
self,
plugin: LoaderPlugin,
context: dict,
name: Optional[str],
namespace: Optional[str],
options: Optional[dict],
):
pass
@abstractmethod
def post_load(
self,
plugin: LoaderPlugin,
result: Any,
context: dict,
name: Optional[str],
namespace: Optional[str],
options: Optional[dict],
):
pass
@abstractmethod
def pre_update(
self,
plugin: LoaderPlugin,
container: dict, # (ayon:container-3.0)
context: dict,
):
pass
@abstractmethod
def post_update(
self,
plugin: LoaderPlugin,
result: Any,
container: dict, # (ayon:container-3.0)
context: dict,
):
pass
@abstractmethod
def pre_remove(
self,
plugin: LoaderPlugin,
container: dict, # (ayon:container-3.0)
):
pass
@abstractmethod
def post_remove(
self,
plugin: LoaderPlugin,
result: Any,
container: dict, # (ayon:container-3.0)
):
pass
def discover_loader_plugins(project_name=None):
from ayon_core.lib import Logger
from ayon_core.pipeline import get_current_project_name
log = Logger.get_logger("LoaderDiscover")
plugins = discover(LoaderPlugin)
if not project_name:
project_name = get_current_project_name()
project_settings = get_project_settings(project_name)
plugins = discover(LoaderPlugin)
hooks = discover(LoaderHookPlugin)
sorted_hooks = sorted(hooks, key=lambda hook: hook.order)
for plugin in plugins:
try:
plugin.apply_settings(project_settings)
except Exception:
log.warning(
"Failed to apply settings to loader {}".format(
plugin.__name__
),
f"Failed to apply settings to loader {plugin.__name__}",
exc_info=True
)
compatible_hooks = []
for hook_cls in sorted_hooks:
if hook_cls.is_compatible(plugin):
compatible_hooks.append(hook_cls)
add_hooks_to_loader(plugin, compatible_hooks)
return plugins
def add_hooks_to_loader(
loader_class: LoaderPlugin, compatible_hooks: list[Type[LoaderHookPlugin]]
) -> None:
"""Monkey patch method replacing Loader.load|update|remove methods
It wraps applicable loaders with pre/post hooks. Discovery is called only
once per loaders discovery.
"""
loader_class._load_hooks = compatible_hooks
def wrap_method(method_name: str):
original_method = getattr(loader_class, method_name)
def wrapped_method(self, *args, **kwargs):
# Call pre_<method_name> on all hooks
pre_hook_name = f"pre_{method_name}"
hooks: list[LoaderHookPlugin] = []
for cls in loader_class._load_hooks:
hook = cls() # Instantiate the hook
hooks.append(hook)
pre_hook = getattr(hook, pre_hook_name, None)
if callable(pre_hook):
pre_hook(self, *args, **kwargs)
# Call original method
result = original_method(self, *args, **kwargs)
# Call post_<method_name> on all hooks
post_hook_name = f"post_{method_name}"
for hook in hooks:
post_hook = getattr(hook, post_hook_name, None)
if callable(post_hook):
post_hook(self, result, *args, **kwargs)
return result
setattr(loader_class, method_name, wrapped_method)
for method in ("load", "update", "remove"):
if hasattr(loader_class, method):
wrap_method(method)
def register_loader_plugin(plugin):
return register_plugin(LoaderPlugin, plugin)
@ -300,3 +448,19 @@ def deregister_loader_plugin_path(path):
def register_loader_plugin_path(path):
return register_plugin_path(LoaderPlugin, path)
def register_loader_hook_plugin(plugin):
return register_plugin(LoaderHookPlugin, plugin)
def deregister_loader_hook_plugin(plugin):
deregister_plugin(LoaderHookPlugin, plugin)
def register_loader_hook_plugin_path(path):
return register_plugin_path(LoaderHookPlugin, path)
def deregister_loader_hook_plugin_path(path):
deregister_plugin_path(LoaderHookPlugin, path)
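A hedged sketch of a studio-side hook built on the new `LoaderHookPlugin` API above. The class name and print statements are hypothetical, and the import path of the module is assumed; the hook implements the abstract methods and is registered like any other plugin:

```python
# Hypothetical studio-side hook; the module path of the imports is assumed.
from typing import Type

from ayon_core.pipeline.load.plugins import (
    LoaderHookPlugin,
    LoaderPlugin,
    register_loader_hook_plugin,
)


class PrintLoaderHook(LoaderHookPlugin):
    """Log around load/update/remove calls of every compatible loader."""
    order = 10

    @classmethod
    def is_compatible(cls, Loader: Type[LoaderPlugin]) -> bool:
        # Wrap every loader; a real hook would filter here.
        return True

    def pre_load(self, plugin, context, name, namespace, options):
        print(f"About to load '{name}' with {type(plugin).__name__}")

    def post_load(self, plugin, result, context, name, namespace, options):
        print(f"Loaded '{name}' -> {result}")

    def pre_update(self, plugin, container, context):
        print("Updating container:", container.get("name"))

    def post_update(self, plugin, result, container, context):
        print("Update finished")

    def pre_remove(self, plugin, container):
        print("Removing container:", container.get("name"))

    def post_remove(self, plugin, result, container):
        print("Remove finished")


register_loader_hook_plugin(PrintLoaderHook)
```

Registered hooks are discovered once per loader discovery and wrapped around compatible loaders by `add_hooks_to_loader()` above.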


@ -288,7 +288,12 @@ def get_representation_context(project_name, representation):
def load_with_repre_context(
Loader, repre_context, namespace=None, name=None, options=None, **kwargs
Loader,
repre_context,
namespace=None,
name=None,
options=None,
**kwargs
):
# Ensure the Loader is compatible for the representation
@ -316,17 +321,16 @@ def load_with_repre_context(
)
loader = Loader()
# Backwards compatibility: Originally the loader's __init__ required the
# representation context to set `fname` attribute to the filename to load
# Deprecated - to be removed in OpenPype 3.16.6 or 3.17.0.
loader._fname = get_representation_path_from_context(repre_context)
return loader.load(repre_context, name, namespace, options)
def load_with_product_context(
Loader, product_context, namespace=None, name=None, options=None, **kwargs
Loader,
product_context,
namespace=None,
name=None,
options=None,
**kwargs
):
# Ensure options is a dictionary when no explicit options provided
@ -349,7 +353,12 @@ def load_with_product_context(
def load_with_product_contexts(
Loader, product_contexts, namespace=None, name=None, options=None, **kwargs
Loader,
product_contexts,
namespace=None,
name=None,
options=None,
**kwargs
):
# Ensure options is a dictionary when no explicit options provided
@ -559,15 +568,20 @@ def update_container(container, version=-1):
return Loader().update(container, context)
def switch_container(container, representation, loader_plugin=None):
def switch_container(
container,
representation,
loader_plugin=None,
):
"""Switch a container to representation
Args:
container (dict): container information
representation (dict): representation entity
loader_plugin (LoaderPlugin)
Returns:
function call
return from function call
"""
from ayon_core.pipeline import get_current_project_name


@ -46,6 +46,11 @@ from .lib import (
get_publish_instance_families,
main_cli_publish,
add_trait_representations,
get_trait_representations,
has_trait_representations,
set_trait_representations,
)
from .abstract_expected_files import ExpectedFiles
@ -104,4 +109,9 @@ __all__ = (
"RenderInstance",
"AbstractCollectRender",
"add_trait_representations",
"get_trait_representations",
"has_trait_representations",
"set_trait_representations",
)


@ -6,7 +6,7 @@ import inspect
import copy
import warnings
import xml.etree.ElementTree
from typing import Optional, Union, List
from typing import TYPE_CHECKING, Optional, Union, List
import ayon_api
import pyblish.util
@ -27,6 +27,12 @@ from .constants import (
DEFAULT_HERO_PUBLISH_TEMPLATE,
)
if TYPE_CHECKING:
from ayon_core.pipeline.traits import Representation
TRAIT_INSTANCE_KEY: str = "representations_with_traits"
def get_template_name_profiles(
project_name, project_settings=None, logger=None
@ -1062,3 +1068,66 @@ def main_cli_publish(
sys.exit(1)
log.info("Publish finished.")
def has_trait_representations(
instance: pyblish.api.Instance) -> bool:
"""Check if instance has trait representation.
Args:
instance (pyblish.api.Instance): Instance to check.
Returns:
bool: True if the instance has trait representations, False otherwise.
"""
return TRAIT_INSTANCE_KEY in instance.data
def add_trait_representations(
instance: pyblish.api.Instance,
representations: list[Representation]
) -> None:
"""Add trait representations to instance.
Args:
instance (pyblish.api.Instance): Instance to add trait
representations to.
representations (list[Representation]): List of representation
trait based representations to add.
"""
repres = instance.data.setdefault(TRAIT_INSTANCE_KEY, [])
repres.extend(representations)
def set_trait_representations(
instance: pyblish.api.Instance,
representations: list[Representation]
) -> None:
"""Set trait representations to instance.
Args:
instance (pyblish.api.Instance): Instance to set trait
representations to.
representations (list[Representation]): List of trait
based representations.
"""
instance.data[TRAIT_INSTANCE_KEY] = representations
def get_trait_representations(
instance: pyblish.api.Instance) -> list[Representation]:
"""Get trait representations from instance.
Args:
instance (pyblish.api.Instance): Instance to get trait
representations from.
Returns:
list[Representation]: List of trait-based representations.
"""
return instance.data.get(TRAIT_INSTANCE_KEY, [])
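A hedged sketch of a collector using the new helpers; the plugin name is hypothetical and the `Image` trait usage follows the traits documentation added in this changeset:

```python
# Hypothetical collector attaching a trait-based representation.
import pyblish.api

from ayon_core.pipeline.publish import add_trait_representations
from ayon_core.pipeline.traits import Image, Representation


class CollectTraitThumbnail(pyblish.api.InstancePlugin):
    """Attach a trait-based representation to the publishing instance."""
    order = pyblish.api.CollectorOrder + 0.49

    def process(self, instance):
        representation = Representation(name="thumbnail", traits=[Image()])
        add_trait_representations(instance, [representation])
```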


@ -41,7 +41,7 @@ def validate(data, schema=None):
if not _CACHED:
_precache()
root, schema = data["schema"].rsplit(":", 1)
_root, schema = data["schema"].rsplit(":", 1)
if isinstance(schema, str):
schema = _cache[schema + ".json"]


@ -209,7 +209,7 @@ def get_staging_dir_info(
staging_dir_config = get_staging_dir_config(
project_entity["name"],
task_type,
task_name ,
task_name,
product_type,
product_name,
host_name,


@ -0,0 +1,453 @@
# Representations and traits
## Introduction
The Representation is the lowest-level entity, describing the concrete data chunk that
the pipeline can act on. It can be a specific file or just a set of metadata. The idea is that one
product version can have multiple representations - an **Image** product can be a JPEG or a TIFF; both formats are representations of the same source.
### Brief look into the past (and current state)
So far, a representation has been defined as a dict-like structure:
```python
{
"name": "foo",
"ext": "exr",
"files": ["foo_001.exr", "foo_002.exr"],
"stagingDir": "/bar/dir"
}
```
This is the minimal form, but it can have additional keys like `frameStart`, `fps`, `resolutionWidth`, and more. There is also a `tags` key that can hold `review`, `thumbnail`, `delete`, `toScanline` and other tags that control the processing.
This will be *"translated"* to a similar structure in the database:
```python
{
"name": "foo",
"version_id": "...",
"files": [
{
"id": ...,
"hash": ...,
"name": "foo_001.exr",
"path": "{root[work]}/bar/dir/foo_001.exr",
"size": 1234,
"hash_type": "...",
},
...
],
"attrib": {
"path": "root/bar/dir/foo_001.exr",
"template": "{root[work]}/{project[name]}...",
},
"data": {
"context": {
"ext": "exr",
"root": {...},
...
},
"active": True
...
}
```
There are also some assumptions and limitations - for example, if `files` in the
representation is a list, it needs to be a sequence of files (it can't be a bunch of
unrelated files).
This system is very flexible in one way, but it lacks a few very important things:
- it is not clearly defined — you can easily add keys, values and tags, but with
unforeseeable consequences
- it cannot handle "bundles" — multiple files that need to be versioned together and
belong together
- it cannot describe important information that you can't get from the file itself,
or that is very expensive to get (like axis orientation and units from Alembic files)
### New Representation model
The new representation model aims to solve the points mentioned above while
also adding some benefits, like consistent IDE hints, typing, built-in
validators and much more.
### Design
The new representation is "just" a dictionary of traits. A trait can be anything, provided
it is based on `TraitBase`. It shouldn't duplicate information that is
available at the moment of loading (or any other usage) by other means. It should contain
information that couldn't be determined from the file or the AYON context. Some of
those traits are aligned with [OpenAssetIO Media Creation](https://github.com/OpenAssetIO/OpenAssetIO-MediaCreation) in the hope of maintained compatibility (it
should be easy enough to convert between OpenAssetIO Traits and AYON Traits).
#### Details: Representation
`Representation` has methods to deal with adding, removing, getting
traits. It has all the usual stuff like `get_trait()`, `add_trait()`,
`remove_trait()`, etc. But it also has plural forms so you can get/set
several traits at the same time with `get_traits()` and so on.
`Representation` also behaves like a dictionary, so you can access/set
traits in the same way as you would with a dict:
```python
# import Image trait
from ayon_core.pipeline.traits import Image, Tagged, Representation
# create new representation with name "foo" and add Image trait to it
rep = Representation(name="foo", traits=[Image()])
# you can add another trait like so
rep.add_trait(Tagged(tags=["tag"]))
# or you can
rep[Tagged.id] = Tagged(tags=["tag"])
# and getting them is analogous
image = rep.get_trait(Image)
# or
image = rep[Image.id]
```
> [!NOTE]
> Traits and their ids - every trait has its id as a string with a
> version appended - so **Image** has `ayon.2d.Image.v1`. This is used in
> several places (you can see its use above for indexing traits). When querying,
> you can also omit the version at the end, and it will try its best to find
> the latest possible version. More on that in [Traits]()
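For example, using the id from the note above on the `rep` created earlier (a small sketch - both calls return the same `Image` trait instance):
```python
# Full id, including the version suffix.
image = rep.get_trait_by_id("ayon.2d.Image.v1")
# Version omitted - resolves to the latest available version of the trait.
image = rep.get_trait_by_id("ayon.2d.Image")
```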
You can construct a `Representation` from a dictionary (for example,
one serialized as JSON) using `Representation.from_dict()`, or you can
serialize a `Representation` to a dict for storage with `Representation.traits_as_dict()`.
Every time a representation is created, a new id is generated. You can pass an existing
id when creating a new representation instance.
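A minimal round-trip sketch, reusing the `rep` from the example above:
```python
# Serialize the traits (e.g. to store them as JSON) ...
data = rep.traits_as_dict()

# ... and rebuild an equivalent representation later, keeping the
# original name and id instead of generating a new one.
rep_copy = Representation.from_dict(
    name=rep.name,
    representation_id=rep.representation_id,
    trait_data=data,
)
```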
##### Equality
Two Representations are equal if:
- their names are the same
- their IDs are the same
- they have the same traits
- the traits have the same values
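For example, the round-trip copy from the sketch above compares equal to the original, while a freshly created representation (new id, different traits) does not:
```python
assert rep == rep_copy
assert rep != Representation(name=rep.name, traits=[Image()])
```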
##### Validation
Representation has a `validate()` method that runs `validate_trait()` on
all its traits.
#### Details: Traits
As mentioned, there are several traits defined directly in **ayon-core**. They are namespaced
into different packages based on their use:
| namespace | trait | description |
|-------------------|----------------------|----------------------------------------------------------------------------------------------------------|
| color | ColorManaged | hold color management information |
| content | MimeType | use MIME type (RFC 2046) to describe content (like image/jpeg) |
| | LocatableContent | describe some location (file or URI) |
| | FileLocation | path to file, with size and checksum |
| | FileLocations | list of `FileLocation` |
| | RootlessLocation | Path where root is replaced with AYON root token |
| | Compressed | describes compression (of file or other) |
| | Bundle | list of list of Traits - compound of inseparable "sub-representations" |
| | Fragment | compound type marking the representation as a part of larger group of representations |
| cryptography       | DigitallySigned      | Type trait marking data as digitally signed                                                                |
| | PGPSigned | Representation is signed by [PGP](https://www.openpgp.org/) |
| lifecycle | Transient | Marks the representation to be temporary - not to be stored. |
| | Persistent | Representation should be integrated (stored). Opposite of Transient. |
| meta | Tagged | holds list of tag strings. |
|                    | TemplatePath         | Template consisting of tokens/keys and data used to resolve the template into a string                    |
| | Variant | Used to differentiate between data variants of the same output (mp4 as h.264 and h.265 for example) |
| | KeepOriginalLocation | Marks the representation to keep the original location of the file |
| | KeepOriginalName | Marks the representation to keep the original name of the file |
|                    | SourceApplication    | Holds information about the producing application - its version, variant and platform.                    |
| | IntendedUse | For specifying the intended use of the representation if it cannot be easily determined by other traits. |
| three dimensional | Spatial | Spatial information like up-axis, units and handedness. |
| | Geometry | Type trait to mark the representation as a geometry. |
| | Shader | Type trait to mark the representation as a Shader. |
| | Lighting | Type trait to mark the representation as Lighting. |
| | IESProfile | States that the representation is IES Profile. |
| time | FrameRanged | Contains start and end frame information with in and out. |
|                    | Handles              | Defines additional frames at the beginning or end and whether those frames are inclusive of the range.    |
| | Sequence | Describes sequence of frames and how the frames are defined in that sequence. |
| | SMPTETimecode | Adds timecode information in SMPTE format. |
| | Static | Marks the content as not time-variant. |
| two dimensional | Image | Type traits of image. |
| | PixelBased | Defines resolution and pixel aspect for the image data. |
| | Planar | Whether pixel data is in planar configuration or packed. |
| | Deep | Image encodes deep pixel data. |
| | Overscan | holds overscan/underscan information (added pixels to bottom/sides). |
| | UDIM | Representation is UDIM tile set. |
Traits are Python data classes with optional
validation and helper methods. If they implement the `validate_trait(representation)` method, they can validate against all other traits
in the representation if needed.
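As a sketch, a custom trait with its own cross-trait validation could look like this (the `RequiresImage` name is hypothetical; it only checks that an `Image` trait is present in the same representation):
```python
from dataclasses import dataclass
from typing import ClassVar

from ayon_core.pipeline.traits import (
    Image,
    TraitBase,
    TraitValidationError,
)


@dataclass
class RequiresImage(TraitBase):
    """Hypothetical trait that is only valid next to an Image trait."""

    id: ClassVar[str] = "myaddon.meta.RequiresImage.v1"
    name: ClassVar[str] = "RequiresImage"
    description: ClassVar[str] = "Requires an Image trait."
    persistent: ClassVar[bool] = True

    def validate_trait(self, representation) -> None:
        if not representation.contains_trait(Image):
            msg = "Representation has no Image trait."
            raise TraitValidationError(self.name, msg)
```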
> [!NOTE]
> They could be easily converted to [Pydantic models](https://docs.pydantic.dev/latest/), but since this must run in diverse Python environments inside DCCs, we cannot
> easily resolve the pydantic-core dependency (as it is a binary written in Rust).
> [!NOTE]
> Every trait has an id, a name and a human-readable description. Every trait
> also has a `persistent` property that is set to True by default. This
> controls whether the trait should be stored with the persistent representation
> or not. It is useful for traits that only control the publishing process.
## Examples
Create a simple image representation to be integrated by AYON:
```python
from pathlib import Path
from ayon_core.pipeline.traits import (
    FileLocation,
    Image,
    PixelBased,
    Persistent,
    Representation,
    Static,
    TraitValidationError,
)

rep = Representation(name="reference image", traits=[
    FileLocation(
        file_path=Path("/foo/bar/baz.exr"),
        file_size=1234,
        file_hash="sha256:...",
    ),
    Image(),
    PixelBased(
        display_window_width=1920,
        display_window_height=1080,
        pixel_aspect_ratio=1.0,
    ),
    Persistent(),
    Static(),
])

# validate the representation
try:
    rep.validate()
except TraitValidationError as e:
    print(f"Representation {rep.name} is invalid: {e}")
```
To work with the resolution of such a representation:
```python
from ayon_core.pipeline.traits import MissingTraitError

try:
    width = rep.get_trait(PixelBased).display_window_width
    # or like this:
    height = rep[PixelBased.id].display_window_height
except MissingTraitError:
    print(f"resolution isn't set on {rep.name}")
```
Accessing non-existent traits results in an exception. To test whether
the representation has a specific trait, you can use the `.contains_trait()` method.
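For example, guarding the access with `contains_trait()` instead of catching the exception:
```python
if rep.contains_trait(PixelBased):
    width = rep.get_trait(PixelBased).display_window_width
```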
You can also prepare the whole representation data as a dict and
create the representation from it:
```python
rep_dict = {
    "ayon.content.FileLocation.v1": {
        "file_path": Path("/path/to/file"),
        "file_size": 1024,
        "file_hash": None,
    },
    "ayon.two_dimensional.Image": {},
    "ayon.two_dimensional.PixelBased": {
        "display_window_width": 1920,
        "display_window_height": 1080,
        "pixel_aspect_ratio": 1.0,
    },
    "ayon.two_dimensional.Planar": {
        "planar_configuration": "RGB",
    },
}
rep = Representation.from_dict(name="image", trait_data=rep_dict)
```
## Addon specific traits
An addon can define its own traits. To do so, it needs to implement the `ITraits` interface:
```python
from ayon_core.pipeline.traits import TraitBase
from ayon_core.addon import (
    AYONAddon,
    ITraits,
)


class MyTraitFoo(TraitBase):
    id = "myaddon.mytrait.foo.v1"
    name = "My Trait Foo"
    description = "This is my trait foo"
    persistent = True


class MyTraitBar(TraitBase):
    id = "myaddon.mytrait.bar.v1"
    name = "My Trait Bar"
    description = "This is my trait bar"
    persistent = True


class MyAddon(AYONAddon, ITraits):
    def __init__(self):
        super().__init__()

    def get_addon_traits(self):
        return [
            MyTraitFoo,
            MyTraitBar,
        ]
```
## Usage in Loaders
In loaders, you can implement the `is_compatible_loader()` method to check whether the
representation is compatible with the loader. Use `Representation.from_dict()` to
create the representation from the context and `Representation.contains_traits()`
to check that the representation contains the required traits. You can even check for specific
values in the traits.
You can use similar concepts directly in the `load()` method to get the traits. Here is
an example of how to use the traits in a hypothetical Maya loader:
```python
"""Alembic loader using traits."""
from __future__ import annotations
import json
from typing import Any, TypeVar, Type
from ayon_maya.api.plugin import MayaLoader
from ayon_core.pipeline.traits import (
    FileLocation,
    Spatial,
    Representation,
    TraitBase,
)

T = TypeVar("T", bound=TraitBase)


class AlembicTraitLoader(MayaLoader):
    """Alembic loader using traits."""

    label = "Alembic Trait Loader"
    ...

    required_traits: list[Type[TraitBase]] = [
        FileLocation,
        Spatial,
    ]

    @staticmethod
    def is_compatible_loader(context: dict[str, Any]) -> bool:
        traits_raw = context["representation"].get("traits")
        if not traits_raw:
            return False
        # construct Representation object from the context
        representation = Representation.from_dict(
            name=context["representation"]["name"],
            representation_id=context["representation"]["id"],
            trait_data=json.loads(traits_raw),
        )
        # check if the representation is compatible with this loader
        if representation.contains_traits(AlembicTraitLoader.required_traits):
            # you can also check for specific values in traits here
            return True
        return False

    ...
```
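Continuing the class above, the `load()` method can pull concrete values from the same traits (a sketch only - the exact `load()` signature and the Alembic import call depend on the Maya integration):
```python
    def load(self, context, name=None, namespace=None, options=None):
        representation = Representation.from_dict(
            name=context["representation"]["name"],
            representation_id=context["representation"]["id"],
            trait_data=json.loads(context["representation"]["traits"]),
        )
        # Concrete values come straight from the traits.
        file_path = representation.get_trait(FileLocation).file_path
        up_axis = representation.get_trait(Spatial).up_axis
        # ... feed `file_path` (and e.g. `up_axis`) to the Alembic import.
```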
## Usage in Publishing plugins
You can create the representations the same way as shown in the examples above.
A straightforward way is to use the `Representation` class and add the traits to it. Collect the
traits in a list and then pass them to the `Representation` constructor. Then add
the new Representation to the instance data using the `add_trait_representations()` function:
```python
class SomeExtractor(Extractor):
    """Some extractor."""
    ...

    def extract(self, instance: Instance) -> None:
        """Extract the data."""
        # get the path to the file
        path = self.get_path(instance)
        # create the representation
        traits: list[TraitBase] = [
            Geometry(),
            MimeType(mime_type="application/abc"),
            Persistent(),
            Spatial(
                up_axis=cmds.upAxis(q=True, axis=True),
                meters_per_unit=maya_units_to_meters_per_unit(
                    instance.context.data["linearUnits"]),
                handedness="right",
            ),
        ]
        if instance.data.get("frameStart"):
            traits.append(
                FrameRanged(
                    frame_start=instance.data["frameStart"],
                    frame_end=instance.data["frameEnd"],
                    frames_per_second=instance.context.data["fps"],
                )
            )
        representation = Representation(
            name="alembic",
            traits=[
                FileLocation(
                    file_path=Path(path),
                    file_size=os.path.getsize(path),
                    file_hash=get_file_hash(Path(path)),
                ),
                *traits,
            ],
        )
        add_trait_representations(
            instance,
            [representation],
        )
        ...
```
## Developer notes
Adding new trait-based representations to the publishing Instance and working with them uses
a set of helper functions defined in the `ayon_core.pipeline.publish` module. These are:
* add_trait_representations
* get_trait_representations
* has_trait_representations
* set_trait_representations
Their main purpose is to handle the key under which the representations
are stored in the instance data. This is done to avoid name clashes with
other representations. The key is defined in `AYON_PUBLISH_REPRESENTATION_KEY`.
It is strongly recommended to use these functions instead of
accessing the instance data directly. This ensures that the
code will keep working even if the key changes in the future.
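For example, a later publish plugin might read the stored representations back like this (a sketch; it assumes `get_trait_representations()` returns the list previously stored with `add_trait_representations()`):
```python
from ayon_core.pipeline.publish import (
    get_trait_representations,
    has_trait_representations,
)


def validate_instance_representations(instance) -> None:
    """Validate every trait-based representation on the instance."""
    if not has_trait_representations(instance):
        return
    for representation in get_trait_representations(instance):
        representation.validate()
```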

View file

@ -0,0 +1,112 @@
"""Trait classes for the pipeline."""
from .color import ColorManaged
from .content import (
Bundle,
Compressed,
FileLocation,
FileLocations,
Fragment,
LocatableContent,
MimeType,
RootlessLocation,
)
from .cryptography import DigitallySigned, PGPSigned
from .lifecycle import Persistent, Transient
from .meta import (
IntendedUse,
KeepOriginalLocation,
SourceApplication,
Tagged,
TemplatePath,
Variant,
)
from .representation import Representation
from .temporal import (
FrameRanged,
GapPolicy,
Handles,
Sequence,
SMPTETimecode,
Static,
)
from .three_dimensional import Geometry, IESProfile, Lighting, Shader, Spatial
from .trait import (
MissingTraitError,
TraitBase,
TraitValidationError,
)
from .two_dimensional import (
UDIM,
Deep,
Image,
Overscan,
PixelBased,
Planar,
)
from .utils import (
get_sequence_from_files,
)
__all__ = [ # noqa: RUF022
# base
"Representation",
"TraitBase",
"MissingTraitError",
"TraitValidationError",
# color
"ColorManaged",
# content
"Bundle",
"Compressed",
"FileLocation",
"FileLocations",
"Fragment",
"LocatableContent",
"MimeType",
"RootlessLocation",
# cryptography
"DigitallySigned",
"PGPSigned",
# life cycle
"Persistent",
"Transient",
# meta
"IntendedUse",
"KeepOriginalLocation",
"SourceApplication",
"Tagged",
"TemplatePath",
"Variant",
# temporal
"FrameRanged",
"GapPolicy",
"Handles",
"Sequence",
"SMPTETimecode",
"Static",
# three-dimensional
"Geometry",
"IESProfile",
"Lighting",
"Shader",
"Spatial",
# two-dimensional
"Compressed",
"Deep",
"Image",
"Overscan",
"PixelBased",
"Planar",
"UDIM",
# utils
"get_sequence_from_files",
]

View file

@ -0,0 +1,30 @@
"""Color-management-related traits."""
from __future__ import annotations
from dataclasses import dataclass
from typing import ClassVar, Optional
from .trait import TraitBase
@dataclass
class ColorManaged(TraitBase):
"""Color managed trait.
Holds color management information. Can be used with Image-related
traits to define color space and config.
Sync with OpenAssetIO MediaCreation Traits.
Attributes:
color_space (str): An OCIO colorspace name available
in the "current" OCIO context.
config (str): An OCIO config name defining color space.
"""
id: ClassVar[str] = "ayon.color.ColorManaged.v1"
name: ClassVar[str] = "ColorManaged"
color_space: str
description: ClassVar[str] = "Color Managed trait."
persistent: ClassVar[bool] = True
config: Optional[str] = None

View file

@ -0,0 +1,485 @@
"""Content traits for the pipeline."""
from __future__ import annotations
import contextlib
import re
from dataclasses import dataclass
# TCH003 is there because Path in TYPECHECKING will fail in tests
from pathlib import Path # noqa: TCH003
from typing import ClassVar, Generator, Optional
from .representation import Representation
from .temporal import FrameRanged, Handles, Sequence
from .trait import (
MissingTraitError,
TraitBase,
TraitValidationError,
)
from .two_dimensional import UDIM
from .utils import get_sequence_from_files
@dataclass
class MimeType(TraitBase):
"""MimeType trait model.
This model represents a mime type trait. For example, image/jpeg.
It is used to describe the type of content in a representation regardless
of the file extension.
For more information, see RFC 2046 and RFC 4288 (and related RFCs).
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
mime_type (str): Mime type like image/jpeg.
"""
name: ClassVar[str] = "MimeType"
description: ClassVar[str] = "MimeType Trait Model"
id: ClassVar[str] = "ayon.content.MimeType.v1"
persistent: ClassVar[bool] = True
mime_type: str
@dataclass
class LocatableContent(TraitBase):
"""LocatableContent trait model.
This model represents a locatable content trait. Locatable content
is content that has a location. It doesn't have to be a file - it could
be a URL or some other location.
Sync with OpenAssetIO MediaCreation Traits.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
location (str): Location.
is_templated (Optional[bool]): Is the location templated?
Default is None.
"""
name: ClassVar[str] = "LocatableContent"
description: ClassVar[str] = "LocatableContent Trait Model"
id: ClassVar[str] = "ayon.content.LocatableContent.v1"
persistent: ClassVar[bool] = True
location: str
is_templated: Optional[bool] = None
@dataclass
class FileLocation(TraitBase):
"""FileLocation trait model.
This model represents a file path. It is a specialization of the
LocatableContent trait. It is adding optional file size and file hash
for easy access to file information.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
file_path (str): File path.
file_size (Optional[int]): File size in bytes.
file_hash (Optional[str]): File hash.
"""
name: ClassVar[str] = "FileLocation"
description: ClassVar[str] = "FileLocation Trait Model"
id: ClassVar[str] = "ayon.content.FileLocation.v1"
persistent: ClassVar[bool] = True
file_path: Path
file_size: Optional[int] = None
file_hash: Optional[str] = None
@dataclass
class FileLocations(TraitBase):
"""FileLocation trait model.
This model represents a file path. It is a specialization of the
LocatableContent trait. It is adding optional file size and file hash
for easy access to file information.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
file_paths (list of FileLocation): File locations.
"""
name: ClassVar[str] = "FileLocations"
description: ClassVar[str] = "FileLocations Trait Model"
id: ClassVar[str] = "ayon.content.FileLocations.v1"
persistent: ClassVar[bool] = True
file_paths: list[FileLocation]
def get_files(self) -> Generator[Path, None, None]:
"""Get all file paths from the trait.
This method will return all file paths from the trait.
Yields:
Path: List of file paths.
"""
for file_location in self.file_paths:
yield file_location.file_path
def get_file_location_for_frame(
self,
frame: int,
sequence_trait: Optional[Sequence] = None,
) -> Optional[FileLocation]:
"""Get a file location for a frame.
This method will return the file location for a given frame. If the
frame is not found in the file paths, it will return None.
Args:
frame (int): Frame to get the file location for.
sequence_trait (Sequence): Sequence trait to get the
frame range specs from.
Returns:
Optional[FileLocation]: File location for the frame.
"""
frame_regex = re.compile(r"\.(?P<index>(?P<padding>0*)\d+)\.\D+\d?$")
if sequence_trait and sequence_trait.frame_regex:
frame_regex = sequence_trait.get_frame_pattern()
for location in self.file_paths:
result = re.search(frame_regex, location.file_path.name)
if result:
frame_index = int(result.group("index"))
if frame_index == frame:
return location
return None
def validate_trait(self, representation: Representation) -> None:
"""Validate the trait.
This method validates the trait against others in the representation.
In particular, it checks that the sequence trait is present, and if
so, it will compare the frame range to the file paths.
Args:
representation (Representation): Representation to validate.
Raises:
TraitValidationError: If the trait is invalid within the
representation.
"""
super().validate_trait(representation)
if len(self.file_paths) == 0:
# If there are no file paths, we can't validate
msg = "No file locations defined (empty list)"
raise TraitValidationError(self.name, msg)
if representation.contains_trait(FrameRanged):
self._validate_frame_range(representation)
        if len(self.file_paths) > 1 \
                and not representation.contains_trait(Sequence) \
                and not representation.contains_trait(UDIM):
            # We have multiple files, but neither a Sequence nor a UDIM
            # trait is defined. If the files are not related to each
            # other, this representation is invalid.
msg = (
"Multiple file locations defined, but no Sequence "
"or UDIM trait defined. If the files are not related to "
"each other, the representation is invalid."
)
raise TraitValidationError(self.name, msg)
def _validate_frame_range(self, representation: Representation) -> None:
"""Validate the frame range against the file paths.
If the representation contains a FrameRanged trait, this method will
validate the frame range against the file paths. If the frame range
does not match the file paths, the trait is invalid. It takes into
account the Handles and Sequence traits.
Args:
representation (Representation): Representation to validate.
Raises:
TraitValidationError: If the trait is invalid within the
representation.
"""
tmp_frame_ranged: FrameRanged = get_sequence_from_files(
[f.file_path for f in self.file_paths])
frames_from_spec: list[int] = []
with contextlib.suppress(MissingTraitError):
sequence: Sequence = representation.get_trait(Sequence)
frame_regex = sequence.get_frame_pattern()
if sequence.frame_spec:
frames_from_spec = sequence.get_frame_list(
self, frame_regex)
frame_start_with_handles, frame_end_with_handles = \
self._get_frame_info_with_handles(representation, frames_from_spec)
if frame_start_with_handles \
and tmp_frame_ranged.frame_start != frame_start_with_handles:
# If the detected frame range does not match the combined
# FrameRanged and Handles trait, the
# trait is invalid.
msg = (
f"Frame range defined by {self.name} "
f"({tmp_frame_ranged.frame_start}-"
f"{tmp_frame_ranged.frame_end}) "
"in files does not match "
"frame range "
f"({frame_start_with_handles}-"
f"{frame_end_with_handles}) defined in FrameRanged trait."
)
raise TraitValidationError(self.name, msg)
if frames_from_spec:
if len(frames_from_spec) != len(self.file_paths):
# If the number of file paths does not match the frame range,
# the trait is invalid
msg = (
f"Number of file locations ({len(self.file_paths)}) "
"does not match frame range defined by frame spec "
"on Sequence trait: "
f"({len(frames_from_spec)})"
)
raise TraitValidationError(self.name, msg)
# if there is a frame spec on the Sequence trait,
# we should not validate the frame range from the files.
# the rest is validated by Sequence validators.
return
length_with_handles: int = (
frame_end_with_handles - frame_start_with_handles + 1
)
if len(self.file_paths) != length_with_handles:
# If the number of file paths does not match the frame range,
# the trait is invalid
msg = (
f"Number of file locations ({len(self.file_paths)}) "
"does not match frame range "
f"({length_with_handles})"
)
raise TraitValidationError(self.name, msg)
frame_ranged: FrameRanged = representation.get_trait(FrameRanged)
if frame_start_with_handles != tmp_frame_ranged.frame_start or \
frame_end_with_handles != tmp_frame_ranged.frame_end:
# If the frame range does not match the FrameRanged trait, the
# trait is invalid. Note that we don't check the frame rate
# because it is not stored in the file paths and is not
# determined by `get_sequence_from_files`.
msg = (
"Frame range "
f"({frame_ranged.frame_start}-{frame_ranged.frame_end}) "
"in sequence trait does not match "
"frame range "
f"({tmp_frame_ranged.frame_start}-"
f"{tmp_frame_ranged.frame_end}) "
)
raise TraitValidationError(self.name, msg)
@staticmethod
def _get_frame_info_with_handles(
representation: Representation,
frames_from_spec: list[int]) -> tuple[int, int]:
"""Get the frame range with handles from the representation.
This will return frame start and frame end with handles calculated
in if there actually is the Handles trait in the representation.
Args:
representation (Representation): Representation to get the frame
range from.
frames_from_spec (list[int]): List of frames from the frame spec.
This list is modified in place to take into
account the handles.
Mutates:
frames_from_spec: List of frames from the frame spec.
Returns:
tuple[int, int]: Start and end frame with handles.
"""
frame_start = frame_end = 0
frame_start_handle = frame_end_handle = 0
# If there is no sequence trait, we can't validate it
if frames_from_spec and representation.contains_trait(FrameRanged):
# if there is no FrameRanged trait (but really there should be)
# we can use the frame range from the frame spec
frame_start = min(frames_from_spec)
frame_end = max(frames_from_spec)
# Handle the frame range
with contextlib.suppress(MissingTraitError):
frame_start = representation.get_trait(FrameRanged).frame_start
frame_end = representation.get_trait(FrameRanged).frame_end
# Handle the handles :P
with contextlib.suppress(MissingTraitError):
handles: Handles = representation.get_trait(Handles)
if not handles.inclusive:
                # if handles are exclusive, we need to adjust the frame range
frame_start_handle = handles.frame_start_handle or 0
frame_end_handle = handles.frame_end_handle or 0
if frames_from_spec:
frames_from_spec.extend(
range(frame_start - frame_start_handle, frame_start)
)
frames_from_spec.extend(
range(frame_end + 1, frame_end_handle + frame_end + 1)
)
frame_start_with_handles = frame_start - frame_start_handle
frame_end_with_handles = frame_end + frame_end_handle
return frame_start_with_handles, frame_end_with_handles
@dataclass
class RootlessLocation(TraitBase):
"""RootlessLocation trait model.
RootlessLocation trait is a trait that represents a file path that is
without a specific root. To get the absolute path, the root needs to be
resolved by AYON. Rootless path can be used on multiple platforms.
Example::
RootlessLocation(
rootless_path="{root[work]}/project/asset/asset.jpg"
)
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
rootless_path (str): Rootless path.
"""
name: ClassVar[str] = "RootlessLocation"
description: ClassVar[str] = "RootlessLocation Trait Model"
id: ClassVar[str] = "ayon.content.RootlessLocation.v1"
persistent: ClassVar[bool] = True
rootless_path: str
@dataclass
class Compressed(TraitBase):
"""Compressed trait model.
This trait can hold information about compressed content. What type
of compression is used.
Example::
Compressed("gzip")
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
compression_type (str): Compression type.
"""
name: ClassVar[str] = "Compressed"
description: ClassVar[str] = "Compressed Trait"
id: ClassVar[str] = "ayon.content.Compressed.v1"
persistent: ClassVar[bool] = True
compression_type: str
@dataclass
class Bundle(TraitBase):
"""Bundle trait model.
This model list of independent Representation traits
that are bundled together. This is useful for representing
a collection of sub-entities that are part of a single
entity. You can easily reconstruct representations from
the bundle.
Example::
Bundle(
items=[
[
MimeType(mime_type="image/jpeg"),
FileLocation(file_path="/path/to/file.jpg")
],
[
MimeType(mime_type="image/png"),
FileLocation(file_path="/path/to/file.png")
]
]
)
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
items (list[list[TraitBase]]): List of representations.
"""
name: ClassVar[str] = "Bundle"
description: ClassVar[str] = "Bundle Trait"
id: ClassVar[str] = "ayon.content.Bundle.v1"
persistent: ClassVar[bool] = True
items: list[list[TraitBase]]
    def to_representations(self) -> Generator[Representation, None, None]:
"""Convert a bundle to representations.
Yields:
Representation: Representation of the bundle.
"""
for idx, item in enumerate(self.items):
yield Representation(name=f"{self.name} {idx}", traits=item)
@dataclass
class Fragment(TraitBase):
"""Fragment trait model.
This model represents a fragment trait. A fragment is a part of
a larger entity that is represented by another representation.
Example::
main_representation = Representation(name="parent",
traits=[],
)
fragment_representation = Representation(
name="fragment",
traits=[
Fragment(parent=main_representation.id),
]
)
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be namespaced trait name with version
parent (str): Parent representation id.
"""
name: ClassVar[str] = "Fragment"
description: ClassVar[str] = "Fragment Trait"
id: ClassVar[str] = "ayon.content.Fragment.v1"
persistent: ClassVar[bool] = True
parent: str

View file

@ -0,0 +1,42 @@
"""Cryptography traits."""
from __future__ import annotations
from dataclasses import dataclass
from typing import ClassVar, Optional
from .trait import TraitBase
@dataclass
class DigitallySigned(TraitBase):
"""Digitally signed trait.
This type trait means that the data is digitally signed.
"""
id: ClassVar[str] = "ayon.cryptography.DigitallySigned.v1"
name: ClassVar[str] = "DigitallySigned"
description: ClassVar[str] = "Digitally signed trait."
persistent: ClassVar[bool] = True
@dataclass
class PGPSigned(DigitallySigned):
"""PGP signed trait.
This trait holds PGP (RFC-4880) signed data.
Attributes:
signed_data (str): Signed data.
clear_text (str): Clear text.
"""
id: ClassVar[str] = "ayon.cryptography.PGPSigned.v1"
name: ClassVar[str] = "PGPSigned"
description: ClassVar[str] = "PGP signed trait."
persistent: ClassVar[bool] = True
signed_data: str
clear_text: Optional[str] = None

View file

@ -0,0 +1,77 @@
"""Lifecycle traits."""
from dataclasses import dataclass
from typing import ClassVar
from .trait import TraitBase, TraitValidationError
@dataclass
class Transient(TraitBase):
"""Transient trait model.
Transient trait marks representation as transient. Such representations
are not persisted in the system.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with the version
"""
name: ClassVar[str] = "Transient"
description: ClassVar[str] = "Transient Trait Model"
id: ClassVar[str] = "ayon.lifecycle.Transient.v1"
persistent: ClassVar[bool] = True # see note in Persistent
def validate_trait(self, representation) -> None: # noqa: ANN001
"""Validate representation is not Persistent.
Args:
representation (Representation): Representation model.
Raises:
TraitValidationError: If representation is marked as both
Persistent and Transient.
"""
if representation.contains_trait(Persistent):
msg = "Representation is marked as both Persistent and Transient."
raise TraitValidationError(self.name, msg)
@dataclass
class Persistent(TraitBase):
"""Persistent trait model.
Persistent trait is opposite to transient trait. It marks representation
as persistent. Such representations are persisted in the system (e.g. in
the database).
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with the version
"""
name: ClassVar[str] = "Persistent"
description: ClassVar[str] = "Persistent Trait Model"
id: ClassVar[str] = "ayon.lifecycle.Persistent.v1"
# Note that this affects the persistence of the trait itself, not
# the representation. This is a class variable, so it is shared
# among all instances of the class.
persistent: bool = True
def validate_trait(self, representation) -> None: # noqa: ANN001
"""Validate representation is not Transient.
Args:
representation (Representation): Representation model.
Raises:
TraitValidationError: If representation is marked
as both Persistent and Transient.
"""
if representation.contains_trait(Transient):
msg = "Representation is marked as both Persistent and Transient."
raise TraitValidationError(self.name, msg)

View file

@ -0,0 +1,162 @@
"""Metadata traits."""
from __future__ import annotations
from dataclasses import dataclass
from typing import ClassVar, List, Optional
from .trait import TraitBase
@dataclass
class Tagged(TraitBase):
"""Tagged trait model.
This trait can hold a list of tags.
Example::
Tagged(tags=["tag1", "tag2"])
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
tags (List[str]): Tags.
"""
name: ClassVar[str] = "Tagged"
description: ClassVar[str] = "Tagged Trait Model"
id: ClassVar[str] = "ayon.meta.Tagged.v1"
persistent: ClassVar[bool] = True
tags: List[str]
@dataclass
class TemplatePath(TraitBase):
"""TemplatePath trait model.
This model represents a template path with formatting data.
Template path can be an Anatomy template and data is used to format it.
Example::
TemplatePath(template="path/{key}/file", data={"key": "to"})
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
template (str): Template path.
data (dict[str]): Formatting data.
"""
name: ClassVar[str] = "TemplatePath"
description: ClassVar[str] = "Template Path Trait Model"
id: ClassVar[str] = "ayon.meta.TemplatePath.v1"
persistent: ClassVar[bool] = True
template: str
data: dict
@dataclass
class Variant(TraitBase):
"""Variant trait model.
This model represents a variant of the representation.
Example::
Variant(variant="high")
Variant(variant="prores444)
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
variant (str): Variant name.
"""
name: ClassVar[str] = "Variant"
description: ClassVar[str] = "Variant Trait Model"
id: ClassVar[str] = "ayon.meta.Variant.v1"
persistent: ClassVar[bool] = True
variant: str
@dataclass
class KeepOriginalLocation(TraitBase):
"""Keep files in its original location.
Note:
This is not a persistent trait.
"""
name: ClassVar[str] = "KeepOriginalLocation"
description: ClassVar[str] = "Keep Original Location Trait Model"
id: ClassVar[str] = "ayon.meta.KeepOriginalLocation.v1"
persistent: ClassVar[bool] = False
@dataclass
class KeepOriginalName(TraitBase):
"""Keep files in its original name.
Note:
This is not a persistent trait.
"""
name: ClassVar[str] = "KeepOriginalName"
description: ClassVar[str] = "Keep Original Name Trait Model"
id: ClassVar[str] = "ayon.meta.KeepOriginalName.v1"
persistent: ClassVar[bool] = False
@dataclass
class SourceApplication(TraitBase):
"""Metadata about the source (producing) application.
This can be useful in cases where this information is
needed, but it cannot be determined from other means - like
.txt files used for various motion tracking applications that
must be interpreted by the loader.
Note that this is not really connected to any logic in
ayon-applications addon.
Attributes:
application (str): Application name.
variant (str): Application variant.
version (str): Application version.
platform (str): Platform name (Windows, darwin, etc.).
host_name (str): AYON host name if applicable.
"""
name: ClassVar[str] = "SourceApplication"
description: ClassVar[str] = "Source Application Trait Model"
id: ClassVar[str] = "ayon.meta.SourceApplication.v1"
persistent: ClassVar[bool] = True
application: str
variant: Optional[str] = None
version: Optional[str] = None
platform: Optional[str] = None
host_name: Optional[str] = None
@dataclass
class IntendedUse(TraitBase):
"""Intended use of the representation.
This trait describes the intended use of the representation. It
can be used in cases where the other traits are not enough to
describe the intended use. For example, a txt file with tracking
points can be used as a corner pin in After Effect but not in Nuke.
Attributes:
use (str): Intended use description.
"""
name: ClassVar[str] = "IntendedUse"
description: ClassVar[str] = "Intended Use Trait Model"
id: ClassVar[str] = "ayon.meta.IntendedUse.v1"
persistent: ClassVar[bool] = True
use: str

View file

@ -0,0 +1,713 @@
"""Defines the base trait model and representation."""
from __future__ import annotations
import contextlib
import inspect
import re
import sys
import uuid
from functools import lru_cache
from types import GenericAlias
from typing import (
ClassVar,
Generic,
ItemsView,
Optional,
Type,
TypeVar,
Union,
)
from .trait import (
IncompatibleTraitVersionError,
LooseMatchingTraitError,
MissingTraitError,
TraitBase,
TraitValidationError,
UpgradableTraitError,
)
T = TypeVar("T", bound="TraitBase")
def _get_version_from_id(_id: str) -> Optional[int]:
"""Get the version from ID.
Args:
_id (str): ID.
Returns:
int: Version.
"""
match = re.search(r"v(\d+)$", _id)
return int(match[1]) if match else None
class Representation(Generic[T]): # noqa: PLR0904
"""Representation of products.
Representation defines a collection of individual properties that describe
the specific "form" of the product. A trait represents a set of
    properties; therefore, the Representation is a collection of traits.
It holds methods to add, remove, get, and check for the existence of a
trait in the representation.
Note:
`PLR0904` is the rule for checking the number of public methods
in a class.
Arguments:
name (str): Representation name. Must be unique within instance.
representation_id (str): Representation ID.
"""
_data: dict[str, T]
_module_blacklist: ClassVar[list[str]] = [
"_", "builtins", "pydantic",
]
name: str
representation_id: str
def __hash__(self):
"""Return hash of the representation ID."""
return hash(self.representation_id)
def __getitem__(self, key: str) -> T:
"""Get the trait by ID.
Args:
key (str): Trait ID.
Returns:
TraitBase: Trait instance.
"""
return self.get_trait_by_id(key)
def __setitem__(self, key: str, value: T) -> None:
"""Set the trait by ID.
Args:
key (str): Trait ID.
value (TraitBase): Trait instance.
"""
with contextlib.suppress(KeyError):
self._data.pop(key)
self.add_trait(value)
def __delitem__(self, key: str) -> None:
"""Remove the trait by ID.
Args:
key (str): Trait ID.
"""
self.remove_trait_by_id(key)
def __contains__(self, key: str) -> bool:
"""Check if the trait exists by ID.
Args:
key (str): Trait ID.
Returns:
bool: True if the trait exists, False otherwise.
"""
return self.contains_trait_by_id(key)
def __iter__(self):
"""Return the trait ID iterator."""
return iter(self._data)
def __str__(self):
"""Return the representation name."""
return self.name
def items(self) -> ItemsView[str, T]:
"""Return the traits as items."""
        return self._data.items()
def add_trait(self, trait: T, *, exists_ok: bool = False) -> None:
"""Add a trait to the Representation.
Args:
trait (TraitBase): Trait to add.
exists_ok (bool, optional): If True, do not raise an error if the
trait already exists. Defaults to False.
Raises:
ValueError: If the trait ID is not provided, or the trait already
exists.
"""
if not hasattr(trait, "id"):
error_msg = f"Invalid trait {trait} - ID is required."
raise ValueError(error_msg)
if trait.id in self._data and not exists_ok:
error_msg = f"Trait with ID {trait.id} already exists."
raise ValueError(error_msg)
self._data[trait.id] = trait
def add_traits(
self, traits: list[T], *, exists_ok: bool = False) -> None:
"""Add a list of traits to the Representation.
Args:
traits (list[TraitBase]): List of traits to add.
exists_ok (bool, optional): If True, do not raise an error if the
trait already exists. Defaults to False.
"""
for trait in traits:
self.add_trait(trait, exists_ok=exists_ok)
def remove_trait(self, trait: Type[TraitBase]) -> None:
"""Remove a trait from the data.
Args:
trait (TraitBase, optional): Trait class.
Raises:
ValueError: If the trait is not found.
"""
try:
self._data.pop(str(trait.id))
except KeyError as e:
error_msg = f"Trait with ID {trait.id} not found."
raise ValueError(error_msg) from e
def remove_trait_by_id(self, trait_id: str) -> None:
"""Remove a trait from the data by its ID.
Args:
trait_id (str): Trait ID.
Raises:
ValueError: If the trait is not found.
"""
try:
self._data.pop(trait_id)
except KeyError as e:
error_msg = f"Trait with ID {trait_id} not found."
raise ValueError(error_msg) from e
def remove_traits(self, traits: list[Type[T]]) -> None:
"""Remove a list of traits from the Representation.
If no trait IDs or traits are provided, all traits will be removed.
Args:
traits (list[TraitBase]): List of trait classes.
"""
if not traits:
self._data = {}
return
for trait in traits:
self.remove_trait(trait)
def remove_traits_by_id(self, trait_ids: list[str]) -> None:
"""Remove a list of traits from the Representation by their ID.
If no trait IDs or traits are provided, all traits will be removed.
Args:
trait_ids (list[str], optional): List of trait IDs.
"""
for trait_id in trait_ids:
self.remove_trait_by_id(trait_id)
def has_traits(self) -> bool:
"""Check if the Representation has any traits.
Returns:
bool: True if the Representation has any traits, False otherwise.
"""
return bool(self._data)
def contains_trait(self, trait: Type[T]) -> bool:
"""Check if the trait exists in the Representation.
Args:
trait (TraitBase): Trait class.
Returns:
bool: True if the trait exists, False otherwise.
"""
return bool(self._data.get(str(trait.id)))
def contains_trait_by_id(self, trait_id: str) -> bool:
"""Check if the trait exists using trait id.
Args:
trait_id (str): Trait ID.
Returns:
bool: True if the trait exists, False otherwise.
"""
return bool(self._data.get(trait_id))
def contains_traits(self, traits: list[Type[T]]) -> bool:
"""Check if the traits exist.
Args:
traits (list[TraitBase], optional): List of trait classes.
Returns:
bool: True if all traits exist, False otherwise.
"""
return all(self.contains_trait(trait=trait) for trait in traits)
def contains_traits_by_id(self, trait_ids: list[str]) -> bool:
"""Check if the traits exist by id.
If no trait IDs or traits are provided, it will check if the
representation has any traits.
Args:
trait_ids (list[str]): List of trait IDs.
Returns:
bool: True if all traits exist, False otherwise.
"""
return all(
self.contains_trait_by_id(trait_id) for trait_id in trait_ids
)
def get_trait(self, trait: Type[T]) -> T:
"""Get a trait from the representation.
Args:
trait (TraitBase, optional): Trait class.
Returns:
TraitBase: Trait instance.
Raises:
MissingTraitError: If the trait is not found.
"""
try:
return self._data[str(trait.id)]
except KeyError as e:
msg = f"Trait with ID {trait.id} not found."
raise MissingTraitError(msg) from e
def get_trait_by_id(self, trait_id: str) -> T:
# sourcery skip: use-named-expression
"""Get a trait from the representation by id.
Args:
trait_id (str): Trait ID.
Returns:
TraitBase: Trait instance.
Raises:
MissingTraitError: If the trait is not found.
"""
version = _get_version_from_id(trait_id)
if version:
try:
return self._data[trait_id]
except KeyError as e:
msg = f"Trait with ID {trait_id} not found."
raise MissingTraitError(msg) from e
        result = next(
            (
                self._data.get(existing_id)
                for existing_id in self._data
                if existing_id.startswith(trait_id)
            ),
            None,
        )
if result is None:
msg = f"Trait with ID {trait_id} not found."
raise MissingTraitError(msg)
return result
def get_traits(self,
traits: Optional[list[Type[T]]] = None
) -> dict[str, T]:
"""Get a list of traits from the representation.
If no trait IDs or traits are provided, all traits will be returned.
Args:
traits (list[TraitBase], optional): List of trait classes.
Returns:
dict: Dictionary of traits.
"""
result: dict[str, T] = {}
if not traits:
for trait_id in self._data:
result[trait_id] = self.get_trait_by_id(trait_id=trait_id)
return result
for trait in traits:
result[str(trait.id)] = self.get_trait(trait=trait)
return result
def get_traits_by_ids(self, trait_ids: list[str]) -> dict[str, T]:
"""Get a list of traits from the representation by their id.
If no trait IDs or traits are provided, all traits will be returned.
Args:
trait_ids (list[str]): List of trait IDs.
Returns:
dict: Dictionary of traits.
"""
return {
trait_id: self.get_trait_by_id(trait_id)
for trait_id in trait_ids
}
def traits_as_dict(self) -> dict:
"""Return the traits from Representation data as a dictionary.
Returns:
dict: Traits data dictionary.
"""
return {
trait_id: trait.as_dict()
for trait_id, trait in self._data.items()
if trait and trait_id
}
def __len__(self):
"""Return the length of the data."""
return len(self._data)
def __init__(
self,
name: str,
representation_id: Optional[str] = None,
traits: Optional[list[T]] = None):
"""Initialize the data.
Args:
name (str): Representation name. Must be unique within instance.
representation_id (str, optional): Representation ID.
traits (list[TraitBase], optional): List of traits.
"""
self.name = name
self.representation_id = representation_id or uuid.uuid4().hex
self._data = {}
if traits:
for trait in traits:
self.add_trait(trait)
@staticmethod
def _get_version_from_id(trait_id: str) -> Union[int, None]:
# sourcery skip: use-named-expression
"""Check if the trait has a version specified.
Args:
trait_id (str): Trait ID.
Returns:
int: Trait version.
None: If the trait id does not have a version.
"""
version_regex = r"v(\d+)$"
match = re.search(version_regex, trait_id)
return int(match[1]) if match else None
def __eq__(self, other: object) -> bool: # noqa: PLR0911
"""Check if the representation is equal to another.
Args:
other (Representation): Representation to compare.
Returns:
bool: True if the representations are equal, False otherwise.
"""
if not isinstance(other, Representation):
return False
if self.representation_id != other.representation_id:
return False
if self.name != other.name:
return False
# number of traits
if len(self) != len(other):
return False
for trait_id, trait in self._data.items():
if trait_id not in other._data:
return False
if trait != other._data[trait_id]:
return False
return True
@classmethod
@lru_cache(maxsize=64)
def _get_possible_trait_classes_from_modules(
cls,
trait_id: str) -> set[type[T]]:
"""Get possible trait classes from modules.
Args:
trait_id (str): Trait ID.
Returns:
set[type[T]]: Set of trait classes.
"""
modules = sys.modules.copy()
filtered_modules = modules.copy()
for module_name in modules:
for bl_module in cls._module_blacklist:
if module_name.startswith(bl_module):
filtered_modules.pop(module_name)
trait_candidates = set()
for module in filtered_modules.values():
if not module:
continue
for attr_name in dir(module):
klass = getattr(module, attr_name)
if not inspect.isclass(klass):
continue
# This needs to be done because of the bug? In
# python ABCMeta, where ``issubclass`` is not working
# if it hits the GenericAlias (that is in fact
# tuple[int, int]). This is added to the scope by
# the ``types`` module.
if type(klass) is GenericAlias:
continue
if issubclass(klass, TraitBase) \
and str(klass.id).startswith(trait_id):
trait_candidates.add(klass)
return trait_candidates # type: ignore[return-value]
@classmethod
@lru_cache(maxsize=64)
def _get_trait_class(
cls, trait_id: str) -> Union[Type[T], None]:
"""Get the trait class with corresponding to given ID.
This method will search for the trait class in all the modules except
the blocklisted modules. There is some issue in Pydantic where
``issubclass`` is not working properly, so we are excluding explicit
modules with offending classes. This list can be updated as needed to
speed up the search.
Args:
trait_id (str): Trait ID.
Returns:
Type[TraitBase]: Trait class.
"""
version = cls._get_version_from_id(trait_id)
trait_candidates = cls._get_possible_trait_classes_from_modules(
trait_id
)
if not trait_candidates:
return None
for trait_class in trait_candidates:
if trait_class.id == trait_id:
# we found a direct match
return trait_class
# if we didn't find direct match, we will search for the highest
# version of the trait.
if not version:
# sourcery skip: use-named-expression
trait_versions = [
trait_class for trait_class in trait_candidates
if re.match(
rf"{trait_id}.v(\d+)$", str(trait_class.id))
]
if trait_versions:
def _get_version_by_id(trait_klass: Type[T]) -> int:
match = re.search(r"v(\d+)$", str(trait_klass.id))
return int(match[1]) if match else 0
error: LooseMatchingTraitError = LooseMatchingTraitError(
"Found trait that might match.")
error.found_trait = max(
trait_versions, key=_get_version_by_id)
error.expected_id = trait_id
raise error
return None
@classmethod
def get_trait_class_by_trait_id(cls, trait_id: str) -> Type[T]:
"""Get the trait class for the given trait ID.
Args:
trait_id (str): Trait ID.
Returns:
type[TraitBase]: Trait class.
Raises:
IncompatibleTraitVersionError: If the trait version is incompatible
with the current version of the trait.
"""
try:
trait_class = cls._get_trait_class(trait_id=trait_id)
except LooseMatchingTraitError as e:
requested_version = _get_version_from_id(trait_id)
found_version = _get_version_from_id(e.found_trait.id)
if found_version is None and not requested_version:
msg = (
"Trait found with no version and requested version "
"is not specified."
)
raise IncompatibleTraitVersionError(msg) from e
if found_version is None:
msg = (
f"Trait {e.found_trait.id} found with no version, "
"but requested version is specified."
)
raise IncompatibleTraitVersionError(msg) from e
if requested_version is None:
trait_class = e.found_trait
requested_version = found_version
if requested_version > found_version:
error_msg = (
f"Requested trait version {requested_version} is "
f"higher than the found trait version {found_version}."
)
raise IncompatibleTraitVersionError(error_msg) from e
if requested_version < found_version and hasattr(
e.found_trait, "upgrade"):
error_msg = (
"Requested trait version "
f"{requested_version} is lower "
f"than the found trait version {found_version}."
)
error: UpgradableTraitError = UpgradableTraitError(error_msg)
error.trait = e.found_trait
raise error from e
return trait_class # type: ignore[return-value]
@classmethod
def from_dict(
cls: Type[Representation],
name: str,
representation_id: Optional[str] = None,
trait_data: Optional[dict] = None) -> Representation:
"""Create a representation from a dictionary.
Args:
name (str): Representation name.
representation_id (str, optional): Representation ID.
trait_data (dict): Representation data. Dictionary with keys
as trait ids and values as trait data. Example::
{
"ayon.2d.PixelBased.v1": {
"display_window_width": 1920,
"display_window_height": 1080
},
"ayon.2d.Planar.v1": {
"channels": 3
}
}
Returns:
Representation: Representation instance.
Raises:
ValueError: If the trait model with ID is not found.
TypeError: If the trait data is not a dictionary.
IncompatibleTraitVersionError: If the trait version is incompatible
"""
if not trait_data:
trait_data = {}
traits = []
for trait_id, value in trait_data.items():
if not isinstance(value, dict):
msg = (
f"Invalid trait data for trait ID {trait_id}. "
"Trait data must be a dictionary."
)
raise TypeError(msg)
try:
trait_class = cls.get_trait_class_by_trait_id(trait_id)
except UpgradableTraitError as e:
# we found a newer version of trait, we will upgrade the data
if hasattr(e.trait, "upgrade"):
traits.append(e.trait.upgrade(value))
else:
msg = (
f"Newer version of trait {e.trait.id} found "
f"for requested {trait_id} but without "
"upgrade method."
)
raise IncompatibleTraitVersionError(msg) from e
else:
if not trait_class:
error_msg = f"Trait model with ID {trait_id} not found."
raise ValueError(error_msg)
traits.append(trait_class(**value))
return cls(
name=name, representation_id=representation_id, traits=traits)
def validate(self) -> None:
"""Validate the representation.
This method will validate all the traits in the representation.
Raises:
TraitValidationError: If the trait is invalid within representation
"""
errors = []
for trait in self._data.values():
# we do this in the loop to catch all the errors
try:
trait.validate_trait(self)
except TraitValidationError as e: # noqa: PERF203
errors.append(str(e))
if errors:
msg = "\n".join(errors)
scope = self.name
raise TraitValidationError(scope, msg)

View file

@ -0,0 +1,457 @@
"""Temporal (time related) traits."""
from __future__ import annotations
import contextlib
import re
from dataclasses import dataclass
from enum import Enum, auto
from re import Pattern
from typing import TYPE_CHECKING, ClassVar, Optional
import clique
from .trait import MissingTraitError, TraitBase, TraitValidationError
if TYPE_CHECKING:
from .content import FileLocations
from .representation import Representation
class GapPolicy(Enum):
"""Gap policy enumeration.
This type defines how to handle gaps in a sequence.
Attributes:
forbidden (int): Gaps are forbidden.
missing (int): Gaps are interpreted as missing frames.
hold (int): Gaps are interpreted as hold frames (last existing frames).
black (int): Gaps are interpreted as black frames.
"""
forbidden = auto()
missing = auto()
hold = auto()
black = auto()
@dataclass
class FrameRanged(TraitBase):
"""Frame ranged trait model.
Model representing a frame-ranged trait.
Sync with OpenAssetIO MediaCreation Traits. For compatibility with
OpenAssetIO, we'll need to handle different names of attributes:
* frame_start -> start_frame
* frame_end -> end_frame
...
Note: frames_per_second is a string to allow various precision
formats. FPS is a floating point number, but it can be also
represented as a fraction (e.g. "30000/1001") or as a decimal
or even as an irrational number. We need to support all these
formats. To work with FPS, we'll need some helper function
to convert FPS to Decimal from string.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with a version
frame_start (int): Frame start.
frame_end (int): Frame end.
frame_in (int): Frame in.
frame_out (int): Frame out.
frames_per_second (str): Frames per second.
step (int): Step.
"""
name: ClassVar[str] = "FrameRanged"
description: ClassVar[str] = "Frame Ranged Trait"
id: ClassVar[str] = "ayon.time.FrameRanged.v1"
persistent: ClassVar[bool] = True
frame_start: int
frame_end: int
frame_in: Optional[int] = None
frame_out: Optional[int] = None
    frames_per_second: Optional[str] = None
step: Optional[int] = None
@dataclass
class Handles(TraitBase):
"""Handles trait model.
Handles define the range of frames that are included or excluded
from the sequence.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with a version
inclusive (bool): Handles are inclusive.
frame_start_handle (int): Frame start handle.
frame_end_handle (int): Frame end handle.
"""
name: ClassVar[str] = "Handles"
description: ClassVar[str] = "Handles Trait"
id: ClassVar[str] = "ayon.time.Handles.v1"
persistent: ClassVar[bool] = True
inclusive: Optional[bool] = False
frame_start_handle: Optional[int] = None
frame_end_handle: Optional[int] = None
@dataclass
class Sequence(TraitBase):
"""Sequence trait model.
This model represents a sequence trait. Based on the FrameRanged trait
and Handles, adding support for gaps policy, frame padding and frame
list specification. Regex is used to match frame numbers.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with a version
gaps_policy (GapPolicy): Gaps policy - how to handle gaps in
sequence.
frame_padding (int): Frame padding.
frame_regex (str): Frame regex - regular expression to match
frame numbers. Must include 'index' named group and 'padding'
named group.
frame_spec (str): Frame list specification of frames. This takes
string like "1-10,20-30,40-50" etc.
"""
name: ClassVar[str] = "Sequence"
description: ClassVar[str] = "Sequence Trait Model"
id: ClassVar[str] = "ayon.time.Sequence.v1"
persistent: ClassVar[bool] = True
frame_padding: int
gaps_policy: Optional[GapPolicy] = GapPolicy.forbidden
frame_regex: Optional[Pattern] = None
frame_spec: Optional[str] = None
@classmethod
def validate_frame_regex(
cls, v: Optional[Pattern]
) -> Optional[Pattern]:
"""Validate frame regex.
Frame regex must have index and padding named groups.
Returns:
Optional[Pattern]: Compiled regex pattern.
Raises:
ValueError: If frame regex does not include 'index' and 'padding'
"""
if v is None:
return v
if v and any(s not in v.pattern for s in ["?P<index>", "?P<padding>"]):
msg = "Frame regex must include 'index' and `padding named groups"
raise ValueError(msg)
return v
def validate_trait(self, representation: Representation) -> None:
"""Validate the trait."""
super().validate_trait(representation)
# if there is a FileLocations trait, run validation
# on it as well
with contextlib.suppress(MissingTraitError):
self._validate_file_locations(representation)
def _validate_file_locations(self, representation: Representation) -> None:
"""Validate file locations trait.
If along with the Sequence trait, there is a FileLocations trait,
then we need to validate if the file locations match the frame
list specification.
Args:
representation (Representation): Representation instance.
"""
from .content import FileLocations
file_locs: FileLocations = representation.get_trait(
FileLocations)
# Validate if the file locations on representation
# match the frame list (if any).
# We need to extend the expected frames with Handles.
frame_start = None
frame_end = None
handles_frame_start = None
handles_frame_end = None
with contextlib.suppress(MissingTraitError):
handles: Handles = representation.get_trait(Handles)
# if handles are inclusive, they should be already
# accounted for in the FrameRaged frame spec
if not handles.inclusive:
handles_frame_start = handles.frame_start_handle
handles_frame_end = handles.frame_end_handle
with contextlib.suppress(MissingTraitError):
frame_ranged: FrameRanged = representation.get_trait(
FrameRanged)
frame_start = frame_ranged.frame_start
frame_end = frame_ranged.frame_end
if self.frame_spec is not None:
self.validate_frame_list(
file_locs,
frame_start,
frame_end,
handles_frame_start,
handles_frame_end)
self.validate_frame_padding(file_locs)
def validate_frame_list(
self,
file_locations: FileLocations,
frame_start: Optional[int] = None,
frame_end: Optional[int] = None,
handles_frame_start: Optional[int] = None,
handles_frame_end: Optional[int] = None) -> None:
"""Validate a frame list.
This will take FileLocations trait and validate if the
file locations match the frame list specification.
For example, if the frame list is "1-10,20-30,40-50", then
the frame numbers in the file locations should match
these frames.
It will skip the validation if the frame list is not provided.
Args:
file_locations (FileLocations): File locations trait.
frame_start (Optional[int]): Frame start.
frame_end (Optional[int]): Frame end.
handles_frame_start (Optional[int]): Frame start handle.
handles_frame_end (Optional[int]): Frame end handle.
Raises:
TraitValidationError: If the frame list does not match
the expected frames.
"""
if self.frame_spec is None:
return
frames: list[int] = []
if self.frame_regex:
frames = self.get_frame_list(
file_locations, self.frame_regex)
else:
frames = self.get_frame_list(
file_locations)
expected_frames = self.list_spec_to_frames(self.frame_spec)
if frame_start is not None and frame_end is not None:
if min(expected_frames) != frame_start:
msg = (
"Frame start does not match the expected frame start. "
f"Expected: {frame_start}, Found: {min(expected_frames)}"
)
raise TraitValidationError(self.name, msg)
if max(expected_frames) != frame_end:
msg = (
"Frame end does not match the expected frame end. "
f"Expected: {frame_end}, Found: {max(expected_frames)}"
)
raise TraitValidationError(self.name, msg)
# we need to extend the expected frames with Handles
if handles_frame_start is not None:
expected_frames.extend(
range(
min(frames) - handles_frame_start, min(frames) + 1))
if handles_frame_end is not None:
expected_frames.extend(
range(
max(frames), max(frames) + handles_frame_end + 1))
if set(frames) != set(expected_frames):
msg = (
"Frame list does not match the expected frames. "
f"Expected: {expected_frames}, Found: {frames}"
)
raise TraitValidationError(self.name, msg)
def validate_frame_padding(
self, file_locations: FileLocations) -> None:
"""Validate frame padding.
This will take FileLocations trait and validate if the
frame padding matches the expected frame padding.
Args:
file_locations (FileLocations): File locations trait.
Raises:
TraitValidationError: If frame padding does not match
the expected frame padding.
"""
expected_padding = self.get_frame_padding(file_locations)
if self.frame_padding != expected_padding:
msg = (
"Frame padding does not match the expected frame padding. "
f"Expected: {expected_padding}, Found: {self.frame_padding}"
)
raise TraitValidationError(self.name, msg)
@staticmethod
def list_spec_to_frames(list_spec: str) -> list[int]:
"""Convert list specification to frames.
Returns:
list[int]: List of frame numbers.
Raises:
ValueError: If invalid frame number in the list.
"""
frames = []
segments = list_spec.split(",")
for segment in segments:
ranges = segment.split("-")
if len(ranges) == 1:
if not ranges[0].isdigit():
msg = (
"Invalid frame number "
f"in the list: {ranges[0]}"
)
raise ValueError(msg)
frames.append(int(ranges[0]))
continue
start, end = segment.split("-")
frames.extend(range(int(start), int(end) + 1))
return frames
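# Usage sketch, assuming the frame list format described above:
#
#     >>> Sequence.list_spec_to_frames("1-3,5,10-12")
#     [1, 2, 3, 5, 10, 11, 12]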
@staticmethod
def _get_collection(
file_locations: FileLocations,
regex: Optional[Pattern] = None) -> clique.Collection:
r"""Get the collection from file locations.
Args:
file_locations (FileLocations): File locations trait.
regex (Optional[Pattern]): Regular expression to match
frame numbers. This is passed to ``clique.assemble()``.
Default clique pattern is::
\.(?P<index>(?P<padding>0*)\d+)\.\D+\d?$
Returns:
clique.Collection: Collection instance.
Raises:
ValueError: If zero or multiple collections are found.
"""
patterns = [regex] if regex else None
files: list[str] = [
file.file_path.as_posix()
for file in file_locations.file_paths
]
src_collections, _ = clique.assemble(files, patterns=patterns)
if len(src_collections) != 1:
msg = (
f"Zero or multiple collections found: {len(src_collections)} "
"expected 1"
)
raise ValueError(msg)
return src_collections[0]
@staticmethod
def get_frame_padding(file_locations: FileLocations) -> int:
"""Get frame padding.
Returns:
int: Frame padding.
"""
src_collection = Sequence._get_collection(file_locations)
padding = src_collection.padding
# sometimes Clique doesn't get the padding right, so
# we need to calculate it manually
if padding == 0:
padding = len(str(max(src_collection.indexes)))
return padding
@staticmethod
def get_frame_list(
file_locations: FileLocations,
regex: Optional[Pattern] = None,
) -> list[int]:
r"""Get the frame list.
Args:
file_locations (FileLocations): File locations trait.
regex (Optional[Pattern]): Regular expression to match
frame numbers. This is passed to ``clique.assemble()``.
Default clique pattern is::
\.(?P<index>(?P<padding>0*)\d+)\.\D+\d?$
Returns:
list[int]: List of frame numbers.
"""
src_collection = Sequence._get_collection(file_locations, regex)
return list(src_collection.indexes)
def get_frame_pattern(self) -> Pattern:
"""Get frame regex as a pattern.
If the regex is a string, it will compile it to the pattern.
Returns:
Pattern: Compiled regex pattern.
"""
if self.frame_regex:
if isinstance(self.frame_regex, str):
return re.compile(self.frame_regex)
return self.frame_regex
return re.compile(
r"\.(?P<index>(?P<padding>0*)\d+)\.\D+\d?$")
# Do we need one for drop and non-drop frame?
@dataclass
class SMPTETimecode(TraitBase):
"""SMPTE Timecode trait model.
Attributes:
timecode (str): SMPTE Timecode HH:MM:SS:FF
"""
name: ClassVar[str] = "Timecode"
description: ClassVar[str] = "SMPTE Timecode Trait"
id: ClassVar[str] = "ayon.time.SMPTETimecode.v1"
persistent: ClassVar[bool] = True
timecode: str
@dataclass
class Static(TraitBase):
"""Static time trait.
Used to define static time (single frame).
"""
name: ClassVar[str] = "Static"
description: ClassVar[str] = "Static Time Trait"
id: ClassVar[str] = "ayon.time.Static.v1"
persistent: ClassVar[bool] = True

View file

@ -0,0 +1,93 @@
"""3D traits."""
from dataclasses import dataclass
from typing import ClassVar
from .trait import TraitBase
@dataclass
class Spatial(TraitBase):
"""Spatial trait model.
Trait describing spatial information. Up axis valid strings are
"Y", "Z", "X". Handedness valid strings are "left", "right". Meters per
unit is a float value.
Example::
Spatial(up_axis="Y", handedness="right", meters_per_unit=1.0)
Todo:
* Add value validation for up_axis and handedness.
Attributes:
up_axis (str): Up axis.
handedness (str): Handedness.
meters_per_unit (float): Meters per unit.
"""
id: ClassVar[str] = "ayon.3d.Spatial.v1"
name: ClassVar[str] = "Spatial"
description: ClassVar[str] = "Spatial trait model."
persistent: ClassVar[bool] = True
up_axis: str
handedness: str
meters_per_unit: float
@dataclass
class Geometry(TraitBase):
"""Geometry type trait model.
Type trait for geometry data.
Sync with OpenAssetIO MediaCreation Traits.
"""
id: ClassVar[str] = "ayon.3d.Geometry.v1"
name: ClassVar[str] = "Geometry"
description: ClassVar[str] = "Geometry trait model."
persistent: ClassVar[bool] = True
@dataclass
class Shader(TraitBase):
"""Shader trait model.
Type trait for shader data.
Sync with OpenAssetIO MediaCreation Traits.
"""
id: ClassVar[str] = "ayon.3d.Shader.v1"
name: ClassVar[str] = "Shader"
description: ClassVar[str] = "Shader trait model."
persistent: ClassVar[bool] = True
@dataclass
class Lighting(TraitBase):
"""Lighting trait model.
Type trait for lighting data.
Sync with OpenAssetIO MediaCreation Traits.
"""
id: ClassVar[str] = "ayon.3d.Lighting.v1"
name: ClassVar[str] = "Lighting"
description: ClassVar[str] = "Lighting trait model."
persistent: ClassVar[bool] = True
@dataclass
class IESProfile(TraitBase):
"""IES profile (IES-LM-64) type trait model.
Sync with OpenAssetIO MediaCreation Traits.
"""
id: ClassVar[str] = "ayon.3d.IESProfile.v1"
name: ClassVar[str] = "IESProfile"
description: ClassVar[str] = "IES profile trait model."
persistent: ClassVar[bool] = True

View file

@ -0,0 +1,147 @@
"""Defines the base trait model and representation."""
from __future__ import annotations
import re
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass
from typing import TYPE_CHECKING, Generic, Optional, TypeVar
if TYPE_CHECKING:
from .representation import Representation
T = TypeVar("T", bound="TraitBase")
@dataclass
class TraitBase(ABC):
"""Base trait model.
This model must be used as a base for all trait models.
``id``, ``name``, and ``description`` are abstract attributes that must be
implemented in the derived classes.
"""
@property
@abstractmethod
def id(self) -> str:
"""Abstract attribute for ID."""
...
@property
@abstractmethod
def name(self) -> str:
"""Abstract attribute for name."""
...
@property
@abstractmethod
def description(self) -> str:
"""Abstract attribute for description."""
...
def validate_trait(self, representation: Representation) -> None: # noqa: PLR6301
"""Validate the trait.
This method should be implemented in the derived classes to validate
the trait data. It can be used by traits to validate against other
traits in the representation.
Args:
representation (Representation): Representation instance.
"""
return
@classmethod
def get_version(cls) -> Optional[int]:
# sourcery skip: use-named-expression
"""Get a trait version from ID.
This assumes Trait ID ends with `.v{version}`. If not, it will
return None.
Returns:
Optional[int]: Trait version
"""
version_regex = r"v(\d+)$"
match = re.search(version_regex, str(cls.id))
return int(match[1]) if match else None
@classmethod
def get_versionless_id(cls) -> str:
"""Get a trait ID without a version.
Returns:
str: Trait ID without a version.
"""
return re.sub(r"\.v\d+$", "", str(cls.id))
def as_dict(self) -> dict:
"""Return a trait as a dictionary.
Returns:
dict: Trait as dictionary.
"""
return asdict(self)
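# Usage sketch of the version helpers with a concrete trait ID; the Sequence
# trait in the temporal module uses "ayon.time.Sequence.v1":
#
#     >>> Sequence.get_version()
#     1
#     >>> Sequence.get_versionless_id()
#     'ayon.time.Sequence'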
class IncompatibleTraitVersionError(Exception):
"""Incompatible trait version exception.
This exception is raised when the trait version is incompatible with the
current version of the trait.
"""
class UpgradableTraitError(Exception, Generic[T]):
"""Upgradable trait version exception.
This exception is raised when the trait can upgrade existing data
meant for older versions of the trait. It must implement an `upgrade`
method that will take old trait data as an argument to handle the upgrade.
"""
trait: T
old_data: dict
class LooseMatchingTraitError(Exception, Generic[T]):
"""Loose matching trait exception.
This exception is raised when the trait is found with a loose matching
criteria.
"""
found_trait: T
expected_id: str
class TraitValidationError(Exception):
"""Trait validation error exception.
This exception is raised when the trait validation fails.
"""
def __init__(self, scope: str, message: str):
"""Initialize the exception.
We could determine the scope from the stack in the future,
provided the scope is always Trait name.
Args:
scope (str): Scope of the error.
message (str): Error message.
"""
super().__init__(f"{scope}: {message}")
class MissingTraitError(TypeError):
"""Missing trait error exception.
This exception is raised when the trait is missing.
"""

View file

@ -0,0 +1,208 @@
"""Two-dimensional image traits."""
from __future__ import annotations
import re
from dataclasses import dataclass
from typing import TYPE_CHECKING, ClassVar, Optional
from .trait import TraitBase
if TYPE_CHECKING:
from .content import FileLocation, FileLocations
@dataclass
class Image(TraitBase):
"""Image trait model.
Type trait model for image.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with version
"""
name: ClassVar[str] = "Image"
description: ClassVar[str] = "Image Trait"
id: ClassVar[str] = "ayon.2d.Image.v1"
persistent: ClassVar[bool] = True
@dataclass
class PixelBased(TraitBase):
"""PixelBased trait model.
The pixel-related trait for image data.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with a version
display_window_width (int): Width of the image display window.
display_window_height (int): Height of the image display window.
pixel_aspect_ratio (float): Pixel aspect ratio.
"""
name: ClassVar[str] = "PixelBased"
description: ClassVar[str] = "PixelBased Trait Model"
id: ClassVar[str] = "ayon.2d.PixelBased.v1"
persistent: ClassVar[bool] = True
display_window_width: int
display_window_height: int
pixel_aspect_ratio: float
@dataclass
class Planar(TraitBase):
"""Planar trait model.
This model represents an Image with planar configuration.
Todo:
* (antirotor): Is this really a planar configuration? As with
bit planes and everything? If it serves as differentiator for
Deep images, should it be named differently? Like Raster?
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be namespaced trait name with version
planar_configuration (str): Planar configuration.
"""
name: ClassVar[str] = "Planar"
description: ClassVar[str] = "Planar Trait Model"
id: ClassVar[str] = "ayon.2d.Planar.v1"
persistent: ClassVar[bool] = True
planar_configuration: str
@dataclass
class Deep(TraitBase):
"""Deep trait model.
Type trait model for deep EXR images.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with a version
"""
name: ClassVar[str] = "Deep"
description: ClassVar[str] = "Deep Trait Model"
id: ClassVar[str] = "ayon.2d.Deep.v1"
persistent: ClassVar[bool] = True
@dataclass
class Overscan(TraitBase):
"""Overscan trait model.
This model represents an overscan (or underscan) trait. Defines the
extra pixels around the image.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be a namespaced trait name with a version
left (int): Left overscan/underscan.
right (int): Right overscan/underscan.
top (int): Top overscan/underscan.
bottom (int): Bottom overscan/underscan.
"""
name: ClassVar[str] = "Overscan"
description: ClassVar[str] = "Overscan Trait"
id: ClassVar[str] = "ayon.2d.Overscan.v1"
persistent: ClassVar[bool] = True
left: int
right: int
top: int
bottom: int
@dataclass
class UDIM(TraitBase):
"""UDIM trait model.
This model represents a UDIM trait.
Attributes:
name (str): Trait name.
description (str): Trait description.
id (str): id should be namespaced trait name with version
udim (int): UDIM value.
udim_regex (str): UDIM regex.
"""
name: ClassVar[str] = "UDIM"
description: ClassVar[str] = "UDIM Trait"
id: ClassVar[str] = "ayon.2d.UDIM.v1"
persistent: ClassVar[bool] = True
udim: list[int]
udim_regex: Optional[str] = r"(?:\.|_)(?P<udim>\d+)\.\D+\d?$"
# Field validator for udim_regex - this works in the pydantic model v2
# but not with the pure data classes.
@classmethod
def validate_frame_regex(cls, v: Optional[str]) -> Optional[str]:
"""Validate udim regex.
Returns:
Optional[str]: UDIM regex.
Raises:
ValueError: UDIM regex must include 'udim' named group.
"""
if v is not None and "?P<udim>" not in v:
msg = "UDIM regex must include 'udim' named group"
raise ValueError(msg)
return v
def get_file_location_for_udim(
self,
file_locations: FileLocations,
udim: int,
) -> Optional[FileLocation]:
"""Get file location for UDIM.
Args:
file_locations (FileLocations): File locations.
udim (int): UDIM value.
Returns:
Optional[FileLocation]: File location.
"""
if not self.udim_regex:
return None
pattern = re.compile(self.udim_regex)
for location in file_locations.file_paths:
result = re.search(pattern, location.file_path.name)
if result:
udim_index = int(result.group("udim"))
if udim_index == udim:
return location
return None
def get_udim_from_file_location(
self, file_location: FileLocation) -> Optional[int]:
"""Get UDIM from the file location.
Args:
file_location (FileLocation): File location.
Returns:
Optional[int]: UDIM value.
"""
if not self.udim_regex:
return None
pattern = re.compile(self.udim_regex)
result = re.search(pattern, file_location.file_path.name)
if result:
return int(result.group("udim"))
return None
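# Usage sketch with the default ``udim_regex`` and a hypothetical texture
# filename:
#
#     >>> import re
#     >>> pattern = re.compile(r"(?:\.|_)(?P<udim>\d+)\.\D+\d?$")
#     >>> re.search(pattern, "wall_diffuse.1001.exr").group("udim")
#     '1001'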

View file

@ -0,0 +1,90 @@
"""Utility functions for traits."""
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from clique import assemble
from ayon_core.addon import AddonsManager, ITraits
from ayon_core.pipeline.traits.temporal import FrameRanged
if TYPE_CHECKING:
from pathlib import Path
from ayon_core.pipeline.traits.trait import TraitBase
def get_sequence_from_files(paths: list[Path]) -> FrameRanged:
"""Get the original frame range from files.
Note that this cannot guess frame rate, so it's set to 25.
This will also fail on paths that cannot be assembled into
one collection without any remainders.
Args:
paths (list[Path]): List of file paths.
Returns:
FrameRanged: FrameRanged trait.
Raises:
ValueError: If paths cannot be assembled into one collection
"""
cols, rems = assemble([path.as_posix() for path in paths])
if rems:
msg = "Cannot assemble paths into one collection"
raise ValueError(msg)
if len(cols) != 1:
msg = "More than one collection found"
raise ValueError(msg)
col = cols[0]
sorted_frames = sorted(col.indexes)
# First frame used for end value
first_frame = sorted_frames[0]
# Get last frame for padding
last_frame = sorted_frames[-1]
# Use padding from a collection of the last frame lengths as string
# padding = max(col.padding, len(str(last_frame)))
return FrameRanged(
frame_start=first_frame, frame_end=last_frame,
frames_per_second="25.0"
)
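# Usage sketch with hypothetical paths; the resulting trait only carries the
# detected frame range, and fps is always reported as 25:
#
#     >>> from pathlib import Path
#     >>> paths = [Path(f"render.{f:04d}.exr") for f in range(1001, 1006)]
#     >>> ranged = get_sequence_from_files(paths)
#     >>> ranged.frame_start, ranged.frame_end
#     (1001, 1005)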
def get_available_traits(
addons_manager: Optional[AddonsManager] = None
) -> Optional[list[TraitBase]]:
"""Get available traits from active addons.
Args:
addons_manager (Optional[AddonsManager]): Addons manager instance.
If not provided, a new one will be created. Within pyblish
plugins, you can use an already collected instance of
AddonsManager from context `context.data["ayonAddonsManager"]`.
Returns:
list[TraitBase]: List of available traits.
"""
if addons_manager is None:
# Create a new instance of AddonsManager
addons_manager = AddonsManager()
# Get active addons
enabled_addons = addons_manager.get_enabled_addons()
traits = []
for addon in enabled_addons:
if not issubclass(type(addon), ITraits):
# Skip addons not providing traits
continue
# Get traits from addon
addon_traits = addon.get_addon_traits()
if addon_traits:
# Add traits to a list
for trait in addon_traits:
if trait not in traits:
traits.append(trait)
return traits
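# Usage sketch inside a pyblish plugin, reusing the already collected manager
# mentioned in the docstring (``context`` is the pyblish context):
#
#     addons_manager = context.data["ayonAddonsManager"]
#     addon_traits = get_available_traits(addons_manager)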

View file

@ -329,9 +329,9 @@ def get_last_workfile(
Returns:
str: Last or first workfile as filename or full path to filename.
"""
filename, version = get_last_workfile_with_version(
"""
filename, _version = get_last_workfile_with_version(
workdir, file_template, fill_data, extensions
)
if filename is None:

View file

@ -8,7 +8,7 @@ targeted by task types and names.
Placeholders are created using placeholder plugins which should care about
logic and data of placeholder items. 'PlaceholderItem' is used to keep track
about it's progress.
about its progress.
"""
import os
@ -17,6 +17,7 @@ import collections
import copy
from abc import ABC, abstractmethod
import ayon_api
from ayon_api import (
get_folders,
get_folder_by_path,
@ -60,6 +61,32 @@ from ayon_core.pipeline.create import (
_NOT_SET = object()
class EntityResolutionError(Exception):
"""Exception raised when entity URI resolution fails."""
def resolve_entity_uri(entity_uri: str) -> str:
"""Resolve AYON entity URI to a filesystem path for local system."""
response = ayon_api.post(
"resolve",
resolveRoots=True,
uris=[entity_uri]
)
if response.status_code != 200:
raise RuntimeError(
f"Unable to resolve AYON entity URI filepath for "
f"'{entity_uri}': {response.text}"
)
entities = response.data[0]["entities"]
if len(entities) != 1:
raise EntityResolutionError(
f"Unable to resolve AYON entity URI '{entity_uri}' to a "
f"single filepath. Received data: {response.data}"
)
return entities[0]["filePath"]
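# Usage sketch with a hypothetical entity URI (the exact URI shape depends on
# the project and server); roots are resolved server-side because of
# ``resolveRoots=True``:
#
#     path = resolve_entity_uri(
#         "ayon+entity://demo_project/assets/char?product=workfileTemplate"
#     )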
class TemplateNotFound(Exception):
"""Exception raised when template does not exist."""
pass
@ -823,7 +850,6 @@ class AbstractTemplateBuilder(ABC):
"""
host_name = self.host_name
project_name = self.project_name
task_name = self.current_task_name
task_type = self.current_task_type
@ -835,7 +861,6 @@ class AbstractTemplateBuilder(ABC):
"task_names": task_name
}
)
if not profile:
raise TemplateProfileNotFound((
"No matching profile found for task '{}' of type '{}' "
@ -843,6 +868,22 @@ class AbstractTemplateBuilder(ABC):
).format(task_name, task_type, host_name))
path = profile["path"]
if not path:
raise TemplateLoadFailed((
"Template path is not set.\n"
"Path need to be set in {}\\Template Workfile Build "
"Settings\\Profiles"
).format(host_name.title()))
resolved_path = self.resolve_template_path(path)
if not resolved_path or not os.path.exists(resolved_path):
raise TemplateNotFound(
"Template file found in AYON settings for task '{}' with host "
"'{}' does not exist. (Not found: {})".format(
task_name, host_name, resolved_path)
)
self.log.info(f"Found template at: '{resolved_path}'")
# switch to remove placeholders after they are used
keep_placeholder = profile.get("keep_placeholder")
@ -852,44 +893,86 @@ class AbstractTemplateBuilder(ABC):
if keep_placeholder is None:
keep_placeholder = True
if not path:
raise TemplateLoadFailed((
"Template path is not set.\n"
"Path need to be set in {}\\Template Workfile Build "
"Settings\\Profiles"
).format(host_name.title()))
# Try to fill path with environments and anatomy roots
anatomy = Anatomy(project_name)
fill_data = {
key: value
for key, value in os.environ.items()
return {
"path": resolved_path,
"keep_placeholder": keep_placeholder,
"create_first_version": create_first_version
}
fill_data["root"] = anatomy.roots
fill_data["project"] = {
"name": project_name,
"code": anatomy.project_code,
}
def resolve_template_path(self, path, fill_data=None) -> str:
"""Resolve the template path.
path = self.resolve_template_path(path, fill_data)
By default, this:
- Resolves AYON entity URI to a filesystem path
- Returns path directly if it exists on disk.
- Resolves template keys through anatomy and environment variables.
This can be overridden in host integrations to perform additional
resolving over the template. Like, `hou.text.expandString` in Houdini.
It's recommended to still call the super().resolve_template_path()
to ensure the basic resolving is done across all integrations.
Arguments:
path (str): The input path.
fill_data (dict[str, str]): Deprecated. This is computed inside
the method using the current environment and project settings.
Used to be the data to use for template formatting.
Returns:
str: The resolved path.
"""
# If the path is an AYON entity URI, then resolve the filepath
# through the backend
if path.startswith("ayon+entity://") or path.startswith("ayon://"):
# This is a special case where the path is an AYON entity URI
# We need to resolve it to a filesystem path
resolved_path = resolve_entity_uri(path)
return resolved_path
# If the path is set and it's found on disk, return it directly
if path and os.path.exists(path):
self.log.info("Found template at: '{}'".format(path))
return {
"path": path,
"keep_placeholder": keep_placeholder,
"create_first_version": create_first_version
return path
# We may have path for another platform, like C:/path/to/file
# or a path with template keys, like {project[code]} or both.
# Try to fill path with environments and anatomy roots
project_name = self.project_name
anatomy = Anatomy(project_name)
# Simple check whether the path contains any template keys
if "{" in path:
fill_data = {
key: value
for key, value in os.environ.items()
}
fill_data["root"] = anatomy.roots
fill_data["project"] = {
"name": project_name,
"code": anatomy.project_code,
}
solved_path = None
# Format the template using local fill data
result = StringTemplate.format_template(path, fill_data)
if not result.solved:
return path
path = result.normalized()
if os.path.exists(path):
return path
# If the path were set in settings using a Windows path and we
# are now on a Linux system, we try to convert the solved path to
# the current platform.
while True:
try:
solved_path = anatomy.path_remapper(path)
except KeyError as missing_key:
raise KeyError(
"Could not solve key '{}' in template path '{}'".format(
missing_key, path))
f"Could not solve key '{missing_key}'"
f" in template path '{path}'"
)
if solved_path is None:
solved_path = path
@ -898,40 +981,7 @@ class AbstractTemplateBuilder(ABC):
path = solved_path
solved_path = os.path.normpath(solved_path)
if not os.path.exists(solved_path):
raise TemplateNotFound(
"Template found in AYON settings for task '{}' with host "
"'{}' does not exists. (Not found : {})".format(
task_name, host_name, solved_path))
self.log.info("Found template at: '{}'".format(solved_path))
return {
"path": solved_path,
"keep_placeholder": keep_placeholder,
"create_first_version": create_first_version
}
def resolve_template_path(self, path, fill_data) -> str:
"""Resolve the template path.
By default, this does nothing except returning the path directly.
This can be overridden in host integrations to perform additional
resolving over the template. Like, `hou.text.expandString` in Houdini.
Arguments:
path (str): The input path.
fill_data (dict[str, str]): Data to use for template formatting.
Returns:
str: The resolved path.
"""
result = StringTemplate.format_template(path, fill_data)
if result.solved:
path = result.normalized()
return path
return solved_path
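# Usage sketch: a settings path containing template keys is formatted with
# the local environment, anatomy roots and project data before the on-disk
# check (the path and keys below are illustrative only):
#
#     path = "{root[work]}/{project[name]}/templates/build_template.ma"
#     resolved = self.resolve_template_path(path)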
def emit_event(self, topic, data=None, source=None) -> Event:
return self._event_system.emit(topic, data, source)

View file

@ -211,7 +211,7 @@ class DeleteOldVersions(load.ProductLoaderPlugin):
f"This will keep only the last {versions_to_keep} "
f"versions for the {num_contexts} selected product{s}."
)
informative_text="Warning: This will delete files from disk"
informative_text = "Warning: This will delete files from disk"
detailed_text = (
f"Keep only {versions_to_keep} versions for:\n{contexts_list}"
)

View file

@ -22,6 +22,7 @@ from ayon_core.tools.utils import show_message_dialog
OTIO = None
FRAME_SPLITTER = "__frame_splitter__"
def _import_otio():
global OTIO
if OTIO is None:

View file

@ -1,11 +1,14 @@
# -*- coding: utf-8 -*-
"""Cleanup leftover files from publish."""
import os
import shutil
import pyblish.api
import re
import shutil
import tempfile
import pyblish.api
from ayon_core.lib import is_in_tests
from ayon_core.pipeline import PublishError
class CleanUp(pyblish.api.InstancePlugin):
@ -48,17 +51,15 @@ class CleanUp(pyblish.api.InstancePlugin):
if is_in_tests():
# let automatic test process clean up temporary data
return
# Get the errored instances
failed = []
# If instance has errors, do not clean up
for result in instance.context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
assert instance not in failed, (
"Result of '{}' instance were not success".format(
instance.data["name"]
)
)
if result["error"] is not None and result["instance"] is instance:
raise PublishError(
"Result of '{}' instance were not success".format(
instance.data["name"]
)
)
_skip_cleanup_filepaths = instance.context.data.get(
"skipCleanupFilepaths"
@ -71,10 +72,17 @@ class CleanUp(pyblish.api.InstancePlugin):
self.log.debug("Cleaning renders new...")
self.clean_renders(instance, skip_cleanup_filepaths)
if [ef for ef in self.exclude_families
if instance.data["productType"] in ef]:
# TODO: Figure out whether this could be refactored to just a
# product_type in self.exclude_families check.
product_type = instance.data["productType"]
if any(
product_type in exclude_family
for exclude_family in self.exclude_families
):
self.log.debug(
"Skipping cleanup for instance because product "
f"type is excluded from cleanup: {product_type}")
return
import tempfile
temp_root = tempfile.gettempdir()
staging_dir = instance.data.get("stagingDir", None)

View file

@ -394,7 +394,6 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
if aov:
anatomy_data["aov"] = aov
def _fill_folder_data(self, instance, project_entity, anatomy_data):
# QUESTION: should we make sure that all folder data are popped if
# folder data cannot be found?

View file

@ -0,0 +1,106 @@
import pyblish.api
from ayon_core.lib import EnumDef
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import PublishError
class CollectExplicitResolution(
pyblish.api.InstancePlugin,
publish.AYONPyblishPluginMixin,
):
"""Collect explicit user defined resolution attributes for instances"""
label = "Choose Explicit Resolution"
order = pyblish.api.CollectorOrder - 0.091
settings_category = "core"
enabled = False
default_resolution_item = (None, "Don't override")
# Settings
product_types = []
options = []
# caching resolution items
resolution_items = None
def process(self, instance):
"""Process the instance and collect explicit resolution attributes"""
# Get the values from the instance data
values = self.get_attr_values_from_data(instance.data)
resolution_value = values.get("explicit_resolution", None)
if resolution_value is None:
return
# Get the width, height and pixel_aspect from the resolution value
resolution_data = self._get_resolution_values(resolution_value)
# Set the values to the instance data
instance.data.update(resolution_data)
def _get_resolution_values(self, resolution_value):
"""
Returns width, height and pixel_aspect from the resolution value
Arguments:
resolution_value (str): resolution value
Returns:
dict: dictionary with width, height and pixel_aspect
"""
resolution_items = self._get_resolution_items()
# ensure resolution_value is part of expected items
item_values = resolution_items.get(resolution_value)
# if the item is in the cache, get the values from it
if item_values:
return {
"resolutionWidth": item_values["width"],
"resolutionHeight": item_values["height"],
"pixelAspect": item_values["pixel_aspect"],
}
raise PublishError(
f"Invalid resolution value: {resolution_value} "
f"expected choices: {resolution_items}"
)
@classmethod
def _get_resolution_items(cls):
if cls.resolution_items is None:
resolution_items = {}
for item in cls.options:
item_text = (
f"{item['width']}x{item['height']} "
f"({item['pixel_aspect']})"
)
resolution_items[item_text] = item
cls.resolution_items = resolution_items
return cls.resolution_items
@classmethod
def get_attr_defs_for_instance(
cls, create_context, instance,
):
if instance.product_type not in cls.product_types:
return []
# Get the resolution items
resolution_items = cls._get_resolution_items()
items = [cls.default_resolution_item]
# Add all cached resolution items to the dropdown options
for item_text in resolution_items:
items.append((item_text, item_text))
return [
EnumDef(
"explicit_resolution",
items,
default="Don't override",
label="Force product resolution",
),
]
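# Sketch of how a single settings item from ``options`` maps to a dropdown
# label (values are illustrative):
#
#     item = {"width": 1920, "height": 1080, "pixel_aspect": 1.0}
#     label = f"{item['width']}x{item['height']} ({item['pixel_aspect']})"
#     # -> "1920x1080 (1.0)"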

View file

@ -43,4 +43,3 @@ class CollectCoreJobEnvVars(pyblish.api.ContextPlugin):
if value:
self.log.debug(f"Setting job env: {key}: {value}")
env[key] = value

View file

@ -50,7 +50,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
"comments": instance.data.get("comments", []),
}
shot_data["attributes"] = {}
shot_data["attributes"] = {}
SHOT_ATTRS = (
"handleStart",
"handleEnd",

View file

@ -32,16 +32,16 @@ class CollectManagedStagingDir(pyblish.api.InstancePlugin):
label = "Collect Managed Staging Directory"
order = pyblish.api.CollectorOrder + 0.4990
def process(self, instance):
def process(self, instance: pyblish.api.Instance):
""" Collect the staging data and stores it to the instance.
Args:
instance (object): The instance to inspect.
"""
staging_dir_path = get_instance_staging_dir(instance)
persistance = instance.data.get("stagingDir_persistent", False)
persistence: bool = instance.data.get("stagingDir_persistent", False)
self.log.info((
self.log.debug(
f"Instance staging dir was set to `{staging_dir_path}` "
f"and persistence is set to `{persistance}`"
))
f"and persistence is set to `{persistence}`"
)

View file

@ -194,7 +194,6 @@ class CollectOtioSubsetResources(
repre = self._create_representation(
frame_start, frame_end, file=filename)
else:
_trim = False
dirname, filename = os.path.split(media_ref.target_url)
@ -209,7 +208,6 @@ class CollectOtioSubsetResources(
repre = self._create_representation(
frame_start, frame_end, file=filename, trim=_trim)
instance.data["originalDirname"] = self.staging_dir
# add representation to instance data
@ -221,7 +219,6 @@ class CollectOtioSubsetResources(
instance.data["representations"].append(repre)
self.log.debug(instance.data)
def _create_representation(self, start, end, **kwargs):

View file

@ -31,6 +31,9 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
# Keep "filesequence" for backwards compatibility of older jobs
targets = ["filesequence", "farm"]
label = "Collect rendered frames"
settings_category = "core"
remove_files = False
_context = None
@ -120,7 +123,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
self._fill_staging_dir(repre_data, anatomy)
representations.append(repre_data)
if not staging_dir_persistent:
if self.remove_files and not staging_dir_persistent:
add_repre_files_for_cleanup(instance, repre_data)
instance.data["representations"] = representations
@ -170,7 +173,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
os.environ.update(session_data)
staging_dir_persistent = self._process_path(data, anatomy)
if not staging_dir_persistent:
if self.remove_files and not staging_dir_persistent:
context.data["cleanupFullPaths"].append(path)
context.data["cleanupEmptyDirs"].append(
os.path.dirname(path)

View file

@ -58,7 +58,7 @@ class ExtractOIIOTranscode(publish.Extractor):
optional = True
# Supported extensions
supported_exts = ["exr", "jpg", "jpeg", "png", "dpx"]
supported_exts = {"exr", "jpg", "jpeg", "png", "dpx"}
# Configurable by Settings
profiles = None
@ -280,10 +280,14 @@ class ExtractOIIOTranscode(publish.Extractor):
collection = collections[0]
frames = list(collection.indexes)
if collection.holes():
if collection.holes().indexes:
return files_to_convert
frame_str = "{}-{}#".format(frames[0], frames[-1])
# Get the padding from the collection
# This is the number of digits used in the frame numbers
padding = collection.padding
frame_str = "{}-{}%0{}d".format(frames[0], frames[-1], padding)
file_name = "{}{}{}".format(collection.head, frame_str,
collection.tail)

View file

@ -54,7 +54,7 @@ class ExtractOTIOReview(
# plugin default attributes
to_width = 1280
to_height = 720
output_ext = ".jpg"
output_ext = ".png"
def process(self, instance):
# Not all hosts can import these modules.
@ -510,6 +510,12 @@ class ExtractOTIOReview(
"-tune", "stillimage"
])
if video or sequence:
command.extend([
"-vf", f"scale={self.to_width}:{self.to_height}:flags=lanczos",
"-compression_level", "5",
])
# add output attributes
command.extend([
"-start_number", str(out_frame_start)
@ -520,9 +526,10 @@ class ExtractOTIOReview(
input_extension
and self.output_ext == input_extension
):
command.extend([
"-c", "copy"
])
command.extend(["-c", "copy"])
else:
# For lossy formats, force re-encode
command.extend(["-pix_fmt", "rgba"])
# add output path at the end
command.append(output_path)

View file

@ -1,3 +1,4 @@
from __future__ import annotations
import os
import re
import copy
@ -5,11 +6,16 @@ import json
import shutil
import subprocess
from abc import ABC, abstractmethod
from typing import Any, Optional
from dataclasses import dataclass, field
import tempfile
import clique
import speedcopy
import pyblish.api
from ayon_api import get_last_version_by_product_name, get_representations
from ayon_core.lib import (
get_ffmpeg_tool_args,
filter_profiles,
@ -31,6 +37,39 @@ from ayon_core.pipeline.publish import (
from ayon_core.pipeline.publish.lib import add_repre_files_for_cleanup
@dataclass
class TempData:
"""Temporary data used across extractor's process."""
fps: float
frame_start: int
frame_end: int
handle_start: int
handle_end: int
frame_start_handle: int
frame_end_handle: int
output_frame_start: int
output_frame_end: int
pixel_aspect: float
resolution_width: int
resolution_height: int
origin_repre: dict[str, Any]
input_is_sequence: bool
first_sequence_frame: int
input_allow_bg: bool
with_audio: bool
without_handles: bool
handles_are_set: bool
input_ext: str
explicit_input_paths: list[str]
paths_to_remove: list[str]
# Set later
full_output_path: str = ""
filled_files: dict[int, str] = field(default_factory=dict)
output_ext_is_image: bool = True
output_is_sequence: bool = True
def frame_to_timecode(frame: int, fps: float) -> str:
"""Convert a frame number and FPS to editorial timecode (HH:MM:SS:FF).
@ -96,11 +135,11 @@ class ExtractReview(pyblish.api.InstancePlugin):
]
# Supported extensions
image_exts = ["exr", "jpg", "jpeg", "png", "dpx", "tga", "tiff", "tif"]
video_exts = ["mov", "mp4"]
supported_exts = image_exts + video_exts
image_exts = {"exr", "jpg", "jpeg", "png", "dpx", "tga", "tiff", "tif"}
video_exts = {"mov", "mp4"}
supported_exts = image_exts | video_exts
alpha_exts = ["exr", "png", "dpx"]
alpha_exts = {"exr", "png", "dpx"}
# Preset attributes
profiles = []
@ -400,15 +439,73 @@ class ExtractReview(pyblish.api.InstancePlugin):
)
temp_data = self.prepare_temp_data(instance, repre, output_def)
files_to_clean = []
if temp_data["input_is_sequence"]:
new_frame_files = {}
if temp_data.input_is_sequence:
self.log.debug("Checking sequence to fill gaps in sequence..")
files_to_clean = self.fill_sequence_gaps(
files=temp_data["origin_repre"]["files"],
staging_dir=new_repre["stagingDir"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"]
)
files = temp_data.origin_repre["files"]
collections = clique.assemble(
files,
)[0]
if len(collections) != 1:
raise KnownPublishError(
"Multiple collections {} found.".format(collections))
collection = collections[0]
fill_missing_frames = _output_def["fill_missing_frames"]
if fill_missing_frames == "closest_existing":
new_frame_files = self.fill_sequence_gaps_from_existing(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data.frame_start,
end_frame=temp_data.frame_end,
)
elif fill_missing_frames == "blank":
new_frame_files = self.fill_sequence_gaps_with_blanks(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data.frame_start,
end_frame=temp_data.frame_end,
resolution_width=temp_data.resolution_width,
resolution_height=temp_data.resolution_height,
extension=temp_data.input_ext,
temp_data=temp_data
)
elif fill_missing_frames == "previous_version":
new_frame_files = self.fill_sequence_gaps_with_previous(
collection=collection,
staging_dir=new_repre["stagingDir"],
instance=instance,
current_repre_name=repre["name"],
start_frame=temp_data.frame_start,
end_frame=temp_data.frame_end,
)
# fallback to original workflow
if new_frame_files is None:
new_frame_files = (
self.fill_sequence_gaps_from_existing(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data.frame_start,
end_frame=temp_data.frame_end,
))
elif fill_missing_frames == "only_rendered":
temp_data.explicit_input_paths = [
os.path.join(
new_repre["stagingDir"], file
).replace("\\", "/")
for file in files
]
frame_start = min(collection.indexes)
frame_end = max(collection.indexes)
# modify range for burnins
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
temp_data.frame_start = frame_start
temp_data.frame_end = frame_end
temp_data.filled_files = new_frame_files
# create or update outputName
output_name = new_repre.get("outputName", "")
@ -416,7 +513,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
if output_name:
output_name += "_"
output_name += output_def["filename_suffix"]
if temp_data["without_handles"]:
if temp_data.without_handles:
output_name += "_noHandles"
# add outputName to anatomy format fill_data
@ -429,7 +526,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
# like Resolve or Premiere can detect the start frame for e.g.
# review output files
"timecode": frame_to_timecode(
frame=temp_data["frame_start_handle"],
frame=temp_data.frame_start_handle,
fps=float(instance.data["fps"])
)
})
@ -446,7 +543,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
except ZeroDivisionError:
# TODO recalculate width and height using OIIO before
# conversion
if 'exr' in temp_data["origin_repre"]["ext"]:
if 'exr' in temp_data.origin_repre["ext"]:
self.log.warning(
(
"Unsupported compression on input files."
@ -465,17 +562,20 @@ class ExtractReview(pyblish.api.InstancePlugin):
run_subprocess(subprcs_cmd, shell=True, logger=self.log)
# delete files added to fill gaps
if files_to_clean:
for f in files_to_clean:
os.unlink(f)
if new_frame_files:
for filepath in new_frame_files.values():
os.unlink(filepath)
for filepath in temp_data.paths_to_remove:
os.unlink(filepath)
new_repre.update({
"fps": temp_data["fps"],
"fps": temp_data.fps,
"name": "{}_{}".format(output_name, output_ext),
"outputName": output_name,
"outputDef": output_def,
"frameStartFtrack": temp_data["output_frame_start"],
"frameEndFtrack": temp_data["output_frame_end"],
"frameStartFtrack": temp_data.output_frame_start,
"frameEndFtrack": temp_data.output_frame_end,
"ffmpeg_cmd": subprcs_cmd
})
@ -501,7 +601,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
# - there can be more than one collection
return isinstance(repre["files"], (list, tuple))
def prepare_temp_data(self, instance, repre, output_def):
def prepare_temp_data(self, instance, repre, output_def) -> TempData:
"""Prepare dictionary with values used across extractor's process.
All data are collected from instance, context, origin representation
@ -517,7 +617,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
output_def (dict): Definition of output of this plugin.
Returns:
dict: All data which are used across methods during process.
TempData: All data which are used across methods during process.
Their values should not change during process but new keys
with values may be added.
"""
@ -560,6 +660,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
input_is_sequence = self.input_is_sequence(repre)
input_allow_bg = False
first_sequence_frame = None
if input_is_sequence and repre["files"]:
# Calculate first frame that should be used
cols, _ = clique.assemble(repre["files"])
@ -578,28 +679,33 @@ class ExtractReview(pyblish.api.InstancePlugin):
ext = os.path.splitext(repre["files"][0])[1].replace(".", "")
if ext.lower() in self.alpha_exts:
input_allow_bg = True
else:
ext = os.path.splitext(repre["files"])[1].replace(".", "")
return {
"fps": float(instance.data["fps"]),
"frame_start": frame_start,
"frame_end": frame_end,
"handle_start": handle_start,
"handle_end": handle_end,
"frame_start_handle": frame_start_handle,
"frame_end_handle": frame_end_handle,
"output_frame_start": int(output_frame_start),
"output_frame_end": int(output_frame_end),
"pixel_aspect": instance.data.get("pixelAspect", 1),
"resolution_width": instance.data.get("resolutionWidth"),
"resolution_height": instance.data.get("resolutionHeight"),
"origin_repre": repre,
"input_is_sequence": input_is_sequence,
"first_sequence_frame": first_sequence_frame,
"input_allow_bg": input_allow_bg,
"with_audio": with_audio,
"without_handles": without_handles,
"handles_are_set": handles_are_set
}
return TempData(
fps=float(instance.data["fps"]),
frame_start=frame_start,
frame_end=frame_end,
handle_start=handle_start,
handle_end=handle_end,
frame_start_handle=frame_start_handle,
frame_end_handle=frame_end_handle,
output_frame_start=int(output_frame_start),
output_frame_end=int(output_frame_end),
pixel_aspect=instance.data.get("pixelAspect", 1),
resolution_width=instance.data.get("resolutionWidth"),
resolution_height=instance.data.get("resolutionHeight"),
origin_repre=repre,
input_is_sequence=input_is_sequence,
first_sequence_frame=first_sequence_frame,
input_allow_bg=input_allow_bg,
with_audio=with_audio,
without_handles=without_handles,
handles_are_set=handles_are_set,
input_ext=ext,
explicit_input_paths=[], # absolute paths to rendered files
paths_to_remove=[]
)
def _ffmpeg_arguments(
self,
@ -620,7 +726,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
instance (Instance): Currently processed instance.
new_repre (dict): Representation representing output of this
process.
temp_data (dict): Base data for successful process.
temp_data (TempData): Base data for successful process.
"""
# Get FFmpeg arguments from profile presets
@ -662,31 +768,32 @@ class ExtractReview(pyblish.api.InstancePlugin):
# Set output frames len to 1 when output is single image
if (
temp_data["output_ext_is_image"]
and not temp_data["output_is_sequence"]
temp_data.output_ext_is_image
and not temp_data.output_is_sequence
):
output_frames_len = 1
else:
output_frames_len = (
temp_data["output_frame_end"]
- temp_data["output_frame_start"]
temp_data.output_frame_end
- temp_data.output_frame_start
+ 1
)
duration_seconds = float(output_frames_len / temp_data["fps"])
duration_seconds = float(output_frames_len / temp_data.fps)
# Define which layer should be used
if layer_name:
ffmpeg_input_args.extend(["-layer", layer_name])
if temp_data["input_is_sequence"]:
explicit_input_paths = temp_data.explicit_input_paths
if temp_data.input_is_sequence and not explicit_input_paths:
# Set start frame of input sequence (just frame in filename)
# - definition of input filepath
# - add handle start if output should be without handles
start_number = temp_data["first_sequence_frame"]
if temp_data["without_handles"] and temp_data["handles_are_set"]:
start_number += temp_data["handle_start"]
start_number = temp_data.first_sequence_frame
if temp_data.without_handles and temp_data.handles_are_set:
start_number += temp_data.handle_start
ffmpeg_input_args.extend([
"-start_number", str(start_number)
])
@ -699,32 +806,32 @@ class ExtractReview(pyblish.api.InstancePlugin):
# }
# Add framerate to input when input is sequence
ffmpeg_input_args.extend([
"-framerate", str(temp_data["fps"])
"-framerate", str(temp_data.fps)
])
# Add duration of an input sequence if output is video
if not temp_data["output_is_sequence"]:
if not temp_data.output_is_sequence:
ffmpeg_input_args.extend([
"-to", "{:0.10f}".format(duration_seconds)
])
if temp_data["output_is_sequence"]:
if temp_data.output_is_sequence and not explicit_input_paths:
# Set start frame of output sequence (just frame in filename)
# - this is definition of an output
ffmpeg_output_args.extend([
"-start_number", str(temp_data["output_frame_start"])
"-start_number", str(temp_data.output_frame_start)
])
# Change output's duration and start point if should not contain
# handles
if temp_data["without_handles"] and temp_data["handles_are_set"]:
if temp_data.without_handles and temp_data.handles_are_set:
# Set output duration in seconds
ffmpeg_output_args.extend([
"-t", "{:0.10}".format(duration_seconds)
])
# Add -ss (start offset in seconds) if input is not sequence
if not temp_data["input_is_sequence"]:
start_sec = float(temp_data["handle_start"]) / temp_data["fps"]
if not temp_data.input_is_sequence:
start_sec = float(temp_data.handle_start) / temp_data.fps
# Set start time without handles
# - Skip if start sec is 0.0
if start_sec > 0.0:
@ -733,18 +840,42 @@ class ExtractReview(pyblish.api.InstancePlugin):
])
# Set frame range of output when input or output is sequence
elif temp_data["output_is_sequence"]:
elif temp_data.output_is_sequence:
ffmpeg_output_args.extend([
"-frames:v", str(output_frames_len)
])
# Add video/image input path
ffmpeg_input_args.extend([
"-i", path_to_subprocess_arg(temp_data["full_input_path"])
])
if not explicit_input_paths:
# Add video/image input path
ffmpeg_input_args.extend([
"-i", path_to_subprocess_arg(temp_data.full_input_path)
])
else:
frame_duration = 1 / temp_data.fps
explicit_frames_meta = tempfile.NamedTemporaryFile(
mode="w", prefix="explicit_frames", suffix=".txt", delete=False
)
explicit_frames_meta.close()
explicit_frames_path = explicit_frames_meta.name
with open(explicit_frames_path, "w") as fp:
lines = [
f"file '{path}'{os.linesep}duration {frame_duration}"
for path in temp_data.explicit_input_paths
]
fp.write("\n".join(lines))
temp_data.paths_to_remove.append(explicit_frames_path)
# let ffmpeg use only rendered files, might have gaps
ffmpeg_input_args.extend([
"-f", "concat",
"-safe", "0",
"-i", path_to_subprocess_arg(explicit_frames_path),
"-r", str(temp_data.fps)
])
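# Sketch of the generated concat list, assuming two rendered frames and
# 25 fps (frame_duration == 0.04); ffmpeg then reads only these files:
#
#     file '/staging/render.1001.exr'
#     duration 0.04
#     file '/staging/render.1003.exr'
#     duration 0.04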
# Add audio arguments if there are any. Skipped when output are images.
if not temp_data["output_ext_is_image"] and temp_data["with_audio"]:
if not temp_data.output_ext_is_image and temp_data.with_audio:
audio_in_args, audio_filters, audio_out_args = self.audio_args(
instance, temp_data, duration_seconds
)
@ -766,7 +897,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
bg_red, bg_green, bg_blue, bg_alpha = bg_color
if bg_alpha > 0.0:
if not temp_data["input_allow_bg"]:
if not temp_data.input_allow_bg:
self.log.info((
"Output definition has defined BG color input was"
" resolved as does not support adding BG."
@ -797,7 +928,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
# NOTE This must be latest added item to output arguments.
ffmpeg_output_args.append(
path_to_subprocess_arg(temp_data["full_output_path"])
path_to_subprocess_arg(temp_data.full_output_path)
)
return self.ffmpeg_full_args(
@ -881,8 +1012,159 @@ class ExtractReview(pyblish.api.InstancePlugin):
return all_args
def fill_sequence_gaps(self, files, staging_dir, start_frame, end_frame):
# type: (list, str, int, int) -> list
def fill_sequence_gaps_with_previous(
self,
collection: str,
staging_dir: str,
instance: pyblish.plugin.Instance,
current_repre_name: str,
start_frame: int,
end_frame: int
) -> Optional[dict[int, str]]:
"""Tries to replace missing frames from ones from last version"""
repre_file_paths = self._get_last_version_files(
instance, current_repre_name)
if repre_file_paths is None:
# issues in getting last version files, falling back
return None
prev_collection = clique.assemble(
repre_file_paths,
patterns=[clique.PATTERNS["frames"]],
minimum_items=1
)[0][0]
prev_col_format = prev_collection.format("{head}{padding}{tail}")
added_files = {}
anatomy = instance.context.data["anatomy"]
col_format = collection.format("{head}{padding}{tail}")
for frame in range(start_frame, end_frame + 1):
if frame in collection.indexes:
continue
hole_fpath = os.path.join(staging_dir, col_format % frame)
previous_version_path = prev_col_format % frame
previous_version_path = anatomy.fill_root(previous_version_path)
if not os.path.exists(previous_version_path):
self.log.warning(
"Missing frame should be replaced from "
f"'{previous_version_path}' but that doesn't exist. "
"Falling back to filling from currently last rendered."
)
return None
self.log.warning(
f"Replacing missing '{hole_fpath}' with "
f"'{previous_version_path}'"
)
speedcopy.copyfile(previous_version_path, hole_fpath)
added_files[frame] = hole_fpath
return added_files
def _get_last_version_files(
self,
instance: pyblish.plugin.Instance,
current_repre_name: str,
):
product_name = instance.data["productName"]
project_name = instance.data["projectEntity"]["name"]
folder_entity = instance.data["folderEntity"]
version_entity = get_last_version_by_product_name(
project_name,
product_name,
folder_entity["id"],
fields={"id"}
)
if not version_entity:
return None
matching_repres = get_representations(
project_name,
version_ids=[version_entity["id"]],
representation_names=[current_repre_name],
fields={"files"}
)
if not matching_repres:
return None
matching_repre = list(matching_repres)[0]
repre_file_paths = [
file_info["path"]
for file_info in matching_repre["files"]
]
return repre_file_paths
def fill_sequence_gaps_with_blanks(
self,
collection: str,
staging_dir: str,
start_frame: int,
end_frame: int,
resolution_width: int,
resolution_height: int,
extension: str,
temp_data: TempData
) -> Optional[dict[int, str]]:
"""Fills missing files by blank frame."""
blank_frame_path = None
added_files = {}
col_format = collection.format("{head}{padding}{tail}")
for frame in range(start_frame, end_frame + 1):
if frame in collection.indexes:
continue
hole_fpath = os.path.join(staging_dir, col_format % frame)
if blank_frame_path is None:
blank_frame_path = self._create_blank_frame(
staging_dir, extension, resolution_width, resolution_height
)
temp_data.paths_to_remove.append(blank_frame_path)
speedcopy.copyfile(blank_frame_path, hole_fpath)
added_files[frame] = hole_fpath
return added_files
def _create_blank_frame(
self,
staging_dir,
extension,
resolution_width,
resolution_height
):
blank_frame_path = os.path.join(staging_dir, f"blank.{extension}")
command = get_ffmpeg_tool_args(
"ffmpeg",
"-f", "lavfi",
"-i", "color=c=black:s={}x{}:d=1".format(
resolution_width, resolution_height
),
"-tune", "stillimage",
"-frames:v", "1",
blank_frame_path
)
self.log.debug("Executing: {}".format(" ".join(command)))
output = run_subprocess(
command, logger=self.log
)
self.log.debug("Output: {}".format(output))
return blank_frame_path
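# Sketch of the resulting ffmpeg call for a hypothetical 1920x1080 exr blank
# frame (tool path shortened):
#
#     ffmpeg -f lavfi -i color=c=black:s=1920x1080:d=1 \
#         -tune stillimage -frames:v 1 <staging_dir>/blank.exr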
def fill_sequence_gaps_from_existing(
self,
collection,
staging_dir: str,
start_frame: int,
end_frame: int
) -> dict[int, str]:
"""Fill missing files in sequence by duplicating existing ones.
This will take the nearest existing frame file and copy it to fill the
@ -890,40 +1172,33 @@ class ExtractReview(pyblish.api.InstancePlugin):
hole ahead.
Args:
files (list): List of representation files.
collection (clique.collection)
staging_dir (str): Path to staging directory.
start_frame (int): Sequence start (no matter what files are there)
end_frame (int): Sequence end (no matter what files are there)
Returns:
list of added files. Those should be cleaned after work
dict[int, str] of added files. Those should be cleaned after work
is done.
Raises:
KnownPublishError: if more than one collection is obtained.
"""
collections = clique.assemble(files)[0]
if len(collections) != 1:
raise KnownPublishError(
"Multiple collections {} found.".format(collections))
col = collections[0]
# Prepare which hole is filled with what frame
# - the frame is filled only with already existing frames
prev_frame = next(iter(col.indexes))
prev_frame = next(iter(collection.indexes))
hole_frame_to_nearest = {}
for frame in range(int(start_frame), int(end_frame) + 1):
if frame in col.indexes:
if frame in collection.indexes:
prev_frame = frame
else:
# Use previous frame as source for hole
hole_frame_to_nearest[frame] = prev_frame
# Calculate paths
added_files = []
col_format = col.format("{head}{padding}{tail}")
added_files = {}
col_format = collection.format("{head}{padding}{tail}")
for hole_frame, src_frame in hole_frame_to_nearest.items():
hole_fpath = os.path.join(staging_dir, col_format % hole_frame)
src_fpath = os.path.join(staging_dir, col_format % src_frame)
@ -932,11 +1207,11 @@ class ExtractReview(pyblish.api.InstancePlugin):
"Missing previously detected file: {}".format(src_fpath))
speedcopy.copyfile(src_fpath, hole_fpath)
added_files.append(hole_fpath)
added_files[hole_frame] = hole_fpath
return added_files
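# Sketch of the hole-to-source mapping, assuming frames 1, 2 and 5 exist and
# the requested range is 1-5:
#
#     hole_frame_to_nearest == {3: 2, 4: 2}
#     # frames 3 and 4 are copied from frame 2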
def input_output_paths(self, new_repre, output_def, temp_data):
def input_output_paths(self, new_repre, output_def, temp_data: TempData):
"""Deduce input nad output file paths based on entered data.
Input may be sequence of images, video file or single image file and
@ -949,11 +1224,11 @@ class ExtractReview(pyblish.api.InstancePlugin):
"sequence_file" (if output is sequence) keys to new representation.
"""
repre = temp_data["origin_repre"]
repre = temp_data.origin_repre
src_staging_dir = repre["stagingDir"]
dst_staging_dir = new_repre["stagingDir"]
if temp_data["input_is_sequence"]:
if temp_data.input_is_sequence:
collections = clique.assemble(repre["files"])[0]
full_input_path = os.path.join(
src_staging_dir,
@ -978,6 +1253,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
# Make sure to have full path to one input file
full_input_path_single_file = full_input_path
filled_files = temp_data.filled_files
if filled_files:
first_frame, first_file = next(iter(filled_files.items()))
if first_file < full_input_path_single_file:
self.log.warning(f"Using filled frame: '{first_file}'")
full_input_path_single_file = first_file
temp_data.first_sequence_frame = first_frame
filename_suffix = output_def["filename_suffix"]
output_ext = output_def.get("ext")
@ -1004,8 +1287,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
)
if output_is_sequence:
new_repre_files = []
frame_start = temp_data["output_frame_start"]
frame_end = temp_data["output_frame_end"]
frame_start = temp_data.output_frame_start
frame_end = temp_data.output_frame_end
filename_base = "{}_{}".format(filename, filename_suffix)
# Temporary template for frame filling. Example output:
@ -1042,18 +1325,18 @@ class ExtractReview(pyblish.api.InstancePlugin):
new_repre["stagingDir"] = dst_staging_dir
# Store paths to temp data
temp_data["full_input_path"] = full_input_path
temp_data["full_input_path_single_file"] = full_input_path_single_file
temp_data["full_output_path"] = full_output_path
temp_data.full_input_path = full_input_path
temp_data.full_input_path_single_file = full_input_path_single_file
temp_data.full_output_path = full_output_path
# Store information about output
temp_data["output_ext_is_image"] = output_ext_is_image
temp_data["output_is_sequence"] = output_is_sequence
temp_data.output_ext_is_image = output_ext_is_image
temp_data.output_is_sequence = output_is_sequence
self.log.debug("Input path {}".format(full_input_path))
self.log.debug("Output path {}".format(full_output_path))
def audio_args(self, instance, temp_data, duration_seconds):
def audio_args(self, instance, temp_data: TempData, duration_seconds):
"""Prepares FFMpeg arguments for audio inputs."""
audio_in_args = []
audio_filters = []
@ -1070,7 +1353,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
frame_start_ftrack = instance.data.get("frameStartFtrack")
if frame_start_ftrack is not None:
offset_frames = frame_start_ftrack - audio["offset"]
offset_seconds = offset_frames / temp_data["fps"]
offset_seconds = offset_frames / temp_data.fps
if offset_seconds > 0:
audio_in_args.append(
@ -1254,7 +1537,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
return output
def rescaling_filters(self, temp_data, output_def, new_repre):
def rescaling_filters(self, temp_data: TempData, output_def, new_repre):
"""Prepare vieo filters based on tags in new representation.
It is possible to add letterboxes to output video or rescale to
@ -1274,7 +1557,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
self.log.debug("reformat_in_baking: `{}`".format(reformat_in_baking))
# NOTE Skipped using instance's resolution
full_input_path_single_file = temp_data["full_input_path_single_file"]
full_input_path_single_file = temp_data.full_input_path_single_file
try:
streams = get_ffprobe_streams(
full_input_path_single_file, self.log
@ -1299,7 +1582,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
break
# Get instance data
pixel_aspect = temp_data["pixel_aspect"]
pixel_aspect = temp_data.pixel_aspect
if reformat_in_baking:
self.log.debug((
"Using resolution from input. It is already "
@ -1333,7 +1616,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
bg_red, bg_green, bg_blue = overscan_color
else:
# Backwards compatibility
bg_red, bg_green, bg_blue, _ = overscan_color
bg_red, bg_green, bg_blue, _ = overscan_color
overscan_color_value = "#{0:0>2X}{1:0>2X}{2:0>2X}".format(
bg_red, bg_green, bg_blue
@ -1394,8 +1677,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
# - use instance resolution only if there were no scale changes
# that may massively affect output 'use_input_res'
if not use_input_res and output_width is None or output_height is None:
output_width = temp_data["resolution_width"]
output_height = temp_data["resolution_height"]
output_width = temp_data.resolution_width
output_height = temp_data.resolution_height
# Use source's input resolution if the instance does not have it set.
if output_width is None or output_height is None:

View file

@ -17,7 +17,7 @@ from ayon_core.lib import (
)
from ayon_core.lib.transcoding import convert_colorspace
from ayon_core.lib.transcoding import VIDEO_EXTENSIONS
from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
class ExtractThumbnail(pyblish.api.InstancePlugin):
@ -164,7 +164,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
instance.context.data["cleanupFullPaths"].append(dst_staging)
oiio_supported = is_oiio_supported()
repre_thumb_created = False
thumbnail_created = False
for repre in filtered_repres:
# Reset for each iteration to handle cases where multiple
# reviewable thumbnails are needed
@ -241,6 +241,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
if not repre_thumb_created:
continue
thumbnail_created = True
if len(explicit_repres) > 1:
repre_name = "thumbnail_{}".format(repre["outputName"])
else:
@ -294,7 +295,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# There is no need to create more than one thumbnail
break
if not repre_thumb_created:
if not thumbnail_created:
self.log.warning("Thumbnail has not been created.")
def _is_review_instance(self, instance):
@ -335,7 +336,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
return need_thumb_repres
def _get_filtered_repres(self, instance):
filtered_repres = []
review_repres = []
other_repres = []
src_repres = instance.data.get("representations") or []
for repre in src_repres:
@ -347,17 +349,36 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# to be published locally
continue
if "review" not in tags:
continue
if not repre.get("files"):
self.log.debug((
"Representation \"{}\" doesn't have files. Skipping"
).format(repre["name"]))
continue
filtered_repres.append(repre)
return filtered_repres
if "review" in tags:
review_repres.append(repre)
elif self._is_valid_images_repre(repre):
other_repres.append(repre)
return review_repres + other_repres
def _is_valid_images_repre(self, repre):
"""Check if representation contains valid image files
Args:
repre (dict): representation
Returns:
bool: Whether the representation has valid image or video content.
"""
# Get first file's extension
first_file = repre["files"]
if isinstance(first_file, (list, tuple)):
first_file = first_file[0]
ext = os.path.splitext(first_file)[1].lower()
return ext in IMAGE_EXTENSIONS or ext in VIDEO_EXTENSIONS
def _create_thumbnail_oiio(
self,
@ -485,27 +506,36 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# Set video input attributes
max_int = str(2147483647)
video_data = get_ffprobe_data(video_file_path, logger=self.log)
# Use duration of the individual streams since it is returned with
# higher decimal precision than 'format.duration'. We need this
# more precise value for calculating the correct amount of frames
# for higher FPS ranges or decimal ranges, e.g. 29.97 FPS
duration = max(
float(stream.get("duration", 0))
for stream in video_data["streams"]
if stream.get("codec_type") == "video"
)
cmd_args = [
"-y",
"-ss", str(duration * self.duration_split),
# Get duration or use a safe default (single frame)
duration = 0
for stream in video_data["streams"]:
if stream.get("codec_type") == "video":
stream_duration = float(stream.get("duration", 0))
if stream_duration > duration:
duration = stream_duration
# For very short videos, just use the first frame
# Calculate seek position safely
seek_position = 0.0
# Only use timestamp calculation for videos longer than 0.1 seconds
if duration > 0.1:
seek_position = duration * self.duration_split
# Build command args
cmd_args = []
if seek_position > 0.0:
cmd_args.extend(["-ss", str(seek_position)])
# Add generic ffmpeg commands
cmd_args.extend([
"-i", video_file_path,
"-analyzeduration", max_int,
"-probesize", max_int,
"-frames:v", "1"
]
# add output file path
cmd_args.append(output_thumb_file_path)
"-y",
"-frames:v", "1",
output_thumb_file_path
])
# create ffmpeg command
cmd = get_ffmpeg_tool_args(
@ -516,15 +546,53 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# run subprocess
self.log.debug("Executing: {}".format(" ".join(cmd)))
run_subprocess(cmd, logger=self.log)
self.log.debug(
"Thumbnail created: {}".format(output_thumb_file_path))
return output_thumb_file_path
# Verify the output file was created
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) > 0
):
self.log.debug(
"Thumbnail created: {}".format(output_thumb_file_path))
return output_thumb_file_path
self.log.warning("Output file was not created or is empty")
# Try to create thumbnail without offset
# - skip if offset did not happen
if "-ss" not in cmd_args:
return None
self.log.debug("Trying fallback without offset")
# Remove -ss and its value
ss_index = cmd_args.index("-ss")
cmd_args.pop(ss_index) # Remove -ss
cmd_args.pop(ss_index) # Remove the timestamp value
# Create new command and try again
cmd = get_ffmpeg_tool_args("ffmpeg", *cmd_args)
self.log.debug("Fallback command: {}".format(" ".join(cmd)))
run_subprocess(cmd, logger=self.log)
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) > 0
):
self.log.debug("Fallback thumbnail created")
return output_thumb_file_path
return None
except RuntimeError as error:
self.log.warning(
"Failed intermediate thumb source using ffmpeg: {}".format(
error)
)
return None
finally:
# Remove output file if is empty
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) == 0
):
os.remove(output_thumb_file_path)
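For reference, a hedged sketch of the single-frame ffmpeg invocation assembled above for a normal-length video, assuming a duration_split of 0.5 and placeholder file paths (the "-ss" pair is dropped for clips of 0.1 s or shorter, and again in the fallback retry):

duration = 12.0                    # probed stream duration in seconds
seek_position = duration * 0.5     # duration_split assumed to be 0.5

cmd = [
    "ffmpeg",
    "-ss", str(seek_position),
    "-i", "/tmp/review.mov",
    "-analyzeduration", "2147483647",
    "-probesize", "2147483647",
    "-y",
    "-frames:v", "1",              # grab a single frame
    "/tmp/review_thumbnail.jpg",
]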
def _get_resolution_arg(
self,

View file

@ -683,7 +683,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
elif is_sequence_representation:
# Collection of files (sequence)
src_collections, remainders = clique.assemble(files)
src_collections, _remainders = clique.assemble(files)
src_collection = src_collections[0]
destination_indexes = list(src_collection.indexes)

View file

@ -1,7 +1,11 @@
import os
import copy
import errno
import itertools
import shutil
from concurrent.futures import ThreadPoolExecutor
from speedcopy import copyfile
import clique
import pyblish.api
@ -13,6 +17,7 @@ from ayon_api.operations import (
from ayon_api.utils import create_entity_id
from ayon_core.lib import create_hard_link, source_hash
from ayon_core.lib.file_transaction import wait_for_future_errors
from ayon_core.pipeline.publish import (
get_publish_template_name,
OptionalPyblishPluginMixin,
@ -415,11 +420,14 @@ class IntegrateHeroVersion(
# Copy(hardlink) paths of source and destination files
# TODO should we *only* create hardlinks?
# TODO should we keep files for deletion until this is successful?
for src_path, dst_path in src_to_dst_file_paths:
self.copy_file(src_path, dst_path)
for src_path, dst_path in other_file_paths_mapping:
self.copy_file(src_path, dst_path)
with ThreadPoolExecutor(max_workers=8) as executor:
futures = [
executor.submit(self.copy_file, src_path, dst_path)
for src_path, dst_path in itertools.chain(
src_to_dst_file_paths, other_file_paths_mapping
)
]
wait_for_future_errors(executor, futures)
# Update prepared representation entity data with files
# and integrate it to the server.
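A hedged, self-contained sketch of the same fan-out copy pattern, with shutil.copy2 and as_completed standing in for this class's copy_file helper and wait_for_future_errors:

import itertools
import shutil
from concurrent.futures import ThreadPoolExecutor, as_completed

def copy_pairs_in_parallel(pairs_a, pairs_b, max_workers=8):
    """Copy (src, dst) pairs in parallel and re-raise the first failure."""
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [
            executor.submit(shutil.copy2, src, dst)
            for src, dst in itertools.chain(pairs_a, pairs_b)
        ]
        for future in as_completed(futures):
            future.result()  # raises the worker's exception, if any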
@ -648,7 +656,7 @@ class IntegrateHeroVersion(
src_path, dst_path
))
shutil.copy(src_path, dst_path)
copyfile(src_path, dst_path)
def version_from_representations(self, project_name, repres):
for repre in repres:

View file

@ -7,7 +7,7 @@ class IntegrateResourcesPath(pyblish.api.InstancePlugin):
label = "Integrate Resources Path"
order = pyblish.api.IntegratorOrder - 0.05
families = ["clip", "projectfile", "plate"]
families = ["clip", "projectfile", "plate"]
def process(self, instance):
resources = instance.data.get("resources") or []

File diff suppressed because it is too large

View file

@ -173,7 +173,6 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if frame_end is not None:
options["frame_end"] = frame_end
options["label"] = align
self._add_burnin(text, align, options, DRAWTEXT)

View file

@ -175,7 +175,7 @@ class BaseObj:
self.log.warning("Invalid range '{}'".format(part))
continue
for idx in range(sub_parts[0], sub_parts[1]+1):
for idx in range(sub_parts[0], sub_parts[1] + 1):
indexes.append(idx)
return indexes
@ -353,7 +353,6 @@ class BaseObj:
self.items[item.id] = item
item.fill_data_format()
def reset(self):
for item in self.items.values():
item.reset()

View file

@ -282,7 +282,7 @@ class ItemTable(BaseItem):
value.draw(image, drawer)
def value_width(self):
row_heights, col_widths = self.size_values
_row_heights, col_widths = self.size_values
width = 0
for _width in col_widths:
width += _width
@ -292,7 +292,7 @@ class ItemTable(BaseItem):
return width
def value_height(self):
row_heights, col_widths = self.size_values
row_heights, _col_widths = self.size_values
height = 0
for _height in row_heights:
height += _height
@ -569,21 +569,21 @@ class TableField(BaseItem):
@property
def item_pos_x(self):
pos_x, pos_y, width, height = (
pos_x, _pos_y, _width, _height = (
self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
)
return pos_x
@property
def item_pos_y(self):
pos_x, pos_y, width, height = (
_pos_x, pos_y, _width, _height = (
self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
)
return pos_y
@property
def value_pos_x(self):
pos_x, pos_y, width, height = (
pos_x, _pos_y, width, _height = (
self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
)
alignment_hor = self.style["alignment-horizontal"].lower()
@ -605,7 +605,7 @@ class TableField(BaseItem):
@property
def value_pos_y(self):
pos_x, pos_y, width, height = (
_pos_x, pos_y, _width, height = (
self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
)

View file

@ -56,14 +56,9 @@ class _AyonSettingsCache:
@classmethod
def _get_variant(cls):
if _AyonSettingsCache.variant is None:
from ayon_core.lib import is_staging_enabled, is_dev_mode_enabled
variant = "production"
if is_dev_mode_enabled():
variant = cls._get_bundle_name()
elif is_staging_enabled():
variant = "staging"
from ayon_core.lib import get_settings_variant
variant = get_settings_variant()
# Cache variant
_AyonSettingsCache.variant = variant
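For context, a hedged sketch of the resolution order that get_settings_variant is expected to wrap, mirroring the inline logic removed above (dev mode resolves to the bundle name):

from ayon_core.lib import is_dev_mode_enabled, is_staging_enabled

def resolve_settings_variant(bundle_name):
    """Approximate the variant resolution replaced by get_settings_variant."""
    if is_dev_mode_enabled():
        return bundle_name
    if is_staging_enabled():
        return "staging"
    return "production"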

View file

@ -829,6 +829,49 @@ HintedLineEditButton {
}
/* Launcher specific stylesheets */
ActionsView {
/* font size can't be set on items */
font-size: 8pt;
border: 0px;
padding: 0px;
margin: 0px;
}
ActionsView::item {
padding-top: 8px;
padding-bottom: 4px;
border: 0px;
border-radius: 5px;
}
ActionsView::item:hover {
color: {color:font-hover};
background: #424A57;
}
ActionsView::icon {}
ActionMenuPopup #GroupLabel {
padding: 5px;
color: #ffffff;
}
ActionMenuPopup #ShadowFrame {
border-radius: 5px;
background: rgba(12, 13, 24, 0.5);
}
ActionMenuPopup #Wrapper {
border-radius: 5px;
background: #353B46;
}
ActionMenuPopup ActionsView {
background: transparent;
border: none;
margin: 4px;
}
#IconView[mode="icon"] {
/* font size can't be set on items */
font-size: 9pt;
@ -862,6 +905,70 @@ HintedLineEditButton {
border-radius: 0.1em;
}
/* Launcher specific stylesheets */
FiltersBar {
background: {color:bg-inputs};
border: 1px solid {color:border};
border-radius: 5px;
}
FiltersBar #ScrollArea {
background: {color:bg-inputs};
}
FiltersBar #SearchButton {
background: transparent;
}
FiltersBar #BackButton {
background: transparent;
}
FiltersBar #BackButton:hover {
background: {color:bg-buttons-hover};
}
FiltersBar #ConfirmButton {
background: #91CDFB;
color: #03344D;
}
FiltersPopup #PopupWrapper, FilterValuePopup #PopupWrapper {
border-radius: 5px;
background: {color:bg-inputs};
}
FiltersPopup #ShadowFrame, FilterValuePopup #ShadowFrame {
border-radius: 5px;
background: rgba(0, 0, 0, 0.5);
}
FilterItemButton, FilterValueItemButton {
border-radius: 5px;
background: transparent;
}
FilterItemButton:hover, FilterValueItemButton:hover {
background: {color:bg-buttons-hover};
}
FilterValueItemButton[selected="1"] {
background: {color:bg-view-selection};
}
FilterValueItemButton[selected="1"]:hover {
background: {color:bg-view-selection-hover};
}
FilterValueItemsView #ContentWidget {
background: {color:bg-inputs};
}
SearchItemDisplayWidget {
border-radius: 5px;
}
SearchItemDisplayWidget:hover {
background: {color:bg-buttons};
}
SearchItemDisplayWidget #ValueWidget {
border-radius: 3px;
background: {color:bg-buttons};
}
/* Subset Manager */
#SubsetManagerDetailsText {}
#SubsetManagerDetailsText[state="invalid"] {

View file

@ -1,22 +1,58 @@
from qtpy import QtWidgets
from __future__ import annotations
from typing import Optional
from qtpy import QtWidgets, QtGui
from ayon_core.style import load_stylesheet
from ayon_core.resources import get_ayon_icon_filepath
from ayon_core.lib import AbstractAttrDef
from .widgets import AttributeDefinitionsWidget
class AttributeDefinitionsDialog(QtWidgets.QDialog):
def __init__(self, attr_defs, parent=None):
super(AttributeDefinitionsDialog, self).__init__(parent)
def __init__(
self,
attr_defs: list[AbstractAttrDef],
title: Optional[str] = None,
submit_label: Optional[str] = None,
cancel_label: Optional[str] = None,
submit_icon: Optional[QtGui.QIcon] = None,
cancel_icon: Optional[QtGui.QIcon] = None,
parent: Optional[QtWidgets.QWidget] = None,
):
super().__init__(parent)
if title:
self.setWindowTitle(title)
icon = QtGui.QIcon(get_ayon_icon_filepath())
self.setWindowIcon(icon)
self.setStyleSheet(load_stylesheet())
attrs_widget = AttributeDefinitionsWidget(attr_defs, self)
if submit_label is None:
submit_label = "OK"
if cancel_label is None:
cancel_label = "Cancel"
btns_widget = QtWidgets.QWidget(self)
ok_btn = QtWidgets.QPushButton("OK", btns_widget)
cancel_btn = QtWidgets.QPushButton("Cancel", btns_widget)
cancel_btn = QtWidgets.QPushButton(cancel_label, btns_widget)
submit_btn = QtWidgets.QPushButton(submit_label, btns_widget)
if submit_icon is not None:
submit_btn.setIcon(submit_icon)
if cancel_icon is not None:
cancel_btn.setIcon(cancel_icon)
btns_layout = QtWidgets.QHBoxLayout(btns_widget)
btns_layout.setContentsMargins(0, 0, 0, 0)
btns_layout.addStretch(1)
btns_layout.addWidget(ok_btn, 0)
btns_layout.addWidget(submit_btn, 0)
btns_layout.addWidget(cancel_btn, 0)
main_layout = QtWidgets.QVBoxLayout(self)
@ -24,10 +60,33 @@ class AttributeDefinitionsDialog(QtWidgets.QDialog):
main_layout.addStretch(1)
main_layout.addWidget(btns_widget, 0)
ok_btn.clicked.connect(self.accept)
submit_btn.clicked.connect(self.accept)
cancel_btn.clicked.connect(self.reject)
self._attrs_widget = attrs_widget
self._submit_btn = submit_btn
self._cancel_btn = cancel_btn
def get_values(self):
return self._attrs_widget.current_value()
def set_values(self, values):
self._attrs_widget.set_value(values)
def set_submit_label(self, text: str):
self._submit_btn.setText(text)
def set_submit_icon(self, icon: QtGui.QIcon):
self._submit_btn.setIcon(icon)
def set_submit_visible(self, visible: bool):
self._submit_btn.setVisible(visible)
def set_cancel_label(self, text: str):
self._cancel_btn.setText(text)
def set_cancel_icon(self, icon: QtGui.QIcon):
self._cancel_btn.setIcon(icon)
def set_cancel_visible(self, visible: bool):
self._cancel_btn.setVisible(visible)
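A hedged usage sketch of the extended dialog; the attribute definitions, labels, and the dialog's import path are illustrative assumptions based on this repository's layout:

from qtpy import QtWidgets

from ayon_core.lib import EnumDef, TextDef
from ayon_core.tools.attribute_defs import AttributeDefinitionsDialog

app = QtWidgets.QApplication([])
dialog = AttributeDefinitionsDialog(
    [
        TextDef("comment", label="Comment"),
        EnumDef("quality", items=["low", "high"], label="Quality"),
    ],
    title="Render options",
    submit_label="Render",
    cancel_label="Close",
)
if dialog.exec_():
    print(dialog.get_values())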

View file

@ -22,6 +22,7 @@ from ayon_core.tools.utils import (
FocusSpinBox,
FocusDoubleSpinBox,
MultiSelectionComboBox,
MarkdownLabel,
PlaceholderLineEdit,
PlaceholderPlainTextEdit,
set_style_property,
@ -247,12 +248,10 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
def set_value(self, value):
new_value = copy.deepcopy(value)
unused_keys = set(new_value.keys())
for widget in self._widgets_by_id.values():
attr_def = widget.attr_def
if attr_def.key not in new_value:
continue
unused_keys.remove(attr_def.key)
widget_value = new_value[attr_def.key]
if widget_value is None:
@ -350,7 +349,7 @@ class SeparatorAttrWidget(_BaseAttrDefWidget):
class LabelAttrWidget(_BaseAttrDefWidget):
def _ui_init(self):
input_widget = QtWidgets.QLabel(self)
input_widget = MarkdownLabel(self)
label = self.attr_def.label
if label:
input_widget.setText(str(label))

View file

@ -2,6 +2,7 @@
from .cache import CacheItem, NestedCacheItem
from .projects import (
TagItem,
StatusItem,
StatusStates,
ProjectItem,
@ -25,6 +26,7 @@ __all__ = (
"CacheItem",
"NestedCacheItem",
"TagItem",
"StatusItem",
"StatusStates",
"ProjectItem",

View file

@ -100,12 +100,14 @@ class TaskItem:
label: Union[str, None],
task_type: str,
parent_id: str,
tags: list[str],
):
self.task_id = task_id
self.name = name
self.label = label
self.task_type = task_type
self.parent_id = parent_id
self.tags = tags
self._full_label = None
@ -145,6 +147,7 @@ class TaskItem:
"label": self.label,
"parent_id": self.parent_id,
"task_type": self.task_type,
"tags": self.tags,
}
@classmethod
@ -176,7 +179,8 @@ def _get_task_items_from_tasks(tasks):
task["name"],
task["label"],
task["type"],
folder_id
folder_id,
task["tags"],
))
return output
@ -217,6 +221,8 @@ class HierarchyModel(object):
lifetime = 60 # A minute
def __init__(self, controller):
self._tags_by_entity_type = NestedCacheItem(
levels=1, default_factory=dict, lifetime=self.lifetime)
self._folders_items = NestedCacheItem(
levels=1, default_factory=dict, lifetime=self.lifetime)
self._folders_by_id = NestedCacheItem(
@ -235,6 +241,7 @@ class HierarchyModel(object):
self._controller = controller
def reset(self):
self._tags_by_entity_type.reset()
self._folders_items.reset()
self._folders_by_id.reset()
@ -514,6 +521,31 @@ class HierarchyModel(object):
return output
def get_available_tags_by_entity_type(
self, project_name: str
) -> dict[str, list[str]]:
"""Get available tags for all entity types in a project."""
cache = self._tags_by_entity_type.get(project_name)
if not cache.is_valid:
tags = None
if project_name:
response = ayon_api.get(f"projects/{project_name}/tags")
if response.status_code == 200:
tags = response.data
# Fake empty tags
if tags is None:
tags = {
"folders": [],
"tasks": [],
"products": [],
"versions": [],
"representations": [],
"workfiles": []
}
cache.update_data(tags)
return cache.get_data()
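A standalone, hedged sketch of the same request without the cache layer; availability of the 'projects/{name}/tags' endpoint depends on the server version, hence the empty fallback:

import ayon_api

def get_tags_by_entity_type(project_name):
    """Fetch available tags per entity type, with an empty fallback."""
    response = ayon_api.get(f"projects/{project_name}/tags")
    if response.status_code == 200:
        return response.data
    return {
        "folders": [], "tasks": [], "products": [],
        "versions": [], "representations": [], "workfiles": [],
    }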
@contextlib.contextmanager
def _folder_refresh_event_manager(self, project_name, sender):
self._folders_refreshing.add(project_name)
@ -617,6 +649,6 @@ class HierarchyModel(object):
tasks = list(ayon_api.get_tasks(
project_name,
folder_ids=[folder_id],
fields={"id", "name", "label", "folderId", "type"}
fields={"id", "name", "label", "folderId", "type", "tags"}
))
return _get_task_items_from_tasks(tasks)

View file

@ -1,6 +1,9 @@
from __future__ import annotations
import contextlib
from abc import ABC, abstractmethod
from typing import Dict, Any
from dataclasses import dataclass
import ayon_api
@ -72,6 +75,13 @@ class StatusItem:
)
@dataclass
class TagItem:
"""Tag definition set on project anatomy."""
name: str
color: str
class FolderTypeItem:
"""Item representing folder type of project.
@ -140,6 +150,7 @@ class TaskTypeItem:
)
@dataclass
class ProjectItem:
"""Item representing folder entity on a server.
@ -150,21 +161,14 @@ class ProjectItem:
active (bool): Whether the project is active.
"""
def __init__(self, name, active, is_library, icon=None):
self.name = name
self.active = active
self.is_library = is_library
if icon is None:
icon = {
"type": "awesome-font",
"name": "fa.book" if is_library else "fa.map",
"color": get_default_entity_icon_color(),
}
self.icon = icon
name: str
active: bool
is_library: bool
icon: dict[str, Any]
is_pinned: bool = False
@classmethod
def from_entity(cls, project_entity):
def from_entity(cls, project_entity: dict[str, Any]) -> "ProjectItem":
"""Creates folder item from entity.
Args:
@ -174,10 +178,16 @@ class ProjectItem:
ProjectItem: Project item.
"""
icon = {
"type": "awesome-font",
"name": "fa.book" if project_entity["library"] else "fa.map",
"color": get_default_entity_icon_color(),
}
return cls(
project_entity["name"],
project_entity["active"],
project_entity["library"],
icon
)
def to_data(self):
@ -208,16 +218,18 @@ class ProjectItem:
return cls(**data)
def _get_project_items_from_entitiy(projects):
def _get_project_items_from_entitiy(
projects: list[dict[str, Any]]
) -> list[ProjectItem]:
"""
Args:
projects (list[dict[str, Any]]): List of projects.
Returns:
ProjectItem: Project item.
"""
list[ProjectItem]: Project items.
"""
return [
ProjectItem.from_entity(project)
for project in projects
@ -288,6 +300,22 @@ class ProjectsModel(object):
project_cache.update_data(entity)
return project_cache.get_data()
def get_project_anatomy_tags(self, project_name: str) -> list[TagItem]:
"""Get project anatomy tags.
Args:
project_name (str): Project name.
Returns:
list[TagItem]: Tag definitions.
"""
project_entity = self.get_project_entity(project_name)
return [
TagItem(tag["name"], tag["color"])
for tag in project_entity["tags"]
]
def get_project_status_items(self, project_name, sender):
"""Get project status items.
@ -428,9 +456,20 @@ class ProjectsModel(object):
self._projects_cache.update_data(project_items)
return self._projects_cache.get_data()
def _query_projects(self):
def _query_projects(self) -> list[ProjectItem]:
projects = ayon_api.get_projects(fields=["name", "active", "library"])
return _get_project_items_from_entitiy(projects)
user = ayon_api.get_user()
pinned_projects = (
user
.get("data", {})
.get("frontendPreferences", {})
.get("pinnedProjects")
) or []
pinned_projects = set(pinned_projects)
project_items = _get_project_items_from_entitiy(list(projects))
for project in project_items:
project.is_pinned = project.name in pinned_projects
return project_items
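A hedged standalone sketch of the pinned-projects lookup, assuming the same 'frontendPreferences.pinnedProjects' key on the user entity as used above:

import ayon_api

def get_pinned_project_names():
    """Read pinned project names from the user's frontend preferences."""
    user = ayon_api.get_user()
    pinned = (
        user
        .get("data", {})
        .get("frontendPreferences", {})
        .get("pinnedProjects")
    ) or []
    return set(pinned)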
def _status_items_getter(self, project_entity):
if not project_entity:

View file

@ -248,4 +248,3 @@ class EnhancedTabBar(QtWidgets.QTabBar):
else:
super().mouseReleaseEvent(event)

View file

@ -492,7 +492,7 @@ def show(parent=None):
try:
module.window.close()
del(module.window)
del module.window
except (AttributeError, RuntimeError):
pass

View file

@ -32,7 +32,7 @@ from qtpy import QtWidgets, QtCore, QtGui
import pyblish.api
from ayon_core import style
TAB = 4* "&nbsp;"
TAB = 4 * "&nbsp;"
HEADER_SIZE = "15px"
KEY_COLOR = QtGui.QColor("#ffffff")
@ -243,7 +243,7 @@ class DebugUI(QtWidgets.QDialog):
self._set_window_title(plugin=result["plugin"])
print(10*"<", result["plugin"].__name__, 10*">")
print(10 * "<", result["plugin"].__name__, 10 * ">")
plugin_order = result["plugin"].order
plugin_name = result["plugin"].__name__

View file

@ -1,4 +1,59 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional, Any
from ayon_core.tools.common_models import (
ProjectItem,
FolderItem,
FolderTypeItem,
TaskItem,
TaskTypeItem,
)
@dataclass
class WebactionContext:
"""Context used for methods related to webactions."""
identifier: str
project_name: str
folder_id: str
task_id: str
addon_name: str
addon_version: str
@dataclass
class ActionItem:
"""Item representing single action to trigger.
Attributes:
action_type (Literal["webaction", "local"]): Type of action.
identifier (str): Unique identifier of action item.
order (int): Action ordering.
label (str): Action label.
variant_label (Union[str, None]): Variant label; the full label is
the label and variant label joined with a space. Actions are grouped
under a single action if they share the same 'label' and have
'variant_label' set.
full_label (str): Full label, if not set it is generated
from 'label' and 'variant_label'.
icon (dict[str, str]): Icon definition.
addon_name (Optional[str]): Addon name.
addon_version (Optional[str]): Addon version.
config_fields (list[dict]): Config fields for webaction.
"""
action_type: str
identifier: str
order: int
label: str
variant_label: Optional[str]
full_label: str
icon: Optional[dict[str, str]]
config_fields: list[dict]
addon_name: Optional[str] = None
addon_version: Optional[str] = None
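For illustration, a local action item could be constructed like this (the identifier, label, and icon values are hypothetical):

from ayon_core.tools.launcher.abstract import ActionItem

action = ActionItem(
    action_type="local",
    identifier="example.workfiles",
    order=10,
    label="Workfiles",
    variant_label=None,
    full_label="Workfiles",
    icon={"type": "awesome-font", "name": "fa.file", "color": "#ffffff"},
    config_fields=[],
)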
class AbstractLauncherCommon(ABC):
@ -88,7 +143,9 @@ class AbstractLauncherBackend(AbstractLauncherCommon):
class AbstractLauncherFrontEnd(AbstractLauncherCommon):
# Entity items for UI
@abstractmethod
def get_project_items(self, sender=None):
def get_project_items(
self, sender: Optional[str] = None
) -> list[ProjectItem]:
"""Project items for all projects.
This function may trigger events 'projects.refresh.started' and
@ -106,7 +163,9 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_folder_type_items(self, project_name, sender=None):
def get_folder_type_items(
self, project_name: str, sender: Optional[str] = None
) -> list[FolderTypeItem]:
"""Folder type items for a project.
This function may trigger events with topics
@ -126,7 +185,9 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_task_type_items(self, project_name, sender=None):
def get_task_type_items(
self, project_name: str, sender: Optional[str] = None
) -> list[TaskTypeItem]:
"""Task type items for a project.
This function may trigger events with topics
@ -146,7 +207,9 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_folder_items(self, project_name, sender=None):
def get_folder_items(
self, project_name: str, sender: Optional[str] = None
) -> list[FolderItem]:
"""Folder items to visualize project hierarchy.
This function may trigger events 'folders.refresh.started' and
@ -165,7 +228,9 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_task_items(self, project_name, folder_id, sender=None):
def get_task_items(
self, project_name: str, folder_id: str, sender: Optional[str] = None
) -> list[TaskItem]:
"""Task items.
This function may trigger events 'tasks.refresh.started' and
@ -185,7 +250,7 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_selected_project_name(self):
def get_selected_project_name(self) -> Optional[str]:
"""Selected project name.
Returns:
@ -195,7 +260,7 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_selected_folder_id(self):
def get_selected_folder_id(self) -> Optional[str]:
"""Selected folder id.
Returns:
@ -205,7 +270,7 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_selected_task_id(self):
def get_selected_task_id(self) -> Optional[str]:
"""Selected task id.
Returns:
@ -215,7 +280,7 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_selected_task_name(self):
def get_selected_task_name(self) -> Optional[str]:
"""Selected task name.
Returns:
@ -225,7 +290,7 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_selected_context(self):
def get_selected_context(self) -> dict[str, Optional[str]]:
"""Get whole selected context.
Example:
@ -243,7 +308,7 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def set_selected_project(self, project_name):
def set_selected_project(self, project_name: Optional[str]):
"""Change selected folder.
Args:
@ -254,7 +319,7 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def set_selected_folder(self, folder_id):
def set_selected_folder(self, folder_id: Optional[str]):
"""Change selected folder.
Args:
@ -265,7 +330,9 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def set_selected_task(self, task_id, task_name):
def set_selected_task(
self, task_id: Optional[str], task_name: Optional[str]
):
"""Change selected task.
Args:
@ -279,7 +346,12 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
# Actions
@abstractmethod
def get_action_items(self, project_name, folder_id, task_id):
def get_action_items(
self,
project_name: Optional[str],
folder_id: Optional[str],
task_id: Optional[str],
) -> list[ActionItem]:
"""Get action items for given context.
Args:
@ -295,30 +367,67 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def trigger_action(self, project_name, folder_id, task_id, action_id):
def trigger_action(
self,
action_id: str,
project_name: Optional[str],
folder_id: Optional[str],
task_id: Optional[str],
):
"""Trigger action on given context.
Args:
action_id (str): Action identifier.
project_name (Union[str, None]): Project name.
folder_id (Union[str, None]): Folder id.
task_id (Union[str, None]): Task id.
action_id (str): Action identifier.
"""
pass
@abstractmethod
def set_application_force_not_open_workfile(
self, project_name, folder_id, task_id, action_ids, enabled
def trigger_webaction(
self,
context: WebactionContext,
action_label: str,
form_data: Optional[dict[str, Any]] = None,
):
"""This is application action related to force not open last workfile.
"""Trigger action on the given context.
Args:
project_name (Union[str, None]): Project name.
folder_id (Union[str, None]): Folder id.
task_id (Union[str, None]): Task id.
action_ids (Iterable[str]): Action identifiers.
enabled (bool): New value of force not open workfile.
context (WebactionContext): Webaction context.
action_label (str): Action label.
form_data (Optional[dict[str, Any]]): Form values of action.
"""
pass
@abstractmethod
def get_action_config_values(
self, context: WebactionContext
) -> dict[str, Any]:
"""Get action config values.
Args:
context (WebactionContext): Webaction context.
Returns:
dict[str, Any]: Action config values.
"""
pass
@abstractmethod
def set_action_config_values(
self,
context: WebactionContext,
values: dict[str, Any],
):
"""Set action config values.
Args:
context (WebactionContext): Webaction context.
values (dict[str, Any]): Action config values.
"""
pass
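A hedged usage sketch of the config round-trip these two methods describe; the context values and config key are hypothetical, and the controller is assumed to be the launcher tool's concrete implementation:

from ayon_core.tools.launcher.abstract import WebactionContext
from ayon_core.tools.launcher.control import BaseLauncherController

controller = BaseLauncherController()
context = WebactionContext(
    identifier="example.action",        # hypothetical action identifier
    project_name="demo_project",
    folder_id="0123456789abcdef0123456789abcdef",
    task_id="fedcba9876543210fedcba9876543210",
    addon_name="example_addon",
    addon_version="1.0.0",
)
values = controller.get_action_config_values(context)
values["remember_selection"] = True     # hypothetical config field
controller.set_action_config_values(context, values)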
@ -343,14 +452,16 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_my_tasks_entity_ids(self, project_name: str):
def get_my_tasks_entity_ids(
self, project_name: str
) -> dict[str, list[str]]:
"""Get entity ids for my tasks.
Args:
project_name (str): Project name.
Returns:
dict[str, Union[list[str]]]: Folder and task ids.
dict[str, list[str]]: Folder and task ids.
"""
pass

View file

@ -1,6 +1,6 @@
from ayon_core.lib import Logger, get_ayon_username
from ayon_core.lib.events import QueuedEventSystem
from ayon_core.settings import get_project_settings
from ayon_core.settings import get_project_settings, get_studio_settings
from ayon_core.tools.common_models import ProjectsModel, HierarchyModel
from .abstract import AbstractLauncherFrontEnd, AbstractLauncherBackend
@ -32,7 +32,7 @@ class BaseLauncherController(
@property
def event_system(self):
"""Inner event system for workfiles tool controller.
"""Inner event system for launcher tool controller.
Is used for communication with UI. Event system is created on demand.
@ -85,7 +85,10 @@ class BaseLauncherController(
def get_project_settings(self, project_name):
if project_name in self._project_settings:
return self._project_settings[project_name]
settings = get_project_settings(project_name)
if project_name:
settings = get_project_settings(project_name)
else:
settings = get_studio_settings()
self._project_settings[project_name] = settings
return settings
@ -135,16 +138,30 @@ class BaseLauncherController(
return self._actions_model.get_action_items(
project_name, folder_id, task_id)
def set_application_force_not_open_workfile(
self, project_name, folder_id, task_id, action_ids, enabled
def trigger_action(
self,
identifier,
project_name,
folder_id,
task_id,
):
self._actions_model.set_application_force_not_open_workfile(
project_name, folder_id, task_id, action_ids, enabled
self._actions_model.trigger_action(
identifier,
project_name,
folder_id,
task_id,
)
def trigger_action(self, project_name, folder_id, task_id, identifier):
self._actions_model.trigger_action(
project_name, folder_id, task_id, identifier)
def trigger_webaction(self, context, action_label, form_data=None):
self._actions_model.trigger_webaction(
context, action_label, form_data
)
def get_action_config_values(self, context):
return self._actions_model.get_action_config_values(context)
def set_action_config_values(self, context, values):
return self._actions_model.set_action_config_values(context, values)
# General methods
def refresh(self):

View file

@ -1,219 +1,47 @@
import os
import uuid
from dataclasses import dataclass, asdict
from urllib.parse import urlencode, urlparse
from typing import Any, Optional
import webbrowser
import ayon_api
from ayon_core import resources
from ayon_core.lib import Logger, AYONSettingsRegistry
from ayon_core.lib import (
Logger,
NestedCacheItem,
CacheItem,
get_settings_variant,
run_detached_ayon_launcher_process,
)
from ayon_core.addon import AddonsManager
from ayon_core.pipeline.actions import (
discover_launcher_actions,
LauncherAction,
LauncherActionSelection,
register_launcher_action_path,
)
from ayon_core.pipeline.workfile import should_use_last_workfile_on_launch
try:
# Available since applications addon 0.2.4
from ayon_applications.action import ApplicationAction
except ImportError:
# Backwards compatibility from 0.3.3 (24/06/10)
# TODO: Remove in future releases
class ApplicationAction(LauncherAction):
"""Action to launch an application.
Application action based on 'ApplicationManager' system.
Handling of applications in launcher is not ideal and should be
completely redone from scratch. This is just a temporary solution
to keep backwards compatibility with AYON launcher.
Todos:
Move handling of errors to frontend.
"""
# Application object
application = None
# Action attributes
name = None
label = None
label_variant = None
group = None
icon = None
color = None
order = 0
data = {}
project_settings = {}
project_entities = {}
_log = None
@property
def log(self):
if self._log is None:
self._log = Logger.get_logger(self.__class__.__name__)
return self._log
def is_compatible(self, selection):
if not selection.is_task_selected:
return False
project_entity = self.project_entities[selection.project_name]
apps = project_entity["attrib"].get("applications")
if not apps or self.application.full_name not in apps:
return False
project_settings = self.project_settings[selection.project_name]
only_available = project_settings["applications"]["only_available"]
if only_available and not self.application.find_executable():
return False
return True
def _show_message_box(self, title, message, details=None):
from qtpy import QtWidgets, QtGui
from ayon_core import style
dialog = QtWidgets.QMessageBox()
icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
dialog.setWindowIcon(icon)
dialog.setStyleSheet(style.load_stylesheet())
dialog.setWindowTitle(title)
dialog.setText(message)
if details:
dialog.setDetailedText(details)
dialog.exec_()
def process(self, selection, **kwargs):
"""Process the full Application action"""
from ayon_applications import (
ApplicationExecutableNotFound,
ApplicationLaunchFailed,
)
try:
self.application.launch(
project_name=selection.project_name,
folder_path=selection.folder_path,
task_name=selection.task_name,
**self.data
)
except ApplicationExecutableNotFound as exc:
details = exc.details
msg = exc.msg
log_msg = str(msg)
if details:
log_msg += "\n" + details
self.log.warning(log_msg)
self._show_message_box(
"Application executable not found", msg, details
)
except ApplicationLaunchFailed as exc:
msg = str(exc)
self.log.warning(msg, exc_info=True)
self._show_message_box("Application launch failed", msg)
from ayon_core.tools.launcher.abstract import ActionItem, WebactionContext
# class Action:
# def __init__(self, label, icon=None, identifier=None):
# self._label = label
# self._icon = icon
# self._callbacks = []
# self._identifier = identifier or uuid.uuid4().hex
# self._checked = True
# self._checkable = False
#
# def set_checked(self, checked):
# self._checked = checked
#
# def set_checkable(self, checkable):
# self._checkable = checkable
#
# def set_label(self, label):
# self._label = label
#
# def add_callback(self, callback):
# self._callbacks = callback
#
#
# class Menu:
# def __init__(self, label, icon=None):
# self.label = label
# self.icon = icon
# self._actions = []
#
# def add_action(self, action):
# self._actions.append(action)
@dataclass
class WebactionForm:
fields: list[dict[str, Any]]
title: str
submit_label: str
submit_icon: str
cancel_label: str
cancel_icon: str
class ActionItem:
"""Item representing single action to trigger.
Todos:
Get rid of application specific logic.
Args:
identifier (str): Unique identifier of action item.
label (str): Action label.
variant_label (Union[str, None]): Variant label, full label is
concatenated with space. Actions are grouped under single
action if it has same 'label' and have set 'variant_label'.
icon (dict[str, str]): Icon definition.
order (int): Action ordering.
is_application (bool): Is action application action.
force_not_open_workfile (bool): Force not open workfile. Application
related.
full_label (Optional[str]): Full label, if not set it is generated
from 'label' and 'variant_label'.
"""
def __init__(
self,
identifier,
label,
variant_label,
icon,
order,
is_application,
force_not_open_workfile,
full_label=None
):
self.identifier = identifier
self.label = label
self.variant_label = variant_label
self.icon = icon
self.order = order
self.is_application = is_application
self.force_not_open_workfile = force_not_open_workfile
self._full_label = full_label
def copy(self):
return self.from_data(self.to_data())
@property
def full_label(self):
if self._full_label is None:
if self.variant_label:
self._full_label = " ".join([self.label, self.variant_label])
else:
self._full_label = self.label
return self._full_label
def to_data(self):
return {
"identifier": self.identifier,
"label": self.label,
"variant_label": self.variant_label,
"icon": self.icon,
"order": self.order,
"is_application": self.is_application,
"force_not_open_workfile": self.force_not_open_workfile,
"full_label": self._full_label,
}
@classmethod
def from_data(cls, data):
return cls(**data)
@dataclass
class WebactionResponse:
response_type: str
success: bool
message: Optional[str] = None
clipboard_text: Optional[str] = None
form: Optional[WebactionForm] = None
error_message: Optional[str] = None
def get_action_icon(action):
@ -264,8 +92,6 @@ class ActionsModel:
controller (AbstractLauncherBackend): Controller instance.
"""
_not_open_workfile_reg_key = "force_not_open_workfile"
def __init__(self, controller):
self._controller = controller
@ -274,11 +100,21 @@ class ActionsModel:
self._discovered_actions = None
self._actions = None
self._action_items = {}
self._launcher_tool_reg = AYONSettingsRegistry("launcher_tool")
self._webaction_items = NestedCacheItem(
levels=2, default_factory=list, lifetime=20,
)
self._addons_manager = None
self._variant = get_settings_variant()
@staticmethod
def calculate_full_label(label: str, variant_label: Optional[str]) -> str:
"""Calculate full label from label and variant_label."""
if variant_label:
return " ".join([label, variant_label])
return label
@property
def log(self):
if self._log is None:
@ -289,39 +125,12 @@ class ActionsModel:
self._discovered_actions = None
self._actions = None
self._action_items = {}
self._webaction_items.reset()
self._controller.emit_event("actions.refresh.started")
self._get_action_objects()
self._controller.emit_event("actions.refresh.finished")
def _should_start_last_workfile(
self,
project_name,
task_id,
identifier,
host_name,
not_open_workfile_actions
):
if identifier in not_open_workfile_actions:
return not not_open_workfile_actions[identifier]
task_name = None
task_type = None
if task_id is not None:
task_entity = self._controller.get_task_entity(
project_name, task_id
)
task_name = task_entity["name"]
task_type = task_entity["taskType"]
output = should_use_last_workfile_on_launch(
project_name,
host_name,
task_name,
task_type
)
return output
def get_action_items(self, project_name, folder_id, task_id):
"""Get actions for project.
@ -332,53 +141,31 @@ class ActionsModel:
Returns:
list[ActionItem]: List of actions.
"""
not_open_workfile_actions = self._get_no_last_workfile_for_context(
project_name, folder_id, task_id)
selection = self._prepare_selection(project_name, folder_id, task_id)
output = []
action_items = self._get_action_items(project_name)
for identifier, action in self._get_action_objects().items():
if not action.is_compatible(selection):
continue
if action.is_compatible(selection):
output.append(action_items[identifier])
output.extend(self._get_webactions(selection))
action_item = action_items[identifier]
# Handling of 'force_not_open_workfile' for applications
if action_item.is_application:
action_item = action_item.copy()
start_last_workfile = self._should_start_last_workfile(
project_name,
task_id,
identifier,
action.application.host_name,
not_open_workfile_actions
)
action_item.force_not_open_workfile = (
not start_last_workfile
)
output.append(action_item)
return output
def set_application_force_not_open_workfile(
self, project_name, folder_id, task_id, action_ids, enabled
def trigger_action(
self,
identifier,
project_name,
folder_id,
task_id,
):
no_workfile_reg_data = self._get_no_last_workfile_reg_data()
project_data = no_workfile_reg_data.setdefault(project_name, {})
folder_data = project_data.setdefault(folder_id, {})
task_data = folder_data.setdefault(task_id, {})
for action_id in action_ids:
task_data[action_id] = enabled
self._launcher_tool_reg.set_item(
self._not_open_workfile_reg_key, no_workfile_reg_data
)
def trigger_action(self, project_name, folder_id, task_id, identifier):
selection = self._prepare_selection(project_name, folder_id, task_id)
failed = False
error_message = None
action_label = identifier
action_items = self._get_action_items(project_name)
trigger_id = uuid.uuid4().hex
try:
action = self._actions[identifier]
action_item = action_items[identifier]
@ -386,22 +173,11 @@ class ActionsModel:
self._controller.emit_event(
"action.trigger.started",
{
"trigger_id": trigger_id,
"identifier": identifier,
"full_label": action_label,
}
)
if isinstance(action, ApplicationAction):
per_action = self._get_no_last_workfile_for_context(
project_name, folder_id, task_id
)
start_last_workfile = self._should_start_last_workfile(
project_name,
task_id,
identifier,
action.application.host_name,
per_action
)
action.data["start_last_workfile"] = start_last_workfile
action.process(selection)
except Exception as exc:
@ -412,6 +188,7 @@ class ActionsModel:
self._controller.emit_event(
"action.trigger.finished",
{
"trigger_id": trigger_id,
"identifier": identifier,
"failed": failed,
"error_message": error_message,
@ -419,32 +196,148 @@ class ActionsModel:
}
)
def trigger_webaction(self, context, action_label, form_data):
entity_type = None
entity_ids = []
identifier = context.identifier
folder_id = context.folder_id
task_id = context.task_id
project_name = context.project_name
addon_name = context.addon_name
addon_version = context.addon_version
if task_id:
entity_type = "task"
entity_ids.append(task_id)
elif folder_id:
entity_type = "folder"
entity_ids.append(folder_id)
query = {
"addonName": addon_name,
"addonVersion": addon_version,
"identifier": identifier,
"variant": self._variant,
}
url = f"actions/execute?{urlencode(query)}"
request_data = {
"projectName": project_name,
"entityType": entity_type,
"entityIds": entity_ids,
}
if form_data is not None:
request_data["formData"] = form_data
trigger_id = uuid.uuid4().hex
failed = False
try:
self._controller.emit_event(
"webaction.trigger.started",
{
"trigger_id": trigger_id,
"identifier": identifier,
"full_label": action_label,
}
)
conn = ayon_api.get_server_api_connection()
# Add 'referer' header to the request
# - ayon-api 1.1.1 adds the value to the header automatically
headers = conn.get_headers()
if "referer" in headers:
headers = None
else:
headers["referer"] = conn.get_base_url()
response = ayon_api.raw_post(
url, headers=headers, json=request_data
)
response.raise_for_status()
handle_response = self._handle_webaction_response(response.data)
except Exception:
failed = True
self.log.warning("Action trigger failed.", exc_info=True)
handle_response = WebactionResponse(
"unknown",
False,
error_message="Failed to trigger webaction.",
)
data = asdict(handle_response)
data.update({
"trigger_failed": failed,
"trigger_id": trigger_id,
"identifier": identifier,
"full_label": action_label,
"project_name": project_name,
"folder_id": folder_id,
"task_id": task_id,
"addon_name": addon_name,
"addon_version": addon_version,
})
self._controller.emit_event(
"webaction.trigger.finished",
data,
)
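A condensed, hedged sketch of the execute request assembled above; the addon name, action identifier, and entity id are placeholders, and ayon_api supplies authentication for the call:

from urllib.parse import urlencode

import ayon_api

query = urlencode({
    "addonName": "example_addon",
    "addonVersion": "1.0.0",
    "identifier": "example.action",
    "variant": "production",
})
response = ayon_api.raw_post(
    f"actions/execute?{query}",
    json={
        "projectName": "demo_project",
        "entityType": "task",
        "entityIds": ["0123456789abcdef0123456789abcdef"],
    },
)
response.raise_for_status()
print(response.data)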
def get_action_config_values(self, context: WebactionContext):
selection = self._prepare_selection(
context.project_name, context.folder_id, context.task_id
)
if not selection.is_project_selected:
return {}
request_data = self._get_webaction_request_data(selection)
query = {
"addonName": context.addon_name,
"addonVersion": context.addon_version,
"identifier": context.identifier,
"variant": self._variant,
}
url = f"actions/config?{urlencode(query)}"
try:
response = ayon_api.post(url, **request_data)
response.raise_for_status()
except Exception:
self.log.warning(
"Failed to collect webaction config values.",
exc_info=True
)
return {}
return response.data
def set_action_config_values(self, context, values):
selection = self._prepare_selection(
context.project_name, context.folder_id, context.task_id
)
if not selection.is_project_selected:
return {}
request_data = self._get_webaction_request_data(selection)
request_data["value"] = values
query = {
"addonName": context.addon_name,
"addonVersion": context.addon_version,
"identifier": context.identifier,
"variant": self._variant,
}
url = f"actions/config?{urlencode(query)}"
try:
response = ayon_api.post(url, **request_data)
response.raise_for_status()
except Exception:
self.log.warning(
"Failed to store webaction config values.",
exc_info=True
)
def _get_addons_manager(self):
if self._addons_manager is None:
self._addons_manager = AddonsManager()
return self._addons_manager
def _get_no_last_workfile_reg_data(self):
try:
no_workfile_reg_data = self._launcher_tool_reg.get_item(
self._not_open_workfile_reg_key)
except ValueError:
no_workfile_reg_data = {}
self._launcher_tool_reg.set_item(
self._not_open_workfile_reg_key, no_workfile_reg_data)
return no_workfile_reg_data
def _get_no_last_workfile_for_context(
self, project_name, folder_id, task_id
):
not_open_workfile_reg_data = self._get_no_last_workfile_reg_data()
return (
not_open_workfile_reg_data
.get(project_name, {})
.get(folder_id, {})
.get(task_id, {})
)
def _prepare_selection(self, project_name, folder_id, task_id):
project_entity = None
if project_name:
@ -458,6 +351,179 @@ class ActionsModel:
project_settings=project_settings,
)
def _get_webaction_request_data(self, selection: LauncherActionSelection):
if not selection.is_project_selected:
return None
entity_type = None
entity_id = None
entity_subtypes = []
if selection.is_task_selected:
entity_type = "task"
entity_id = selection.task_entity["id"]
entity_subtypes = [selection.task_entity["taskType"]]
elif selection.is_folder_selected:
entity_type = "folder"
entity_id = selection.folder_entity["id"]
entity_subtypes = [selection.folder_entity["folderType"]]
entity_ids = []
if entity_id:
entity_ids.append(entity_id)
project_name = selection.project_name
return {
"projectName": project_name,
"entityType": entity_type,
"entitySubtypes": entity_subtypes,
"entityIds": entity_ids,
}
def _get_webactions(self, selection: LauncherActionSelection):
if not selection.is_project_selected:
return []
request_data = self._get_webaction_request_data(selection)
project_name = selection.project_name
entity_id = None
if request_data["entityIds"]:
entity_id = request_data["entityIds"][0]
cache: CacheItem = self._webaction_items[project_name][entity_id]
if cache.is_valid:
return cache.get_data()
try:
response = ayon_api.post("actions/list", **request_data)
response.raise_for_status()
except Exception:
self.log.warning("Failed to collect webactions.", exc_info=True)
return []
action_items = []
for action in response.data["actions"]:
# NOTE Settings variant may be important for triggering?
# - action["variant"]
icon = action.get("icon")
if icon and icon["type"] == "url":
if not urlparse(icon["url"]).scheme:
icon["type"] = "ayon_url"
config_fields = action.get("configFields") or []
variant_label = action["label"]
group_label = action.get("groupLabel")
if not group_label:
group_label = variant_label
variant_label = None
full_label = self.calculate_full_label(
group_label, variant_label
)
action_items.append(ActionItem(
action_type="webaction",
identifier=action["identifier"],
order=action["order"],
label=group_label,
variant_label=variant_label,
full_label=full_label,
icon=icon,
addon_name=action["addonName"],
addon_version=action["addonVersion"],
config_fields=config_fields,
# category=action["category"],
))
cache.update_data(action_items)
return cache.get_data()
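A minimal, hedged sketch of the listing call with the same payload shape as _get_webaction_request_data; the project, task type, and entity id are placeholders:

import ayon_api

request_data = {
    "projectName": "demo_project",
    "entityType": "task",
    "entitySubtypes": ["Modeling"],
    "entityIds": ["0123456789abcdef0123456789abcdef"],
}
response = ayon_api.post("actions/list", **request_data)
response.raise_for_status()
for action in response.data["actions"]:
    print(action["identifier"], action.get("groupLabel") or action["label"])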
def _handle_webaction_response(self, data) -> WebactionResponse:
response_type = data["type"]
# Backwards compatibility -> 'server' type is not available since
# AYON backend 1.8.3
if response_type == "server":
return WebactionResponse(
response_type,
False,
error_message="Please use AYON web UI to run the action.",
)
payload = data.get("payload") or {}
download_uri = payload.get("extra_download")
if download_uri is not None:
# Find out if is relative or absolute URL
if not urlparse(download_uri).scheme:
ayon_url = ayon_api.get_base_url().rstrip("/")
path = download_uri.lstrip("/")
download_uri = f"{ayon_url}/{path}"
# Use webbrowser to open file
webbrowser.open_new_tab(download_uri)
response = WebactionResponse(
response_type,
data["success"],
data.get("message"),
payload.get("extra_clipboard"),
)
if response_type == "simple":
pass
elif response_type == "redirect":
# NOTE unused 'newTab' key because we always have to
# open new tab from desktop app.
if not webbrowser.open_new_tab(payload["uri"]):
response.error_message = "Failed to open web browser."
elif response_type == "form":
submit_icon = payload["submit_icon"] or None
cancel_icon = payload["cancel_icon"] or None
if submit_icon:
submit_icon = {
"type": "material-symbols",
"name": submit_icon,
}
if cancel_icon:
cancel_icon = {
"type": "material-symbols",
"name": cancel_icon,
}
response.form = WebactionForm(
fields=payload["fields"],
title=payload["title"],
submit_label=payload["submit_label"],
cancel_label=payload["cancel_label"],
submit_icon=submit_icon,
cancel_icon=cancel_icon,
)
elif response_type == "launcher":
# Run AYON launcher process with uri in arguments
# NOTE This does pass environment variables of current process
# to the subprocess.
# NOTE We could 'take action' directly and use the arguments here
if payload is not None:
uri = payload["uri"]
else:
uri = data["uri"]
run_detached_ayon_launcher_process(uri)
elif response_type in ("query", "navigate"):
response.error_message = (
"Please use AYON web UI to run the action."
)
else:
self.log.warning(
f"Unknown webaction response type '{response_type}'"
)
response.error_message = "Unknown webaction response type."
return response
def _get_discovered_action_classes(self):
if self._discovered_actions is None:
# NOTE We don't need to register the paths, but that would
@ -470,7 +536,6 @@ class ActionsModel:
register_launcher_action_path(path)
self._discovered_actions = (
discover_launcher_actions()
+ self._get_applications_action_classes()
)
return self._discovered_actions
@ -498,62 +563,29 @@ class ActionsModel:
action_items = {}
for identifier, action in self._get_action_objects().items():
is_application = isinstance(action, ApplicationAction)
# Backwards compatibility from 0.3.3 (24/06/10)
# TODO: Remove in future releases
if is_application and hasattr(action, "project_settings"):
if hasattr(action, "project_settings"):
action.project_entities[project_name] = project_entity
action.project_settings[project_name] = project_settings
label = action.label or identifier
variant_label = getattr(action, "label_variant", None)
full_label = self.calculate_full_label(
label, variant_label
)
icon = get_action_icon(action)
item = ActionItem(
identifier,
label,
variant_label,
icon,
action.order,
is_application,
False
action_type="local",
identifier=identifier,
order=action.order,
label=label,
variant_label=variant_label,
full_label=full_label,
icon=icon,
config_fields=[],
)
action_items[identifier] = item
self._action_items[project_name] = action_items
return action_items
def _get_applications_action_classes(self):
addons_manager = self._get_addons_manager()
applications_addon = addons_manager.get_enabled_addon("applications")
if hasattr(applications_addon, "get_applications_action_classes"):
return applications_addon.get_applications_action_classes()
# Backwards compatibility from 0.3.3 (24/06/10)
# TODO: Remove in future releases
actions = []
if applications_addon is None:
return actions
manager = applications_addon.get_applications_manager()
for full_name, application in manager.applications.items():
if not application.enabled:
continue
action = type(
"app_{}".format(full_name),
(ApplicationAction,),
{
"identifier": "application.{}".format(full_name),
"application": application,
"name": application.name,
"label": application.group.label,
"label_variant": application.label,
"group": None,
"icon": application.icon,
"color": getattr(application, "color", None),
"order": getattr(application, "order", None) or 0,
"data": {}
}
)
actions.append(action)
return actions

File diff suppressed because it is too large

View file

@ -1,154 +0,0 @@
from qtpy import QtWidgets, QtCore
from ayon_core.tools.flickcharm import FlickCharm
from ayon_core.tools.utils import (
PlaceholderLineEdit,
RefreshButton,
ProjectsQtModel,
ProjectSortFilterProxy,
)
from ayon_core.tools.common_models import PROJECTS_MODEL_SENDER
class ProjectIconView(QtWidgets.QListView):
"""Styled ListView that allows to toggle between icon and list mode.
Toggling between the two modes is done by Right Mouse Click.
"""
IconMode = 0
ListMode = 1
def __init__(self, parent=None, mode=ListMode):
super(ProjectIconView, self).__init__(parent=parent)
# Workaround for scrolling being super slow or fast when
# toggling between the two visual modes
self.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.setObjectName("IconView")
self._mode = None
self.set_mode(mode)
def set_mode(self, mode):
if mode == self._mode:
return
self._mode = mode
if mode == self.IconMode:
self.setViewMode(QtWidgets.QListView.IconMode)
self.setResizeMode(QtWidgets.QListView.Adjust)
self.setWrapping(True)
self.setWordWrap(True)
self.setGridSize(QtCore.QSize(151, 90))
self.setIconSize(QtCore.QSize(50, 50))
self.setSpacing(0)
self.setAlternatingRowColors(False)
self.setProperty("mode", "icon")
self.style().polish(self)
self.verticalScrollBar().setSingleStep(30)
elif self.ListMode:
self.setProperty("mode", "list")
self.style().polish(self)
self.setViewMode(QtWidgets.QListView.ListMode)
self.setResizeMode(QtWidgets.QListView.Adjust)
self.setWrapping(False)
self.setWordWrap(False)
self.setIconSize(QtCore.QSize(20, 20))
self.setGridSize(QtCore.QSize(100, 25))
self.setSpacing(0)
self.setAlternatingRowColors(False)
self.verticalScrollBar().setSingleStep(34)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.RightButton:
self.set_mode(int(not self._mode))
return super(ProjectIconView, self).mousePressEvent(event)
class ProjectsWidget(QtWidgets.QWidget):
"""Projects Page"""
refreshed = QtCore.Signal()
def __init__(self, controller, parent=None):
super(ProjectsWidget, self).__init__(parent=parent)
header_widget = QtWidgets.QWidget(self)
projects_filter_text = PlaceholderLineEdit(header_widget)
projects_filter_text.setPlaceholderText("Filter projects...")
refresh_btn = RefreshButton(header_widget)
header_layout = QtWidgets.QHBoxLayout(header_widget)
header_layout.setContentsMargins(0, 0, 0, 0)
header_layout.addWidget(projects_filter_text, 1)
header_layout.addWidget(refresh_btn, 0)
projects_view = ProjectIconView(parent=self)
projects_view.setSelectionMode(QtWidgets.QListView.NoSelection)
flick = FlickCharm(parent=self)
flick.activateOn(projects_view)
projects_model = ProjectsQtModel(controller)
projects_proxy_model = ProjectSortFilterProxy()
projects_proxy_model.setSourceModel(projects_model)
projects_view.setModel(projects_proxy_model)
main_layout = QtWidgets.QVBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.addWidget(header_widget, 0)
main_layout.addWidget(projects_view, 1)
projects_view.clicked.connect(self._on_view_clicked)
projects_model.refreshed.connect(self.refreshed)
projects_filter_text.textChanged.connect(
self._on_project_filter_change)
refresh_btn.clicked.connect(self._on_refresh_clicked)
controller.register_event_callback(
"projects.refresh.finished",
self._on_projects_refresh_finished
)
self._controller = controller
self._projects_view = projects_view
self._projects_model = projects_model
self._projects_proxy_model = projects_proxy_model
def has_content(self):
"""Model has at least one project.
Returns:
bool: True if there is any content in the model.
"""
return self._projects_model.has_content()
def _on_view_clicked(self, index):
if not index.isValid():
return
model = index.model()
flags = model.flags(index)
if not flags & QtCore.Qt.ItemIsEnabled:
return
project_name = index.data(QtCore.Qt.DisplayRole)
self._controller.set_selected_project(project_name)
def _on_project_filter_change(self, text):
self._projects_proxy_model.setFilterFixedString(text)
def _on_refresh_clicked(self):
self._controller.refresh()
def _on_projects_refresh_finished(self, event):
if event["sender"] != PROJECTS_MODEL_SENDER:
self._projects_model.refresh()

View file

@ -1,7 +0,0 @@
import os
RESOURCES_DIR = os.path.dirname(os.path.abspath(__file__))
def get_options_image_path():
return os.path.join(RESOURCES_DIR, "options.png")

Binary file not shown.


View file

@ -1,11 +1,15 @@
from qtpy import QtWidgets, QtCore, QtGui
from ayon_core import style
from ayon_core import resources
from ayon_core import style, resources
from ayon_core.tools.launcher.control import BaseLauncherController
from ayon_core.tools.utils import (
MessageOverlayObject,
PlaceholderLineEdit,
RefreshButton,
ProjectsWidget,
)
from .projects_widget import ProjectsWidget
from .hierarchy_page import HierarchyPage
from .actions_widget import ActionsWidget
@ -41,6 +45,8 @@ class LauncherWindow(QtWidgets.QWidget):
self._controller = controller
overlay_object = MessageOverlayObject(self)
# Main content - Pages & Actions
content_body = QtWidgets.QSplitter(self)
@ -48,7 +54,25 @@ class LauncherWindow(QtWidgets.QWidget):
pages_widget = QtWidgets.QWidget(content_body)
# - First page - Projects
projects_page = ProjectsWidget(controller, pages_widget)
projects_page = QtWidgets.QWidget(pages_widget)
projects_header_widget = QtWidgets.QWidget(projects_page)
projects_filter_text = PlaceholderLineEdit(projects_header_widget)
projects_filter_text.setPlaceholderText("Filter projects...")
refresh_btn = RefreshButton(projects_header_widget)
projects_header_layout = QtWidgets.QHBoxLayout(projects_header_widget)
projects_header_layout.setContentsMargins(0, 0, 0, 0)
projects_header_layout.addWidget(projects_filter_text, 1)
projects_header_layout.addWidget(refresh_btn, 0)
projects_widget = ProjectsWidget(controller, pages_widget)
projects_layout = QtWidgets.QVBoxLayout(projects_page)
projects_layout.setContentsMargins(0, 0, 0, 0)
projects_layout.addWidget(projects_header_widget, 0)
projects_layout.addWidget(projects_widget, 1)
# - Second page - Hierarchy (folders & tasks)
hierarchy_page = HierarchyPage(controller, pages_widget)
@ -78,26 +102,18 @@ class LauncherWindow(QtWidgets.QWidget):
content_body.setSizes([580, 160])
# Footer
footer_widget = QtWidgets.QWidget(self)
# - Message label
message_label = QtWidgets.QLabel(footer_widget)
# footer_widget = QtWidgets.QWidget(self)
#
# action_history = ActionHistory(footer_widget)
# action_history.setStatusTip("Show Action History")
footer_layout = QtWidgets.QHBoxLayout(footer_widget)
footer_layout.setContentsMargins(0, 0, 0, 0)
footer_layout.addWidget(message_label, 1)
#
# footer_layout = QtWidgets.QHBoxLayout(footer_widget)
# footer_layout.setContentsMargins(0, 0, 0, 0)
# footer_layout.addWidget(action_history, 0)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(content_body, 1)
layout.addWidget(footer_widget, 0)
message_timer = QtCore.QTimer()
message_timer.setInterval(self.message_interval)
message_timer.setSingleShot(True)
# layout.addWidget(footer_widget, 0)
actions_refresh_timer = QtCore.QTimer()
actions_refresh_timer.setInterval(self.refresh_interval)
@ -108,13 +124,16 @@ class LauncherWindow(QtWidgets.QWidget):
page_slide_anim.setEndValue(1.0)
page_slide_anim.setEasingCurve(QtCore.QEasingCurve.OutQuad)
projects_page.refreshed.connect(self._on_projects_refresh)
message_timer.timeout.connect(self._on_message_timeout)
refresh_btn.clicked.connect(self._on_refresh_request)
projects_widget.refreshed.connect(self._on_projects_refresh)
actions_refresh_timer.timeout.connect(
self._on_actions_refresh_timeout)
page_slide_anim.valueChanged.connect(
self._on_page_slide_value_changed)
page_slide_anim.finished.connect(self._on_page_slide_finished)
projects_filter_text.textChanged.connect(
self._on_project_filter_change)
controller.register_event_callback(
"selection.project.changed",
@ -128,6 +147,16 @@ class LauncherWindow(QtWidgets.QWidget):
"action.trigger.finished",
self._on_action_trigger_finished,
)
controller.register_event_callback(
"webaction.trigger.started",
self._on_webaction_trigger_started,
)
controller.register_event_callback(
"webaction.trigger.finished",
self._on_webaction_trigger_finished,
)
self._overlay_object = overlay_object
self._controller = controller
@ -139,13 +168,11 @@ class LauncherWindow(QtWidgets.QWidget):
self._pages_widget = pages_widget
self._pages_layout = pages_layout
self._projects_page = projects_page
self._projects_widget = projects_widget
self._hierarchy_page = hierarchy_page
self._actions_widget = actions_widget
self._message_label = message_label
# self._action_history = action_history
self._message_timer = message_timer
self._actions_refresh_timer = actions_refresh_timer
self._page_slide_anim = page_slide_anim
@ -185,13 +212,6 @@ class LauncherWindow(QtWidgets.QWidget):
else:
self._refresh_on_activate = True
def _echo(self, message):
self._message_label.setText(str(message))
self._message_timer.start()
def _on_message_timeout(self):
self._message_label.setText("")
def _on_project_selection_change(self, event):
project_name = event["project_name"]
self._selected_project_name = project_name
@ -201,6 +221,12 @@ class LauncherWindow(QtWidgets.QWidget):
elif self._is_on_projects_page:
self._go_to_hierarchy_page(project_name)
def _on_project_filter_change(self, text):
self._projects_widget.set_name_filter(text)
def _on_refresh_request(self):
self._controller.refresh()
def _on_projects_refresh(self):
# Refresh only actions on projects page
if self._is_on_projects_page:
@ -208,20 +234,76 @@ class LauncherWindow(QtWidgets.QWidget):
return
# No projects were found -> go back to projects page
if not self._projects_page.has_content():
if not self._projects_widget.has_content():
self._go_to_projects_page()
return
self._hierarchy_page.refresh()
self._actions_widget.refresh()
def _show_toast_message(self, message, success=True, message_id=None):
message_type = None
if not success:
message_type = "error"
self._overlay_object.add_message(
message, message_type, message_id=message_id
)
def _on_action_trigger_started(self, event):
self._echo("Running action: {}".format(event["full_label"]))
self._show_toast_message(
"Running: {}".format(event["full_label"]),
message_id=event["trigger_id"],
)
def _on_action_trigger_finished(self, event):
if not event["failed"]:
action_label = event["full_label"]
if event["failed"]:
message = f"Failed to run: {action_label}"
else:
message = f"Finished: {action_label}"
self._show_toast_message(
message,
not event["failed"],
message_id=event["trigger_id"],
)
def _on_webaction_trigger_started(self, event):
self._show_toast_message(
"Running: {}".format(event["full_label"]),
message_id=event["trigger_id"],
)
def _on_webaction_trigger_finished(self, event):
clipboard_text = event["clipboard_text"]
if clipboard_text:
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(clipboard_text)
action_label = event["full_label"]
# Avoid showing the exception message
if event["trigger_failed"]:
self._show_toast_message(
f"Failed to run: {action_label}",
message_id=event["trigger_id"]
)
return
self._echo("Failed: {}".format(event["error_message"]))
# Failed to run webaction, e.g. because of missing webaction handling
# - not reported by server
if event["error_message"]:
self._show_toast_message(
event["error_message"],
success=False,
message_id=event["trigger_id"]
)
return
if event["message"]:
self._show_toast_message(event["message"], event["success"])
if event["form"]:
self._actions_widget.handle_webaction_form_event(event)
def _is_page_slide_anim_running(self):
return (
@ -231,6 +313,9 @@ class LauncherWindow(QtWidgets.QWidget):
def _go_to_projects_page(self):
if self._is_on_projects_page:
return
# Deselect project in projects widget
self._projects_widget.set_selected_project(None)
self._is_on_projects_page = True
self._hierarchy_page.set_page_visible(False)
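The _on_webaction_trigger_finished handler above branches on several event keys. Below is a standalone sketch of the same decision flow reduced to a pure function; the event keys (full_label, trigger_failed, error_message, message, success) mirror the handler in this diff, while the function name and sample values are hypothetical.

# Sketch of the webaction-finished branching as a pure function.
def summarize_webaction_result(event):
    """Return (text, success) for a toast message, or None to stay silent."""
    label = event["full_label"]
    if event.get("trigger_failed"):
        return f"Failed to run: {label}", False
    if event.get("error_message"):
        return event["error_message"], False
    if event.get("message"):
        return event["message"], bool(event.get("success"))
    return None


if __name__ == "__main__":
    print(summarize_webaction_result(
        {"full_label": "Open in browser", "trigger_failed": True}
    ))  # -> ('Failed to run: Open in browser', False)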

View file

@ -1,11 +1,15 @@
"""Abstract base classes for loader tool."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List
from typing import Iterable, Any, Optional
from ayon_core.lib.attribute_definitions import (
AbstractAttrDef,
serialize_attr_defs,
deserialize_attr_defs,
serialize_attr_defs,
)
from ayon_core.tools.common_models import TaskItem, TagItem
class ProductTypeItem:
@ -16,7 +20,7 @@ class ProductTypeItem:
icon (dict[str, Any]): Product type icon definition.
"""
def __init__(self, name, icon):
def __init__(self, name: str, icon: dict[str, Any]):
self.name = name
self.icon = icon
@ -31,6 +35,41 @@ class ProductTypeItem:
return cls(**data)
class ProductBaseTypeItem:
"""Item representing the product base type."""
def __init__(self, name: str, icon: dict[str, Any]):
"""Initialize product base type item."""
self.name = name
self.icon = icon
def to_data(self) -> dict[str, Any]:
"""Convert item to data dictionary.
Returns:
dict[str, Any]: Data representation of the item.
"""
return {
"name": self.name,
"icon": self.icon,
}
@classmethod
def from_data(
cls, data: dict[str, Any]) -> ProductBaseTypeItem:
"""Create item from data dictionary.
Args:
data (dict[str, Any]): Data to create item from.
Returns:
ProductBaseTypeItem: Item created from the provided data.
"""
return cls(**data)
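A quick round-trip check of the new item, assuming ayon_core is importable (the import path matches the one used elsewhere in this diff); the "geometry" name is only an example value.

# Round-trip sketch for ProductBaseTypeItem (assumes ayon_core is installed).
from ayon_core.tools.loader.abstract import ProductBaseTypeItem

icon = {"type": "awesome-font", "name": "fa.folder", "color": "#0091B2"}
item = ProductBaseTypeItem("geometry", icon)

data = item.to_data()
restored = ProductBaseTypeItem.from_data(data)

assert restored.name == item.name
assert restored.icon == item.icon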
class ProductItem:
"""Product item with it versions.
@ -49,35 +88,41 @@ class ProductItem:
def __init__(
self,
product_id,
product_type,
product_name,
product_icon,
product_type_icon,
product_in_scene,
group_name,
folder_id,
folder_label,
version_items,
product_id: str,
product_type: str,
product_base_type: str,
product_name: str,
product_icon: dict[str, Any],
product_type_icon: dict[str, Any],
product_base_type_icon: dict[str, Any],
group_name: str,
folder_id: str,
folder_label: str,
version_items: dict[str, VersionItem],
product_in_scene: bool,
):
self.product_id = product_id
self.product_type = product_type
self.product_base_type = product_base_type
self.product_name = product_name
self.product_icon = product_icon
self.product_type_icon = product_type_icon
self.product_base_type_icon = product_base_type_icon
self.product_in_scene = product_in_scene
self.group_name = group_name
self.folder_id = folder_id
self.folder_label = folder_label
self.version_items = version_items
def to_data(self):
def to_data(self) -> dict[str, Any]:
return {
"product_id": self.product_id,
"product_type": self.product_type,
"product_base_type": self.product_base_type,
"product_name": self.product_name,
"product_icon": self.product_icon,
"product_type_icon": self.product_type_icon,
"product_base_type_icon": self.product_base_type_icon,
"product_in_scene": self.product_in_scene,
"group_name": self.group_name,
"folder_id": self.folder_id,
@ -113,6 +158,7 @@ class VersionItem:
published_time (Union[str, None]): Published time in format
'%Y%m%dT%H%M%SZ'.
status (Union[str, None]): Status name.
tags (Union[list[str], None]): Tags.
author (Union[str, None]): Author.
frame_range (Union[str, None]): Frame range.
duration (Union[int, None]): Duration.
@ -124,21 +170,22 @@ class VersionItem:
def __init__(
self,
version_id,
version,
is_hero,
product_id,
task_id,
thumbnail_id,
published_time,
author,
status,
frame_range,
duration,
handles,
step,
comment,
source,
version_id: str,
version: int,
is_hero: bool,
product_id: str,
task_id: Optional[str],
thumbnail_id: Optional[str],
published_time: Optional[str],
tags: Optional[list[str]],
author: Optional[str],
status: Optional[str],
frame_range: Optional[str],
duration: Optional[int],
handles: Optional[str],
step: Optional[int],
comment: Optional[str],
source: Optional[str],
):
self.version_id = version_id
self.product_id = product_id
@ -148,6 +195,7 @@ class VersionItem:
self.is_hero = is_hero
self.published_time = published_time
self.author = author
self.tags = tags
self.status = status
self.frame_range = frame_range
self.duration = duration
@ -198,7 +246,7 @@ class VersionItem:
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def to_data(self):
def to_data(self) -> dict[str, Any]:
return {
"version_id": self.version_id,
"product_id": self.product_id,
@ -208,6 +256,7 @@ class VersionItem:
"is_hero": self.is_hero,
"published_time": self.published_time,
"author": self.author,
"tags": self.tags,
"status": self.status,
"frame_range": self.frame_range,
"duration": self.duration,
@ -218,7 +267,7 @@ class VersionItem:
}
@classmethod
def from_data(cls, data):
def from_data(cls, data: dict[str, Any]) -> VersionItem:
return cls(**data)
@ -354,8 +403,8 @@ class ProductTypesFilter:
Defines the filtering for product types.
"""
def __init__(self, product_types: List[str], is_allow_list: bool):
self.product_types: List[str] = product_types
def __init__(self, product_types: list[str], is_allow_list: bool):
self.product_types: list[str] = product_types
self.is_allow_list: bool = is_allow_list
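How such a filter is typically applied: with an allow list only the listed product types pass, with a deny list everything except the listed types passes. A minimal sketch with a hypothetical helper; only the two attributes above come from the class.

# Hypothetical helper showing how a ProductTypesFilter could be evaluated.
def product_type_passes(product_type: str, types_filter) -> bool:
    listed = product_type in types_filter.product_types
    return listed if types_filter.is_allow_list else not listed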
@ -517,8 +566,21 @@ class FrontendLoaderController(_BaseLoaderController):
Returns:
list[ProjectItem]: List of project items.
"""
"""
pass
@abstractmethod
def get_project_anatomy_tags(self, project_name: str) -> list[TagItem]:
"""Tag items defined on project anatomy.
Args:
project_name (str): Project name.
Returns:
list[TagItem]: Tag definition items.
"""
pass
@abstractmethod
@ -542,7 +604,12 @@ class FrontendLoaderController(_BaseLoaderController):
pass
@abstractmethod
def get_task_items(self, project_name, folder_ids, sender=None):
def get_task_items(
self,
project_name: str,
folder_ids: Iterable[str],
sender: Optional[str] = None,
) -> list[TaskItem]:
"""Task items for folder ids.
Args:
@ -590,6 +657,21 @@ class FrontendLoaderController(_BaseLoaderController):
"""
pass
@abstractmethod
def get_available_tags_by_entity_type(
self, project_name: str
) -> dict[str, list[str]]:
"""Get available tags by entity type.
Args:
project_name (str): Project name.
Returns:
dict[str, list[str]]: Available tags by entity type.
"""
pass
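A hypothetical example of the returned mapping, only to illustrate the dict[str, list[str]] shape; the entity-type keys and tag names below are made up, not taken from the server schema.

# Illustrative shape only; keys and tag names are invented for the example.
available_tags = {
    "task": ["priority", "blocked"],
    "version": ["approved", "for-review"],
}
assert all(isinstance(tags, list) for tags in available_tags.values())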
@abstractmethod
def get_project_status_items(self, project_name, sender=None):
"""Items for all projects available on server.

View file

@ -13,6 +13,7 @@ from ayon_core.tools.common_models import (
ProjectsModel,
HierarchyModel,
ThumbnailsModel,
TagItem,
)
from .abstract import (
@ -223,6 +224,16 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
output[folder_id] = label
return output
def get_available_tags_by_entity_type(
self, project_name: str
) -> dict[str, list[str]]:
return self._hierarchy_model.get_available_tags_by_entity_type(
project_name
)
def get_project_anatomy_tags(self, project_name: str) -> list[TagItem]:
return self._projects_model.get_project_anatomy_tags(project_name)
def get_product_items(self, project_name, folder_ids, sender=None):
return self._products_model.get_product_items(
project_name, folder_ids, sender)

View file

@ -322,7 +322,6 @@ class LoaderActionsModel:
available_loaders = self._filter_loaders_by_tool_name(
project_name, discover_loader_plugins(project_name)
)
repre_loaders = []
product_loaders = []
loaders_by_identifier = {}
@ -340,6 +339,7 @@ class LoaderActionsModel:
loaders_by_identifier_c.update_data(loaders_by_identifier)
product_loaders_c.update_data(product_loaders)
repre_loaders_c.update_data(repre_loaders)
return product_loaders, repre_loaders
def _get_loader_by_identifier(self, project_name, identifier):
@ -719,7 +719,12 @@ class LoaderActionsModel:
loader, repre_contexts, options
)
def _load_representations_by_loader(self, loader, repre_contexts, options):
def _load_representations_by_loader(
self,
loader,
repre_contexts,
options
):
"""Loops through list of repre_contexts and loads them with one loader
Args:
@ -770,7 +775,12 @@ class LoaderActionsModel:
))
return error_info
def _load_products_by_loader(self, loader, version_contexts, options):
def _load_products_by_loader(
self,
loader,
version_contexts,
options
):
"""Triggers load with ProductLoader type of loaders.
Warning:
@ -796,7 +806,6 @@ class LoaderActionsModel:
version_contexts,
options=options
)
except Exception as exc:
formatted_traceback = None
if not isinstance(exc, LoadError):

View file

@ -1,24 +1,34 @@
"""Products model for loader tools."""
from __future__ import annotations
import collections
import contextlib
from typing import TYPE_CHECKING, Iterable, Optional
import arrow
import ayon_api
from ayon_api.operations import OperationsSession
from ayon_core.lib import NestedCacheItem
from ayon_core.style import get_default_entity_icon_color
from ayon_core.tools.loader.abstract import (
ProductTypeItem,
ProductBaseTypeItem,
ProductItem,
VersionItem,
RepreItem,
)
if TYPE_CHECKING:
from ayon_api.typing import ProductBaseTypeDict, ProductDict, VersionDict
PRODUCTS_MODEL_SENDER = "products.model"
def version_item_from_entity(version):
version_attribs = version["attrib"]
tags = version["tags"]
frame_start = version_attribs.get("frameStart")
frame_end = version_attribs.get("frameEnd")
handle_start = version_attribs.get("handleStart")
@ -59,6 +69,7 @@ def version_item_from_entity(version):
thumbnail_id=version["thumbnailId"],
published_time=published_time,
author=author,
tags=tags,
status=version["status"],
frame_range=frame_range,
duration=duration,
@ -70,9 +81,10 @@ def version_item_from_entity(version):
def product_item_from_entity(
product_entity,
product_entity: ProductDict,
version_entities,
product_type_items_by_name,
product_type_items_by_name: dict[str, ProductTypeItem],
product_base_type_items_by_name: dict[str, ProductBaseTypeItem],
folder_label,
product_in_scene,
):
@ -88,8 +100,20 @@ def product_item_from_entity(
# Cache the item for future use
product_type_items_by_name[product_type] = product_type_item
product_type_icon = product_type_item.icon
product_base_type = product_entity.get("productBaseType")
product_base_type_item = product_base_type_items_by_name.get(
product_base_type)
# Same as for product type item above. Not sure if this is still needed
# though.
if product_base_type_item is None:
product_base_type_item = create_default_product_base_type_item(
product_base_type)
# Cache the item for future use
product_base_type_items_by_name[product_base_type] = (
product_base_type_item)
product_type_icon = product_type_item.icon
product_base_type_icon = product_base_type_item.icon
product_icon = {
"type": "awesome-font",
"name": "fa.file-o",
@ -103,9 +127,11 @@ def product_item_from_entity(
return ProductItem(
product_id=product_entity["id"],
product_type=product_type,
product_base_type=product_base_type,
product_name=product_entity["name"],
product_icon=product_icon,
product_type_icon=product_type_icon,
product_base_type_icon=product_base_type_icon,
product_in_scene=product_in_scene,
group_name=group,
folder_id=product_entity["folderId"],
@ -114,7 +140,8 @@ def product_item_from_entity(
)
def product_type_item_from_data(product_type_data):
def product_type_item_from_data(
product_type_data: ProductDict) -> ProductTypeItem:
# TODO implement icon implementation
# icon = product_type_data["icon"]
# color = product_type_data["color"]
@ -127,7 +154,29 @@ def product_type_item_from_data(product_type_data):
return ProductTypeItem(product_type_data["name"], icon)
def create_default_product_type_item(product_type):
def product_base_type_item_from_data(
product_base_type_data: ProductBaseTypeDict
) -> ProductBaseTypeItem:
"""Create product base type item from data.
Args:
product_base_type_data (ProductBaseTypeDict): Product base type data.
Returns:
ProductBaseTypeItem: Product base type item.
"""
icon = {
"type": "awesome-font",
"name": "fa.folder",
"color": "#0091B2",
}
return ProductBaseTypeItem(
name=product_base_type_data["name"],
icon=icon)
def create_default_product_type_item(product_type: str) -> ProductTypeItem:
icon = {
"type": "awesome-font",
"name": "fa.folder",
@ -136,10 +185,28 @@ def create_default_product_type_item(product_type):
return ProductTypeItem(product_type, icon)
def create_default_product_base_type_item(
product_base_type: str) -> ProductBaseTypeItem:
"""Create default product base type item.
Args:
product_base_type (str): Product base type name.
Returns:
ProductBaseTypeItem: Default product base type item.
"""
icon = {
"type": "awesome-font",
"name": "fa.folder",
"color": "#0091B2",
}
return ProductBaseTypeItem(product_base_type, icon)
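These default factories are used together with a small lookup-or-create-and-cache step in product_item_from_entity above. A standalone sketch of that pattern with hypothetical names; only the pattern itself mirrors the diff.

# Standalone sketch of the lookup-or-create-and-cache pattern used above.
def get_or_create_item(name, items_by_name, default_factory):
    item = items_by_name.get(name)
    if item is None:
        item = default_factory(name)
        # Cache the item for future use
        items_by_name[name] = item
    return item


cache = {}
first = get_or_create_item("geometry", cache, lambda n: {"name": n})
second = get_or_create_item("geometry", cache, lambda n: {"name": n})
assert first is second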
class ProductsModel:
"""Model for products, version and representation.
All of the entities are product based. This model prepares data for UI
All the entities are product based. This model prepares data for UI
and caches it for faster access.
Note:
@ -161,6 +228,8 @@ class ProductsModel:
# Cache helpers
self._product_type_items_cache = NestedCacheItem(
levels=1, default_factory=list, lifetime=self.lifetime)
self._product_base_type_items_cache = NestedCacheItem(
levels=1, default_factory=list, lifetime=self.lifetime)
self._product_items_cache = NestedCacheItem(
levels=2, default_factory=dict, lifetime=self.lifetime)
self._repre_items_cache = NestedCacheItem(
@ -199,6 +268,36 @@ class ProductsModel:
])
return cache.get_data()
def get_product_base_type_items(
self,
project_name: Optional[str]) -> list[ProductBaseTypeItem]:
"""Product base type items for the project.
Args:
project_name (Optional[str]): Project name.
Returns:
list[ProductBaseTypeItem]: Product base type items.
"""
if not project_name:
return []
cache = self._product_base_type_items_cache[project_name]
if not cache.is_valid:
product_base_types = []
# TODO: temporary guard; update once product base types are actually
# implemented and available on the server.
if hasattr(ayon_api, "get_project_product_base_types"):
product_base_types = ayon_api.get_project_product_base_types(
project_name
)
cache.update_data([
product_base_type_item_from_data(product_base_type)
for product_base_type in product_base_types
])
return cache.get_data()
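Because the server endpoint may not exist yet, the method above feature-detects it with hasattr and falls back to an empty list, caching whatever it gets. A standalone sketch of the same guard; "server_api" and the function name are hypothetical stand-ins, not the real ayon_api module.

# Sketch of feature-detecting an optional API with hasattr.
import types

server_api = types.SimpleNamespace()  # pretend module without the endpoint


def fetch_product_base_types(project_name):
    if hasattr(server_api, "get_project_product_base_types"):
        return server_api.get_project_product_base_types(project_name)
    return []


print(fetch_product_base_types("demo_project"))  # -> []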
def get_product_items(self, project_name, folder_ids, sender):
"""Product items with versions for project and folder ids.
@ -449,11 +548,12 @@ class ProductsModel:
def _create_product_items(
self,
project_name,
products,
versions,
project_name: str,
products: Iterable[ProductDict],
versions: Iterable[VersionDict],
folder_items=None,
product_type_items=None,
product_base_type_items: Optional[Iterable[ProductBaseTypeItem]] = None
):
if folder_items is None:
folder_items = self._controller.get_folder_items(project_name)
@ -461,6 +561,11 @@ class ProductsModel:
if product_type_items is None:
product_type_items = self.get_product_type_items(project_name)
if product_base_type_items is None:
product_base_type_items = self.get_product_base_type_items(
project_name
)
loaded_product_ids = self._controller.get_loaded_product_ids()
versions_by_product_id = collections.defaultdict(list)
@ -470,7 +575,13 @@ class ProductsModel:
product_type_item.name: product_type_item
for product_type_item in product_type_items
}
output = {}
product_base_type_items_by_name: dict[str, ProductBaseTypeItem] = {
product_base_type_item.name: product_base_type_item
for product_base_type_item in product_base_type_items
}
output: dict[str, ProductItem] = {}
for product in products:
product_id = product["id"]
folder_id = product["folderId"]
@ -484,6 +595,7 @@ class ProductsModel:
product,
versions,
product_type_items_by_name,
product_base_type_items_by_name,
folder_item.label,
product_id in loaded_product_ids,
)

View file

@ -84,15 +84,17 @@ def _get_options(action, action_item, parent):
if not getattr(action, "optioned", False) or not options:
return {}
dialog_title = action.label + " Options"
if isinstance(options[0], AbstractAttrDef):
qargparse_options = False
dialog = AttributeDefinitionsDialog(options, parent)
dialog = AttributeDefinitionsDialog(
options, title=dialog_title, parent=parent
)
else:
qargparse_options = True
dialog = OptionDialog(parent)
dialog.create(options)
dialog.setWindowTitle(action.label + " Options")
dialog.setWindowTitle(dialog_title)
if not dialog.exec_():
return None

View file

@ -1,170 +0,0 @@
from __future__ import annotations
from qtpy import QtGui, QtCore
from ._multicombobox import (
CustomPaintMultiselectComboBox,
BaseQtModel,
)
STATUS_ITEM_TYPE = 0
SELECT_ALL_TYPE = 1
DESELECT_ALL_TYPE = 2
SWAP_STATE_TYPE = 3
PRODUCT_TYPE_ROLE = QtCore.Qt.UserRole + 1
ITEM_TYPE_ROLE = QtCore.Qt.UserRole + 2
ITEM_SUBTYPE_ROLE = QtCore.Qt.UserRole + 3
class ProductTypesQtModel(BaseQtModel):
refreshed = QtCore.Signal()
def __init__(self, controller):
self._reset_filters_on_refresh = True
self._refreshing = False
self._bulk_change = False
self._items_by_name = {}
super().__init__(
item_type_role=ITEM_TYPE_ROLE,
item_subtype_role=ITEM_SUBTYPE_ROLE,
empty_values_label="No product types...",
controller=controller,
)
def is_refreshing(self):
return self._refreshing
def refresh(self, project_name):
self._refreshing = True
super().refresh(project_name)
self._reset_filters_on_refresh = False
self._refreshing = False
self.refreshed.emit()
def reset_product_types_filter_on_refresh(self):
self._reset_filters_on_refresh = True
def _get_standard_items(self) -> list[QtGui.QStandardItem]:
return list(self._items_by_name.values())
def _clear_standard_items(self):
self._items_by_name.clear()
def _prepare_new_value_items(self, project_name: str, _: bool) -> tuple[
list[QtGui.QStandardItem], list[QtGui.QStandardItem]
]:
product_type_items = self._controller.get_product_type_items(
project_name)
self._last_project = project_name
names_to_remove = set(self._items_by_name.keys())
items = []
items_filter_required = {}
for product_type_item in product_type_items:
name = product_type_item.name
names_to_remove.discard(name)
item = self._items_by_name.get(name)
# Apply filter to new items or if filters reset is requested
filter_required = self._reset_filters_on_refresh
if item is None:
filter_required = True
item = QtGui.QStandardItem(name)
item.setData(name, PRODUCT_TYPE_ROLE)
item.setEditable(False)
item.setCheckable(True)
self._items_by_name[name] = item
items.append(item)
if filter_required:
items_filter_required[name] = item
if items_filter_required:
product_types_filter = self._controller.get_product_types_filter()
for product_type, item in items_filter_required.items():
matching = (
int(product_type in product_types_filter.product_types)
+ int(product_types_filter.is_allow_list)
)
item.setCheckState(
QtCore.Qt.Checked
if matching % 2 == 0
else QtCore.Qt.Unchecked
)
items_to_remove = []
for name in names_to_remove:
items_to_remove.append(
self._items_by_name.pop(name)
)
# Uncheck all if all are checked (same result)
if all(
item.checkState() == QtCore.Qt.Checked
for item in items
):
for item in items:
item.setCheckState(QtCore.Qt.Unchecked)
return items, items_to_remove
class ProductTypesCombobox(CustomPaintMultiselectComboBox):
def __init__(self, controller, parent):
self._controller = controller
model = ProductTypesQtModel(controller)
super().__init__(
PRODUCT_TYPE_ROLE,
PRODUCT_TYPE_ROLE,
QtCore.Qt.ForegroundRole,
QtCore.Qt.DecorationRole,
item_type_role=ITEM_TYPE_ROLE,
model=model,
parent=parent
)
model.refreshed.connect(self._on_model_refresh)
self.set_placeholder_text("Product types filter...")
self._model = model
self._last_project_name = None
self._fully_disabled_filter = False
controller.register_event_callback(
"selection.project.changed",
self._on_project_change
)
controller.register_event_callback(
"projects.refresh.finished",
self._on_projects_refresh
)
self.setToolTip("Product types filter")
self.value_changed.connect(
self._on_product_type_filter_change
)
def reset_product_types_filter_on_refresh(self):
self._model.reset_product_types_filter_on_refresh()
def _on_model_refresh(self):
self.value_changed.emit()
def _on_product_type_filter_change(self):
lines = ["Product types filter"]
for item in self.get_value_info():
status_name, enabled = item
lines.append(f"{'' if enabled else ''} {status_name}")
self.setToolTip("\n".join(lines))
def _on_project_change(self, event):
project_name = event["project_name"]
self._last_project_name = project_name
self._model.refresh(project_name)
def _on_projects_refresh(self):
if self._last_project_name:
self._model.refresh(self._last_project_name)
self._on_product_type_filter_change()

View file

@ -1,3 +1,5 @@
from __future__ import annotations
import numbers
import uuid
from typing import Dict
@ -18,16 +20,19 @@ from .products_model import (
SYNC_REMOTE_SITE_AVAILABILITY,
)
STATUS_NAME_ROLE = QtCore.Qt.UserRole + 1
TASK_ID_ROLE = QtCore.Qt.UserRole + 2
COMBO_VERSION_ID_ROLE = QtCore.Qt.UserRole + 1
COMBO_TASK_ID_ROLE = QtCore.Qt.UserRole + 2
COMBO_STATUS_NAME_ROLE = QtCore.Qt.UserRole + 3
COMBO_VERSION_TAGS_ROLE = QtCore.Qt.UserRole + 4
COMBO_TASK_TAGS_ROLE = QtCore.Qt.UserRole + 5
class VersionsModel(QtGui.QStandardItemModel):
class ComboVersionsModel(QtGui.QStandardItemModel):
def __init__(self):
super().__init__()
self._items_by_id = {}
def update_versions(self, version_items):
def update_versions(self, version_items, task_tags_by_version_id):
version_ids = {
version_item.version_id
for version_item in version_items
@ -39,6 +44,7 @@ class VersionsModel(QtGui.QStandardItemModel):
item = self._items_by_id.pop(item_id)
root_item.removeRow(item.row())
version_tags_by_version_id = {}
for idx, version_item in enumerate(version_items):
version_id = version_item.version_id
@ -48,34 +54,74 @@ class VersionsModel(QtGui.QStandardItemModel):
item = QtGui.QStandardItem(label)
item.setData(version_id, QtCore.Qt.UserRole)
self._items_by_id[version_id] = item
item.setData(version_item.status, STATUS_NAME_ROLE)
item.setData(version_item.task_id, TASK_ID_ROLE)
version_tags = set(version_item.tags)
task_tags = task_tags_by_version_id[version_id]
item.setData(version_id, COMBO_VERSION_ID_ROLE)
item.setData(version_item.status, COMBO_STATUS_NAME_ROLE)
item.setData(version_item.task_id, COMBO_TASK_ID_ROLE)
item.setData("|".join(version_tags), COMBO_VERSION_TAGS_ROLE)
item.setData("|".join(task_tags), COMBO_TASK_TAGS_ROLE)
version_tags_by_version_id[version_id] = set(version_item.tags)
if item.row() != idx:
root_item.insertRow(idx, item)
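Tag sets are flattened into a single "|"-joined string so they fit into one item-data role, and split back into a set when filtering. A standalone sketch of that encode/decode pair; it assumes, like the code above, that tag names never contain the "|" character.

# Encode a tag set into one string for an item-data role, then decode it.
def encode_tags(tags):
    return "|".join(sorted(tags))


def decode_tags(tags_s):
    return set(tags_s.split("|")) if tags_s else set()


encoded = encode_tags({"approved", "final"})
assert decode_tags(encoded) == {"approved", "final"}
assert decode_tags("") == set()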
class VersionsFilterModel(QtCore.QSortFilterProxyModel):
class ComboVersionsFilterModel(QtCore.QSortFilterProxyModel):
def __init__(self):
super().__init__()
self._status_filter = None
self._task_ids_filter = None
self._version_tags_filter = None
self._task_tags_filter = None
def filterAcceptsRow(self, row, parent):
index = None
if self._status_filter is not None:
if not self._status_filter:
return False
index = self.sourceModel().index(row, 0, parent)
status = index.data(STATUS_NAME_ROLE)
if index is None:
index = self.sourceModel().index(row, 0, parent)
status = index.data(COMBO_STATUS_NAME_ROLE)
if status not in self._status_filter:
return False
if self._task_ids_filter:
index = self.sourceModel().index(row, 0, parent)
task_id = index.data(TASK_ID_ROLE)
if index is None:
index = self.sourceModel().index(row, 0, parent)
task_id = index.data(COMBO_TASK_ID_ROLE)
if task_id not in self._task_ids_filter:
return False
if self._version_tags_filter is not None:
if not self._version_tags_filter:
return False
if index is None:
model = self.sourceModel()
index = model.index(row, 0, parent)
version_tags_s = index.data(COMBO_VERSION_TAGS_ROLE)
version_tags = set()
if version_tags_s:
version_tags = set(version_tags_s.split("|"))
if not version_tags & self._version_tags_filter:
return False
if self._task_tags_filter is not None:
if not self._task_tags_filter:
return False
if index is None:
model = self.sourceModel()
index = model.index(row, 0, parent)
task_tags_s = index.data(COMBO_TASK_TAGS_ROLE)
task_tags = set()
if task_tags_s:
task_tags = set(task_tags_s.split("|"))
if not (task_tags & self._task_tags_filter):
return False
return True
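The filters above use three states: None means the filter is inactive, an empty set rejects every row, and a non-empty set keeps only rows sharing at least one tag. The same rule as a standalone predicate; the function name is hypothetical.

# Three-state tag filter: None = inactive, empty = reject all,
# non-empty = keep rows sharing at least one tag.
def passes_tag_filter(row_tags, tags_filter):
    if tags_filter is None:
        return True
    if not tags_filter:
        return False
    return bool(set(row_tags) & tags_filter)


assert passes_tag_filter(["approved"], None) is True
assert passes_tag_filter(["approved"], set()) is False
assert passes_tag_filter(["approved"], {"approved", "final"}) is True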
def set_tasks_filter(self, task_ids):
@ -84,12 +130,24 @@ class VersionsFilterModel(QtCore.QSortFilterProxyModel):
self._task_ids_filter = task_ids
self.invalidateFilter()
def set_task_tags_filter(self, tags):
if self._task_tags_filter == tags:
return
self._task_tags_filter = tags
self.invalidateFilter()
def set_statuses_filter(self, status_names):
if self._status_filter == status_names:
return
self._status_filter = status_names
self.invalidateFilter()
def set_version_tags_filter(self, tags):
if self._version_tags_filter == tags:
return
self._version_tags_filter = tags
self.invalidateFilter()
class VersionComboBox(QtWidgets.QComboBox):
value_changed = QtCore.Signal(str, str)
@ -97,8 +155,8 @@ class VersionComboBox(QtWidgets.QComboBox):
def __init__(self, product_id, parent):
super().__init__(parent)
versions_model = VersionsModel()
proxy_model = VersionsFilterModel()
versions_model = ComboVersionsModel()
proxy_model = ComboVersionsFilterModel()
proxy_model.setSourceModel(versions_model)
self.setModel(proxy_model)
@ -123,6 +181,13 @@ class VersionComboBox(QtWidgets.QComboBox):
if self.currentIndex() != 0:
self.setCurrentIndex(0)
def set_task_tags_filter(self, tags):
self._proxy_model.set_task_tags_filter(tags)
if self.count() == 0:
return
if self.currentIndex() != 0:
self.setCurrentIndex(0)
def set_statuses_filter(self, status_names):
self._proxy_model.set_statuses_filter(status_names)
if self.count() == 0:
@ -130,12 +195,24 @@ class VersionComboBox(QtWidgets.QComboBox):
if self.currentIndex() != 0:
self.setCurrentIndex(0)
def set_version_tags_filter(self, tags):
self._proxy_model.set_version_tags_filter(tags)
if self.count() == 0:
return
if self.currentIndex() != 0:
self.setCurrentIndex(0)
def all_versions_filtered_out(self):
if self._items_by_id:
return self.count() == 0
return False
def update_versions(self, version_items, current_version_id):
def update_versions(
self,
version_items,
current_version_id,
task_tags_by_version_id,
):
self.blockSignals(True)
version_items = list(version_items)
version_ids = [
@ -146,7 +223,9 @@ class VersionComboBox(QtWidgets.QComboBox):
current_version_id = version_ids[0]
self._current_id = current_version_id
self._versions_model.update_versions(version_items)
self._versions_model.update_versions(
version_items, task_tags_by_version_id
)
index = version_ids.index(current_version_id)
if self.currentIndex() != index:
@ -173,6 +252,8 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate):
self._editor_by_id: Dict[str, VersionComboBox] = {}
self._task_ids_filter = None
self._statuses_filter = None
self._version_tags_filter = None
self._task_tags_filter = None
def displayText(self, value, locale):
if not isinstance(value, numbers.Integral):
@ -185,10 +266,26 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate):
widget.set_tasks_filter(task_ids)
def set_statuses_filter(self, status_names):
self._statuses_filter = set(status_names)
if status_names is not None:
status_names = set(status_names)
self._statuses_filter = status_names
for widget in self._editor_by_id.values():
widget.set_statuses_filter(status_names)
def set_version_tags_filter(self, tags):
if tags is not None:
tags = set(tags)
self._version_tags_filter = tags
for widget in self._editor_by_id.values():
widget.set_version_tags_filter(tags)
def set_task_tags_filter(self, tags):
if tags is not None:
tags = set(tags)
self._task_tags_filter = tags
for widget in self._editor_by_id.values():
widget.set_task_tags_filter(tags)
def paint(self, painter, option, index):
fg_color = index.data(QtCore.Qt.ForegroundRole)
if fg_color:
@ -200,7 +297,7 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate):
fg_color = None
if not fg_color:
return super(VersionDelegate, self).paint(painter, option, index)
return super().paint(painter, option, index)
if option.widget:
style = option.widget.style()
@ -263,11 +360,22 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate):
editor.clear()
# Current value of the index
versions = index.data(VERSION_NAME_EDIT_ROLE) or []
product_id = index.data(PRODUCT_ID_ROLE)
version_id = index.data(VERSION_ID_ROLE)
model = index.model()
while hasattr(model, "sourceModel"):
model = model.sourceModel()
versions = model.get_version_items_by_product_id(product_id)
task_tags_by_version_id = {
version_item.version_id: model.get_task_tags_by_id(
version_item.task_id
)
for version_item in versions
}
editor.update_versions(versions, version_id)
editor.update_versions(versions, version_id, task_tags_by_version_id)
editor.set_tasks_filter(self._task_ids_filter)
editor.set_task_tags_filter(self._task_tags_filter)
editor.set_statuses_filter(self._statuses_filter)
def setModelData(self, editor, model, index):

View file

@ -16,31 +16,34 @@ TASK_ID_ROLE = QtCore.Qt.UserRole + 5
PRODUCT_ID_ROLE = QtCore.Qt.UserRole + 6
PRODUCT_NAME_ROLE = QtCore.Qt.UserRole + 7
PRODUCT_TYPE_ROLE = QtCore.Qt.UserRole + 8
PRODUCT_TYPE_ICON_ROLE = QtCore.Qt.UserRole + 9
PRODUCT_IN_SCENE_ROLE = QtCore.Qt.UserRole + 10
VERSION_ID_ROLE = QtCore.Qt.UserRole + 11
VERSION_HERO_ROLE = QtCore.Qt.UserRole + 12
VERSION_NAME_ROLE = QtCore.Qt.UserRole + 13
VERSION_NAME_EDIT_ROLE = QtCore.Qt.UserRole + 14
VERSION_PUBLISH_TIME_ROLE = QtCore.Qt.UserRole + 15
VERSION_STATUS_NAME_ROLE = QtCore.Qt.UserRole + 16
VERSION_STATUS_SHORT_ROLE = QtCore.Qt.UserRole + 17
VERSION_STATUS_COLOR_ROLE = QtCore.Qt.UserRole + 18
VERSION_STATUS_ICON_ROLE = QtCore.Qt.UserRole + 19
VERSION_AUTHOR_ROLE = QtCore.Qt.UserRole + 20
VERSION_FRAME_RANGE_ROLE = QtCore.Qt.UserRole + 21
VERSION_DURATION_ROLE = QtCore.Qt.UserRole + 22
VERSION_HANDLES_ROLE = QtCore.Qt.UserRole + 23
VERSION_STEP_ROLE = QtCore.Qt.UserRole + 24
VERSION_AVAILABLE_ROLE = QtCore.Qt.UserRole + 25
VERSION_THUMBNAIL_ID_ROLE = QtCore.Qt.UserRole + 26
ACTIVE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 27
REMOTE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 28
REPRESENTATIONS_COUNT_ROLE = QtCore.Qt.UserRole + 29
SYNC_ACTIVE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 30
SYNC_REMOTE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 31
PRODUCT_BASE_TYPE_ROLE = QtCore.Qt.UserRole + 9
PRODUCT_TYPE_ICON_ROLE = QtCore.Qt.UserRole + 10
PRODUCT_IN_SCENE_ROLE = QtCore.Qt.UserRole + 11
VERSION_ID_ROLE = QtCore.Qt.UserRole + 12
VERSION_HERO_ROLE = QtCore.Qt.UserRole + 13
VERSION_NAME_ROLE = QtCore.Qt.UserRole + 14
VERSION_NAME_EDIT_ROLE = QtCore.Qt.UserRole + 15
VERSION_PUBLISH_TIME_ROLE = QtCore.Qt.UserRole + 16
VERSION_STATUS_NAME_ROLE = QtCore.Qt.UserRole + 17
VERSION_STATUS_SHORT_ROLE = QtCore.Qt.UserRole + 18
VERSION_STATUS_COLOR_ROLE = QtCore.Qt.UserRole + 19
VERSION_STATUS_ICON_ROLE = QtCore.Qt.UserRole + 20
VERSION_AUTHOR_ROLE = QtCore.Qt.UserRole + 21
VERSION_FRAME_RANGE_ROLE = QtCore.Qt.UserRole + 22
VERSION_DURATION_ROLE = QtCore.Qt.UserRole + 23
VERSION_HANDLES_ROLE = QtCore.Qt.UserRole + 24
VERSION_STEP_ROLE = QtCore.Qt.UserRole + 25
VERSION_AVAILABLE_ROLE = QtCore.Qt.UserRole + 26
VERSION_THUMBNAIL_ID_ROLE = QtCore.Qt.UserRole + 27
ACTIVE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 28
REMOTE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 29
REPRESENTATIONS_COUNT_ROLE = QtCore.Qt.UserRole + 30
SYNC_ACTIVE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 31
SYNC_REMOTE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 32
STATUS_NAME_FILTER_ROLE = QtCore.Qt.UserRole + 32
STATUS_NAME_FILTER_ROLE = QtCore.Qt.UserRole + 33
TASK_TAGS_FILTER_ROLE = QtCore.Qt.UserRole + 34
VERSION_TAGS_FILTER_ROLE = QtCore.Qt.UserRole + 35
class ProductsModel(QtGui.QStandardItemModel):
@ -49,6 +52,7 @@ class ProductsModel(QtGui.QStandardItemModel):
column_labels = [
"Product name",
"Product type",
"Product base type",
"Folder",
"Version",
"Status",
@ -79,6 +83,7 @@ class ProductsModel(QtGui.QStandardItemModel):
product_name_col = column_labels.index("Product name")
product_type_col = column_labels.index("Product type")
product_base_type_col = column_labels.index("Product base type")
folders_label_col = column_labels.index("Folder")
version_col = column_labels.index("Version")
status_col = column_labels.index("Status")
@ -93,6 +98,7 @@ class ProductsModel(QtGui.QStandardItemModel):
_display_role_mapping = {
product_name_col: QtCore.Qt.DisplayRole,
product_type_col: PRODUCT_TYPE_ROLE,
product_base_type_col: PRODUCT_BASE_TYPE_ROLE,
folders_label_col: FOLDER_LABEL_ROLE,
version_col: VERSION_NAME_ROLE,
status_col: VERSION_STATUS_NAME_ROLE,
@ -130,6 +136,7 @@ class ProductsModel(QtGui.QStandardItemModel):
self._last_folder_ids = []
self._last_project_statuses = {}
self._last_status_icons_by_name = {}
self._last_task_tags_by_task_id = {}
def get_product_item_indexes(self):
return [
@ -170,6 +177,17 @@ class ProductsModel(QtGui.QStandardItemModel):
self._last_folder_ids
)
def get_task_tags_by_id(self, task_id):
return self._last_task_tags_by_task_id.get(task_id, set())
def get_version_items_by_product_id(self, product_id: str):
product_item = self._product_items_by_id.get(product_id)
if product_item is None:
return None
version_items = list(product_item.version_items.values())
version_items.sort(reverse=True)
return version_items
def flags(self, index):
# Make the version column editable
if index.column() == self.version_col and index.data(PRODUCT_ID_ROLE):
@ -224,9 +242,9 @@ class ProductsModel(QtGui.QStandardItemModel):
product_item = self._product_items_by_id.get(product_id)
if product_item is None:
return None
product_items = list(product_item.version_items.values())
product_items.sort(reverse=True)
return product_items
version_items = list(product_item.version_items.values())
version_items.sort(reverse=True)
return version_items
if role == QtCore.Qt.EditRole:
return None
@ -422,6 +440,16 @@ class ProductsModel(QtGui.QStandardItemModel):
version_item.status
for version_item in product_item.version_items.values()
}
version_tags = set()
task_tags = set()
for version_item in product_item.version_items.values():
version_tags |= set(version_item.tags)
_task_tags = self._last_task_tags_by_task_id.get(
version_item.task_id
)
if _task_tags:
task_tags |= set(_task_tags)
if model_item is None:
product_id = product_item.product_id
model_item = QtGui.QStandardItem(product_item.product_name)
@ -432,6 +460,9 @@ class ProductsModel(QtGui.QStandardItemModel):
model_item.setData(icon, QtCore.Qt.DecorationRole)
model_item.setData(product_id, PRODUCT_ID_ROLE)
model_item.setData(product_item.product_name, PRODUCT_NAME_ROLE)
model_item.setData(
product_item.product_base_type, PRODUCT_BASE_TYPE_ROLE
)
model_item.setData(product_item.product_type, PRODUCT_TYPE_ROLE)
model_item.setData(product_type_icon, PRODUCT_TYPE_ICON_ROLE)
model_item.setData(product_item.folder_id, FOLDER_ID_ROLE)
@ -440,6 +471,8 @@ class ProductsModel(QtGui.QStandardItemModel):
self._items_by_id[product_id] = model_item
model_item.setData("|".join(statuses), STATUS_NAME_FILTER_ROLE)
model_item.setData("|".join(version_tags), VERSION_TAGS_FILTER_ROLE)
model_item.setData("|".join(task_tags), TASK_TAGS_FILTER_ROLE)
model_item.setData(product_item.folder_label, FOLDER_LABEL_ROLE)
in_scene = 1 if product_item.product_in_scene else 0
model_item.setData(in_scene, PRODUCT_IN_SCENE_ROLE)
@ -470,6 +503,14 @@ class ProductsModel(QtGui.QStandardItemModel):
}
self._last_status_icons_by_name = {}
task_items = self._controller.get_task_items(
project_name, folder_ids, sender=PRODUCTS_MODEL_SENDER_NAME
)
self._last_task_tags_by_task_id = {
task_item.task_id: task_item.tags
for task_item in task_items
}
active_site_icon_def = self._controller.get_active_site_icon_def(
project_name
)
@ -484,6 +525,7 @@ class ProductsModel(QtGui.QStandardItemModel):
folder_ids,
sender=PRODUCTS_MODEL_SENDER_NAME
)
product_items_by_id = {
product_item.product_id: product_item
for product_item in product_items

View file

@ -4,6 +4,7 @@ from typing import Optional
from qtpy import QtWidgets, QtCore
from ayon_core.pipeline.compatibility import is_product_base_type_supported
from ayon_core.tools.utils import (
RecursiveSortFilterProxyModel,
DeselectableTreeView,
@ -26,6 +27,8 @@ from .products_model import (
VERSION_STATUS_ICON_ROLE,
VERSION_THUMBNAIL_ID_ROLE,
STATUS_NAME_FILTER_ROLE,
VERSION_TAGS_FILTER_ROLE,
TASK_TAGS_FILTER_ROLE,
)
from .products_delegates import (
VersionDelegate,
@ -41,6 +44,8 @@ class ProductsProxyModel(RecursiveSortFilterProxyModel):
self._product_type_filters = None
self._statuses_filter = None
self._version_tags_filter = None
self._task_tags_filter = None
self._task_ids_filter = None
self._ascending_sort = True
@ -67,6 +72,18 @@ class ProductsProxyModel(RecursiveSortFilterProxyModel):
self._statuses_filter = statuses_filter
self.invalidateFilter()
def set_version_tags_filter(self, tags):
if self._version_tags_filter == tags:
return
self._version_tags_filter = tags
self.invalidateFilter()
def set_task_tags_filter(self, tags):
if self._task_tags_filter == tags:
return
self._task_tags_filter = tags
self.invalidateFilter()
def filterAcceptsRow(self, source_row, source_parent):
source_model = self.sourceModel()
index = source_model.index(source_row, 0, source_parent)
@ -83,6 +100,16 @@ class ProductsProxyModel(RecursiveSortFilterProxyModel):
):
return False
if not self._accept_row_by_role_value(
index, self._version_tags_filter, VERSION_TAGS_FILTER_ROLE
):
return False
if not self._accept_row_by_role_value(
index, self._task_tags_filter, TASK_TAGS_FILTER_ROLE
):
return False
return super().filterAcceptsRow(source_row, source_parent)
def _accept_task_ids_filter(self, index):
@ -102,10 +129,11 @@ class ProductsProxyModel(RecursiveSortFilterProxyModel):
if not filter_value:
return False
status_s = index.data(role)
for status in status_s.split("|"):
if status in filter_value:
return True
value_s = index.data(role)
if value_s:
for value in value_s.split("|"):
if value in filter_value:
return True
return False
def lessThan(self, left, right):
@ -142,6 +170,7 @@ class ProductsWidget(QtWidgets.QWidget):
default_widths = (
200, # Product name
90, # Product type
90, # Product base type
130, # Folder label
60, # Version
100, # Status
@ -261,6 +290,12 @@ class ProductsWidget(QtWidgets.QWidget):
self._controller.is_sitesync_enabled()
)
if not is_product_base_type_supported():
# Hide product base type column
products_view.setColumnHidden(
products_model.product_base_type_col, True
)
def set_name_filter(self, name):
"""Set filter of product name.
@ -290,6 +325,14 @@ class ProductsWidget(QtWidgets.QWidget):
self._version_delegate.set_statuses_filter(status_names)
self._products_proxy_model.set_statuses_filter(status_names)
def set_version_tags_filter(self, version_tags):
self._version_delegate.set_version_tags_filter(version_tags)
self._products_proxy_model.set_version_tags_filter(version_tags)
def set_task_tags_filter(self, task_tags):
self._version_delegate.set_task_tags_filter(task_tags)
self._products_proxy_model.set_task_tags_filter(task_tags)
def set_product_type_filter(self, product_type_filters):
"""

File diff suppressed because it is too large.

View file

@ -1,157 +0,0 @@
from __future__ import annotations
from qtpy import QtCore, QtGui
from ayon_core.tools.utils import get_qt_icon
from ayon_core.tools.common_models import StatusItem
from ._multicombobox import (
CustomPaintMultiselectComboBox,
BaseQtModel,
)
STATUS_ITEM_TYPE = 0
SELECT_ALL_TYPE = 1
DESELECT_ALL_TYPE = 2
SWAP_STATE_TYPE = 3
STATUSES_FILTER_SENDER = "loader.statuses_filter"
STATUS_NAME_ROLE = QtCore.Qt.UserRole + 1
STATUS_SHORT_ROLE = QtCore.Qt.UserRole + 2
STATUS_COLOR_ROLE = QtCore.Qt.UserRole + 3
STATUS_ICON_ROLE = QtCore.Qt.UserRole + 4
ITEM_TYPE_ROLE = QtCore.Qt.UserRole + 5
ITEM_SUBTYPE_ROLE = QtCore.Qt.UserRole + 6
class StatusesQtModel(BaseQtModel):
def __init__(self, controller):
self._items_by_name: dict[str, QtGui.QStandardItem] = {}
self._icons_by_name_n_color: dict[str, QtGui.QIcon] = {}
super().__init__(
ITEM_TYPE_ROLE,
ITEM_SUBTYPE_ROLE,
"No statuses...",
controller,
)
def _get_standard_items(self) -> list[QtGui.QStandardItem]:
return list(self._items_by_name.values())
def _clear_standard_items(self):
self._items_by_name.clear()
def _prepare_new_value_items(
self, project_name: str, project_changed: bool
):
status_items: list[StatusItem] = (
self._controller.get_project_status_items(
project_name, sender=STATUSES_FILTER_SENDER
)
)
items = []
items_to_remove = []
if not status_items:
return items, items_to_remove
names_to_remove = set(self._items_by_name)
for row_idx, status_item in enumerate(status_items):
name = status_item.name
if name in self._items_by_name:
item = self._items_by_name[name]
names_to_remove.discard(name)
else:
item = QtGui.QStandardItem()
item.setData(ITEM_SUBTYPE_ROLE, STATUS_ITEM_TYPE)
item.setCheckState(QtCore.Qt.Unchecked)
item.setFlags(
QtCore.Qt.ItemIsEnabled
| QtCore.Qt.ItemIsSelectable
| QtCore.Qt.ItemIsUserCheckable
)
self._items_by_name[name] = item
icon = self._get_icon(status_item)
for role, value in (
(STATUS_NAME_ROLE, status_item.name),
(STATUS_SHORT_ROLE, status_item.short),
(STATUS_COLOR_ROLE, status_item.color),
(STATUS_ICON_ROLE, icon),
):
if item.data(role) != value:
item.setData(value, role)
if project_changed:
item.setCheckState(QtCore.Qt.Unchecked)
items.append(item)
for name in names_to_remove:
items_to_remove.append(self._items_by_name.pop(name))
return items, items_to_remove
def _get_icon(self, status_item: StatusItem) -> QtGui.QIcon:
name = status_item.name
color = status_item.color
unique_id = "|".join([name or "", color or ""])
icon = self._icons_by_name_n_color.get(unique_id)
if icon is not None:
return icon
icon: QtGui.QIcon = get_qt_icon({
"type": "material-symbols",
"name": status_item.icon,
"color": status_item.color
})
self._icons_by_name_n_color[unique_id] = icon
return icon
class StatusesCombobox(CustomPaintMultiselectComboBox):
def __init__(self, controller, parent):
self._controller = controller
model = StatusesQtModel(controller)
super().__init__(
STATUS_NAME_ROLE,
STATUS_SHORT_ROLE,
STATUS_COLOR_ROLE,
STATUS_ICON_ROLE,
item_type_role=ITEM_TYPE_ROLE,
model=model,
parent=parent
)
self.set_placeholder_text("Version status filter...")
self._model = model
self._last_project_name = None
self._fully_disabled_filter = False
controller.register_event_callback(
"selection.project.changed",
self._on_project_change
)
controller.register_event_callback(
"projects.refresh.finished",
self._on_projects_refresh
)
self.setToolTip("Statuses filter")
self.value_changed.connect(
self._on_status_filter_change
)
def _on_status_filter_change(self):
lines = ["Statuses filter"]
for item in self.get_value_info():
status_name, enabled = item
lines.append(f"{'' if enabled else ''} {status_name}")
self.setToolTip("\n".join(lines))
def _on_project_change(self, event):
project_name = event["project_name"]
self._last_project_name = project_name
self._model.refresh(project_name)
def _on_projects_refresh(self):
if self._last_project_name:
self._model.refresh(self._last_project_name)
self._on_status_filter_change()

Some files were not shown because too many files have changed in this diff.