Merge branch 'develop' into enhancement/per-project-bundle

# Conflicts:
#	client/ayon_core/settings/lib.py
This commit is contained in:
Jakub Trllo 2025-06-16 18:04:05 +02:00
commit 3e60bd9ba4
126 changed files with 4635 additions and 2050 deletions

View file

@ -35,6 +35,20 @@ body:
label: Version
description: What version are you running? Look to AYON Tray
options:
- 1.3.2
- 1.3.1
- 1.3.0
- 1.2.0
- 1.1.9
- 1.1.8
- 1.1.7
- 1.1.6
- 1.1.5
- 1.1.4
- 1.1.3
- 1.1.2
- 1.1.1
- 1.1.0
- 1.0.14
- 1.0.13
- 1.0.12

View file

@ -21,6 +21,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: astral-sh/ruff-action@v1
- uses: astral-sh/ruff-action@v3
with:
changed-files: "true"
version-file: "pyproject.toml"

View file

@ -1,10 +1,11 @@
name: 🐞 Update Bug Report
on:
workflow_run:
workflows: ["🚀 Release Trigger"]
types:
- completed
workflow_dispatch:
release:
# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#release
types: [published]
jobs:
update-bug-report:

4
.gitignore vendored
View file

@ -77,9 +77,13 @@ dump.sql
# Poetry
########
.poetry/
poetry.lock
.python-version
.editorconfig
.pre-commit-config.yaml
mypy.ini
.github_changelog_generator
# ignore mkdocs build
site/

View file

@ -37,7 +37,7 @@ def _handle_error(
if process_context.headless:
if detail:
print(detail)
print(f"{10*'*'}\n{message}\n{10*'*'}")
print(f"{10 * '*'}\n{message}\n{10 * '*'}")
return
current_dir = os.path.dirname(os.path.abspath(__file__))

View file

@ -186,7 +186,6 @@ def contextselection(
main(output_path, project, folder, strict)
@main_cli.command(
context_settings=dict(
ignore_unknown_options=True,

View file

@ -1,12 +1,15 @@
from ayon_api import get_project, get_folder_by_path, get_task_by_name
from ayon_core.pipeline import Anatomy
from ayon_core.pipeline.anatomy import RootMissingEnv
from ayon_applications import PreLaunchHook
from ayon_applications.exceptions import ApplicationLaunchFailed
from ayon_applications.utils import (
EnvironmentPrepData,
prepare_app_environments,
prepare_context_environments
)
from ayon_core.pipeline import Anatomy
class GlobalHostDataHook(PreLaunchHook):
@ -67,9 +70,12 @@ class GlobalHostDataHook(PreLaunchHook):
self.data["project_entity"] = project_entity
# Anatomy
self.data["anatomy"] = Anatomy(
project_name, project_entity=project_entity
)
try:
self.data["anatomy"] = Anatomy(
project_name, project_entity=project_entity
)
except RootMissingEnv as exc:
raise ApplicationLaunchFailed(str(exc))
folder_path = self.data.get("folder_path")
if not folder_path:

View file

@ -29,6 +29,15 @@ class OCIOEnvHook(PreLaunchHook):
def execute(self):
"""Hook entry method."""
task_entity = self.data.get("task_entity")
if not task_entity:
self.log.info(
"Skipping OCIO Environment preparation."
"Task Entity is not available."
)
return
folder_entity = self.data["folder_entity"]
template_data = get_template_data(

View file

@ -0,0 +1,30 @@
""""Pre launch hook to remove launcher paths from the system."""
import os
from ayon_applications import PreLaunchHook
class PreRemoveLauncherPaths(PreLaunchHook):
"""Remove launcher paths from the system.
This hook is used to remove launcher paths from the system before launching
an application. It is used to ensure that the application is launched with
the correct environment variables. Especially for Windows, where
paths in `PATH` are used to load DLLs. This is important to avoid
conflicts with other applications that may have the same DLLs in their
paths.
"""
order = 1
def execute(self) -> None:
"""Execute the hook."""
# Remove launcher paths from the system
ayon_root = os.path.normpath(os.environ["AYON_ROOT"])
paths = [
path
for path in self.launch_context.env.get(
"PATH", "").split(os.pathsep)
if not os.path.normpath(path).startswith(ayon_root)
]
self.launch_context.env["PATH"] = os.pathsep.join(paths)

View file

@ -62,6 +62,7 @@ from .execute import (
run_subprocess,
run_detached_process,
run_ayon_launcher_process,
run_detached_ayon_launcher_process,
path_to_subprocess_arg,
CREATE_NO_WINDOW
)
@ -98,7 +99,6 @@ from .profiles_filtering import (
from .transcoding import (
get_transcode_temp_directory,
should_convert_for_ffmpeg,
convert_for_ffmpeg,
convert_input_paths_for_ffmpeg,
get_ffprobe_data,
get_ffprobe_streams,
@ -132,6 +132,7 @@ from .ayon_info import (
is_staging_enabled,
is_dev_mode_enabled,
is_in_tests,
get_settings_variant,
)
terminal = Terminal
@ -161,6 +162,7 @@ __all__ = [
"run_subprocess",
"run_detached_process",
"run_ayon_launcher_process",
"run_detached_ayon_launcher_process",
"path_to_subprocess_arg",
"CREATE_NO_WINDOW",
@ -198,7 +200,6 @@ __all__ = [
"get_transcode_temp_directory",
"should_convert_for_ffmpeg",
"convert_for_ffmpeg",
"convert_input_paths_for_ffmpeg",
"get_ffprobe_data",
"get_ffprobe_streams",
@ -242,4 +243,5 @@ __all__ = [
"is_staging_enabled",
"is_dev_mode_enabled",
"is_in_tests",
"get_settings_variant",
]

View file

@ -22,12 +22,10 @@ import clique
if typing.TYPE_CHECKING:
from typing import Self, Tuple, Union, TypedDict, Pattern
class EnumItemDict(TypedDict):
label: str
value: Any
EnumItemsInputType = Union[
Dict[Any, str],
List[Tuple[Any, str]],
@ -35,7 +33,6 @@ if typing.TYPE_CHECKING:
List[EnumItemDict]
]
class FileDefItemDict(TypedDict):
directory: str
filenames: List[str]
@ -289,6 +286,7 @@ AttrDefType = TypeVar("AttrDefType", bound=AbstractAttrDef)
# UI attribute definitions won't hold value
# -----------------------------------------
class UIDef(AbstractAttrDef):
is_value_def = False

View file

@ -177,10 +177,12 @@ def initialize_ayon_connection(force=False):
return _new_get_last_versions(
con, *args, **kwargs
)
def _lv_by_pi_wrapper(*args, **kwargs):
return _new_get_last_version_by_product_id(
con, *args, **kwargs
)
def _lv_by_pn_wrapper(*args, **kwargs):
return _new_get_last_version_by_product_name(
con, *args, **kwargs

View file

@ -78,15 +78,15 @@ def is_using_ayon_console():
return "ayon_console" in executable_filename
def is_headless_mode_enabled():
def is_headless_mode_enabled() -> bool:
return os.getenv("AYON_HEADLESS_MODE") == "1"
def is_staging_enabled():
def is_staging_enabled() -> bool:
return os.getenv("AYON_USE_STAGING") == "1"
def is_in_tests():
def is_in_tests() -> bool:
"""Process is running in automatic tests mode.
Returns:
@ -96,7 +96,7 @@ def is_in_tests():
return os.environ.get("AYON_IN_TESTS") == "1"
def is_dev_mode_enabled():
def is_dev_mode_enabled() -> bool:
"""Dev mode is enabled in AYON.
Returns:
@ -106,6 +106,22 @@ def is_dev_mode_enabled():
return os.getenv("AYON_USE_DEV") == "1"
def get_settings_variant() -> str:
    """Get AYON settings variant.

    Returns:
        str: Settings variant.

    """
    # Dev mode uses the current bundle name as the settings variant.
    if is_dev_mode_enabled():
        return os.environ["AYON_BUNDLE_NAME"]
    return "staging" if is_staging_enabled() else "production"
def get_ayon_info():
executable_args = get_ayon_launcher_args()
if is_running_from_build():

View file

@ -1,3 +1,4 @@
from __future__ import annotations
import os
import sys
import subprocess
@ -201,29 +202,9 @@ def clean_envs_for_ayon_process(env=None):
return env
def run_ayon_launcher_process(*args, add_sys_paths=False, **kwargs):
"""Execute AYON process with passed arguments and wait.
Wrapper for 'run_process' which prepends AYON executable arguments
before passed arguments and define environments if are not passed.
Values from 'os.environ' are used for environments if are not passed.
They are cleaned using 'clean_envs_for_ayon_process' function.
Example:
```
run_ayon_process("run", "<path to .py script>")
```
Args:
*args (str): ayon-launcher cli arguments.
**kwargs (Any): Keyword arguments for subprocess.Popen.
Returns:
str: Full output of subprocess concatenated stdout and stderr.
"""
args = get_ayon_launcher_args(*args)
def _prepare_ayon_launcher_env(
add_sys_paths: bool, kwargs: dict
) -> dict[str, str]:
env = kwargs.pop("env", None)
# Keep env untouched if are passed and not empty
if not env:
@ -239,8 +220,7 @@ def run_ayon_launcher_process(*args, add_sys_paths=False, **kwargs):
new_pythonpath.append(path)
lookup_set.add(path)
env["PYTHONPATH"] = os.pathsep.join(new_pythonpath)
return run_subprocess(args, env=env, **kwargs)
return env
def run_detached_process(args, **kwargs):
@ -314,6 +294,67 @@ def run_detached_process(args, **kwargs):
return process
def run_ayon_launcher_process(
    *args, add_sys_paths: bool = False, **kwargs
) -> str:
    """Execute AYON launcher process with passed arguments and wait.

    Wrapper for 'run_subprocess' which prepends AYON executable arguments
    before passed arguments and defines environments if are not passed.

    Values from 'os.environ' are used for environments if are not passed.
    They are cleaned using 'clean_envs_for_ayon_process' function.

    Example:
        ```
        run_ayon_launcher_process("run", "<path to .py script>")
        ```

    Args:
        *args (str): ayon-launcher cli arguments.
        add_sys_paths (bool): Add system paths to PYTHONPATH.
        **kwargs (Any): Keyword arguments for subprocess.Popen.

    Returns:
        str: Full output of subprocess concatenated stdout and stderr.

    """
    args = get_ayon_launcher_args(*args)
    env = _prepare_ayon_launcher_env(add_sys_paths, kwargs)
    return run_subprocess(args, env=env, **kwargs)
def run_detached_ayon_launcher_process(
    *args, add_sys_paths: bool = False, **kwargs
) -> subprocess.Popen:
    """Execute AYON launcher process with passed arguments as detached process.

    Wrapper for 'run_detached_process' which prepends AYON executable
    arguments before passed arguments and defines environments if are not
    passed. Unlike 'run_ayon_launcher_process' this does not wait for the
    process to finish.

    Values from 'os.environ' are used for environments if are not passed.
    They are cleaned using 'clean_envs_for_ayon_process' function.

    Example:
        ```
        run_detached_ayon_launcher_process("run", "<path to .py script>")
        ```

    Args:
        *args (str): ayon-launcher cli arguments.
        add_sys_paths (bool): Add system paths to PYTHONPATH.
        **kwargs (Any): Keyword arguments for subprocess.Popen.

    Returns:
        subprocess.Popen: Pointer to launched process but it is possible that
            launched process is already killed (on linux).

    """
    args = get_ayon_launcher_args(*args)
    env = _prepare_ayon_launcher_env(add_sys_paths, kwargs)
    return run_detached_process(args, env=env, **kwargs)
def path_to_subprocess_arg(path):
"""Prepare path for subprocess arguments.

View file

@ -1,15 +1,13 @@
import concurrent.futures
import os
import logging
import sys
import errno
from concurrent.futures import ThreadPoolExecutor, Future
from typing import List, Optional
from ayon_core.lib import create_hard_link
# this is needed until speedcopy for linux is fixed
if sys.platform == "win32":
from speedcopy import copyfile
else:
from shutil import copyfile
from speedcopy import copyfile
class DuplicateDestinationError(ValueError):
@ -109,41 +107,52 @@ class FileTransaction:
self._transfers[dst] = (src, opts)
def process(self):
# Backup any existing files
for dst, (src, _) in self._transfers.items():
self.log.debug("Checking file ... {} -> {}".format(src, dst))
path_same = self._same_paths(src, dst)
if path_same or not os.path.exists(dst):
continue
with ThreadPoolExecutor(max_workers=8) as executor:
# Submit backup tasks
backup_futures = [
executor.submit(self._backup_file, dst, src)
for dst, (src, _) in self._transfers.items()
]
wait_for_future_errors(
executor, backup_futures, logger=self.log)
# Backup original file
# todo: add timestamp or uuid to ensure unique
backup = dst + ".bak"
self._backup_to_original[backup] = dst
# Submit transfer tasks
transfer_futures = [
executor.submit(self._transfer_file, dst, src, opts)
for dst, (src, opts) in self._transfers.items()
]
wait_for_future_errors(
executor, transfer_futures, logger=self.log)
def _backup_file(self, dst, src):
    """Back up an existing destination file before it gets overwritten.

    Renames an already existing ``dst`` to ``dst + ".bak"`` and records the
    mapping in ``self._backup_to_original``. Does nothing when source and
    destination resolve to the same path or when ``dst`` does not exist.

    Args:
        dst (str): Destination filepath that may already exist.
        src (str): Source filepath, used for the same-path check and logging.
    """
    self.log.debug(f"Checking file ... {src} -> {dst}")
    path_same = self._same_paths(src, dst)
    if path_same or not os.path.exists(dst):
        return

    # Backup original file
    # NOTE(review): backup name is not unique — a pre-existing '.bak' file
    # would be clobbered by 'os.rename'; consider a timestamp/uuid suffix.
    backup = dst + ".bak"
    self._backup_to_original[backup] = dst
    self.log.debug(f"Backup existing file: {dst} -> {backup}")
    os.rename(dst, backup)
def _transfer_file(self, dst, src, opts):
path_same = self._same_paths(src, dst)
if path_same:
self.log.debug(
"Backup existing file: {} -> {}".format(dst, backup))
os.rename(dst, backup)
f"Source and destination are same files {src} -> {dst}")
return
# Copy the files to transfer
for dst, (src, opts) in self._transfers.items():
path_same = self._same_paths(src, dst)
if path_same:
self.log.debug(
"Source and destination are same files {} -> {}".format(
src, dst))
continue
self._create_folder_for_file(dst)
self._create_folder_for_file(dst)
if opts["mode"] == self.MODE_COPY:
self.log.debug(f"Copying file ... {src} -> {dst}")
copyfile(src, dst)
elif opts["mode"] == self.MODE_HARDLINK:
self.log.debug(f"Hardlinking file ... {src} -> {dst}")
create_hard_link(src, dst)
if opts["mode"] == self.MODE_COPY:
self.log.debug("Copying file ... {} -> {}".format(src, dst))
copyfile(src, dst)
elif opts["mode"] == self.MODE_HARDLINK:
self.log.debug("Hardlinking file ... {} -> {}".format(
src, dst))
create_hard_link(src, dst)
self._transferred.append(dst)
self._transferred.append(dst)
def finalize(self):
# Delete any backed up files
@ -212,3 +221,46 @@ class FileTransaction:
return os.stat(src) == os.stat(dst)
return src == dst
def wait_for_future_errors(
    executor: ThreadPoolExecutor,
    futures: List[Future],
    logger: Optional[logging.Logger] = None):
    """Wait for futures and abort remaining work as soon as one fails.

    The ThreadPoolExecutor only cancels pending futures on shutdown; workers
    that are already running still finish (and may themselves fail). This
    waits for completion, and on the first observed failure shuts the
    executor down with 'cancel_futures=True', logs every exception raised by
    the futures that managed to finish, and re-raises the last of them.
    Returns normally when no future raised.
    """
    log = logger if logger is not None else logging.getLogger(__name__)

    failed = False
    for future in concurrent.futures.as_completed(futures):
        if future.exception() is not None:
            # Stop handing out new work; running workers still complete
            # because 'shutdown' waits for them.
            executor.shutdown(wait=True, cancel_futures=True)
            failed = True
            break

    if not failed:
        # Every future completed without raising.
        return

    # Collect exceptions from all futures that finished (the triggering one
    # plus any worker that failed while we were shutting down).
    errors = [
        future.exception()
        for future in futures
        if future.done()
        and not future.cancelled()
        and future.exception() is not None
    ]
    for error in errors:
        log.error("Error occurred in worker", exc_info=error)

    # Re-raise the last collected exception.
    raise errors[-1]

View file

@ -39,6 +39,7 @@ class Terminal:
"""
from ayon_core.lib import env_value_to_bool
log_no_colors = env_value_to_bool(
"AYON_LOG_NO_COLORS", default=None
)

View file

@ -526,137 +526,6 @@ def should_convert_for_ffmpeg(src_filepath):
return False
# Deprecated since 2022 4 20
# - Reason - Doesn't convert sequences right way: Can't handle gaps, reuse
# first frame for all frames and changes filenames when input
# is sequence.
# - use 'convert_input_paths_for_ffmpeg' instead
def convert_for_ffmpeg(
first_input_path,
output_dir,
input_frame_start=None,
input_frame_end=None,
logger=None
):
"""Convert source file to format supported in ffmpeg.
Currently can convert only exrs.
Args:
first_input_path (str): Path to first file of a sequence or a single
file path for non-sequential input.
output_dir (str): Path to directory where output will be rendered.
Must not be same as input's directory.
input_frame_start (int): Frame start of input.
input_frame_end (int): Frame end of input.
logger (logging.Logger): Logger used for logging.
Raises:
ValueError: If input filepath has extension not supported by function.
Currently is supported only ".exr" extension.
"""
if logger is None:
logger = logging.getLogger(__name__)
logger.warning((
"DEPRECATED: 'ayon_core.lib.transcoding.convert_for_ffmpeg' is"
" deprecated function of conversion for FFMpeg. Please replace usage"
" with 'ayon_core.lib.transcoding.convert_input_paths_for_ffmpeg'"
))
ext = os.path.splitext(first_input_path)[1].lower()
if ext != ".exr":
raise ValueError((
"Function 'convert_for_ffmpeg' currently support only"
" \".exr\" extension. Got \"{}\"."
).format(ext))
is_sequence = False
if input_frame_start is not None and input_frame_end is not None:
is_sequence = int(input_frame_end) != int(input_frame_start)
input_info = get_oiio_info_for_input(first_input_path, logger=logger)
# Change compression only if source compression is "dwaa" or "dwab"
# - they're not supported in ffmpeg
compression = input_info["attribs"].get("compression")
if compression in ("dwaa", "dwab"):
compression = "none"
# Prepare subprocess arguments
oiio_cmd = get_oiio_tool_args(
"oiiotool",
# Don't add any additional attributes
"--nosoftwareattrib",
)
# Add input compression if available
if compression:
oiio_cmd.extend(["--compression", compression])
# Collect channels to export
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
oiio_cmd.extend([
input_arg, first_input_path,
# Tell oiiotool which channels should be put to top stack (and output)
"--ch", channels_arg,
# Use first subimage
"--subimage", "0"
])
# Add frame definitions to arguments
if is_sequence:
oiio_cmd.extend([
"--frames", "{}-{}".format(input_frame_start, input_frame_end)
])
for attr_name, attr_value in input_info["attribs"].items():
if not isinstance(attr_value, str):
continue
# Remove attributes that have string value longer than allowed length
# for ffmpeg or when contain prohibited symbols
erase_reason = "Missing reason"
erase_attribute = False
if len(attr_value) > MAX_FFMPEG_STRING_LEN:
erase_reason = "has too long value ({} chars).".format(
len(attr_value)
)
erase_attribute = True
if not erase_attribute:
for char in NOT_ALLOWED_FFMPEG_CHARS:
if char in attr_value:
erase_attribute = True
erase_reason = (
"contains unsupported character \"{}\"."
).format(char)
break
if erase_attribute:
# Set attribute to empty string
logger.info((
"Removed attribute \"{}\" from metadata because {}."
).format(attr_name, erase_reason))
oiio_cmd.extend(["--eraseattrib", attr_name])
# Add last argument - path to output
if is_sequence:
ext = os.path.splitext(first_input_path)[1]
base_filename = "tmp.%{:0>2}d{}".format(
len(str(input_frame_end)), ext
)
else:
base_filename = os.path.basename(first_input_path)
output_path = os.path.join(output_dir, base_filename)
oiio_cmd.extend([
"-o", output_path
])
logger.debug("Conversion command: {}".format(" ".join(oiio_cmd)))
run_subprocess(oiio_cmd, logger=logger)
def convert_input_paths_for_ffmpeg(
input_paths,
output_dir,
@ -664,7 +533,7 @@ def convert_input_paths_for_ffmpeg(
):
"""Convert source file to format supported in ffmpeg.
Currently can convert only exrs. The input filepaths should be files
Can currently convert only EXRs. The input filepaths should be files
with same type. Information about input is loaded only from first found
file.
@ -691,10 +560,10 @@ def convert_input_paths_for_ffmpeg(
ext = os.path.splitext(first_input_path)[1].lower()
if ext != ".exr":
raise ValueError((
"Function 'convert_for_ffmpeg' currently support only"
" \".exr\" extension. Got \"{}\"."
).format(ext))
raise ValueError(
"Function 'convert_input_paths_for_ffmpeg' currently supports"
f" only \".exr\" extension. Got \"{ext}\"."
)
input_info = get_oiio_info_for_input(first_input_path, logger=logger)

View file

@ -162,7 +162,7 @@ def find_tool_in_custom_paths(paths, tool, validation_func=None):
# Handle cases when path is just an executable
# - it allows to use executable from PATH
# - basename must match 'tool' value (without extension)
extless_path, ext = os.path.splitext(path)
extless_path, _ext = os.path.splitext(path)
if extless_path == tool:
executable_path = find_executable(tool)
if executable_path and (
@ -181,7 +181,7 @@ def find_tool_in_custom_paths(paths, tool, validation_func=None):
# If path is a file validate it
if os.path.isfile(normalized):
basename, ext = os.path.splitext(os.path.basename(path))
basename, _ext = os.path.splitext(os.path.basename(path))
# Check if the filename actually has the same name as 'tool'
if basename == tool:
executable_path = find_executable(normalized)

View file

@ -100,6 +100,10 @@ from .context_tools import (
get_current_task_name
)
from .compatibility import (
is_product_base_type_supported,
)
from .workfile import (
discover_workfile_build_plugins,
register_workfile_build_plugin,
@ -223,4 +227,7 @@ __all__ = (
# Backwards compatible function names
"install",
"uninstall",
# Feature detection
"is_product_base_type_supported",
)

View file

@ -1,5 +1,6 @@
from .exceptions import (
ProjectNotSet,
RootMissingEnv,
RootCombinationError,
TemplateMissingKey,
AnatomyTemplateUnsolved,
@ -9,6 +10,7 @@ from .anatomy import Anatomy
__all__ = (
"ProjectNotSet",
"RootMissingEnv",
"RootCombinationError",
"TemplateMissingKey",
"AnatomyTemplateUnsolved",

View file

@ -462,8 +462,8 @@ class Anatomy(BaseAnatomy):
Union[Dict[str, str], None]): Local root overrides.
"""
if not project_name:
return
return ayon_api.get_project_roots_for_site(
return None
return ayon_api.get_project_root_overrides_by_site_id(
project_name, get_local_site_id()
)

View file

@ -5,6 +5,11 @@ class ProjectNotSet(Exception):
"""Exception raised when is created Anatomy without project name."""
class RootMissingEnv(KeyError):
"""Raised when root requires environment variables which is not filled."""
pass
class RootCombinationError(Exception):
"""This exception is raised when templates has combined root types."""

View file

@ -2,9 +2,11 @@ import os
import platform
import numbers
from ayon_core.lib import Logger
from ayon_core.lib import Logger, StringTemplate
from ayon_core.lib.path_templates import FormatObject
from .exceptions import RootMissingEnv
class RootItem(FormatObject):
"""Represents one item or roots.
@ -21,18 +23,36 @@ class RootItem(FormatObject):
multi root setup otherwise None value is expected.
"""
def __init__(self, parent, root_raw_data, name):
super(RootItem, self).__init__()
super().__init__()
self._log = None
lowered_platform_keys = {}
for key, value in root_raw_data.items():
lowered_platform_keys[key.lower()] = value
lowered_platform_keys = {
key.lower(): value
for key, value in root_raw_data.items()
}
self.raw_data = lowered_platform_keys
self.cleaned_data = self._clean_roots(lowered_platform_keys)
self.name = name
self.parent = parent
self.available_platforms = set(lowered_platform_keys.keys())
self.value = lowered_platform_keys.get(platform.system().lower())
current_platform = platform.system().lower()
# WARNING: Using environment variables in roots is not considered
# as production safe. Some features may not work as expected, for
# example USD resolver or site sync.
try:
self.value = lowered_platform_keys[current_platform].format_map(
os.environ
)
except KeyError:
result = StringTemplate(self.value).format(os.environ.copy())
is_are = "is" if len(result.missing_keys) == 1 else "are"
missing_keys = ", ".join(result.missing_keys)
raise RootMissingEnv(
f"Root \"{name}\" requires environment variable/s"
f" {missing_keys} which {is_are} not available."
)
self.clean_value = self._clean_root(self.value)
def __format__(self, *args, **kwargs):
@ -105,10 +125,10 @@ class RootItem(FormatObject):
def _clean_roots(self, raw_data):
"""Clean all values of raw root item values."""
cleaned = {}
for key, value in raw_data.items():
cleaned[key] = self._clean_root(value)
return cleaned
return {
key: self._clean_root(value)
for key, value in raw_data.items()
}
def path_remapper(self, path, dst_platform=None, src_platform=None):
"""Remap path for specific platform.

View file

@ -834,7 +834,7 @@ def _get_global_config_data(
if not product_entities_by_name:
# in case no product was found we need to use fallback
fallback_type = fallback_data["type"]
fallback_type = fallback_data["fallback_type"]
return _get_config_path_from_profile_data(
fallback_data, fallback_type, template_data
)

View file

@ -0,0 +1,16 @@
"""Package to handle compatibility checks for pipeline components."""
def is_product_base_type_supported() -> bool:
    """Check support for product base types.

    This function checks if the current pipeline supports product base types.
    Once this feature is implemented, it will return True. This should be used
    in places where some kind of backward compatibility is needed to avoid
    breaking existing functionality that relies on the current behavior.

    Returns:
        bool: True if product base types are supported, False otherwise.

    """
    # Feature flag: product base types are not implemented yet, so this is
    # a constant until the pipeline gains support for them.
    return False

View file

@ -27,7 +27,8 @@ from .workfile import (
get_workdir,
get_custom_workfile_template_by_string_context,
get_workfile_template_key_from_context,
get_last_workfile
get_last_workfile,
MissingWorkdirError,
)
from . import (
register_loader_plugin_path,
@ -251,7 +252,7 @@ def uninstall_host():
pyblish.api.deregister_discovery_filter(filter_pyblish_plugins)
deregister_loader_plugin_path(LOAD_PATH)
deregister_inventory_action_path(INVENTORY_PATH)
log.info("Global plug-ins unregistred")
log.info("Global plug-ins unregistered")
deregister_host()
@ -617,7 +618,18 @@ def version_up_current_workfile():
last_workfile_path = get_last_workfile(
work_root, file_template, data, extensions, True
)
new_workfile_path = version_up(last_workfile_path)
# `get_last_workfile` will return the first expected file version
# if no files exist yet. In that case, if they do not exist we will
# want to save v001
new_workfile_path = last_workfile_path
if os.path.exists(new_workfile_path):
new_workfile_path = version_up(new_workfile_path)
# Raise an error if the parent folder doesn't exist as `host.save_workfile`
# is not supposed/able to create missing folders.
parent_folder = os.path.dirname(new_workfile_path)
if not os.path.exists(parent_folder):
raise MissingWorkdirError(
f"Work area directory '{parent_folder}' does not exist.")
host.save_workfile(new_workfile_path)

View file

@ -872,7 +872,7 @@ class CreateContext:
"""
return self._event_hub.add_callback(INSTANCE_ADDED_TOPIC, callback)
def add_instances_removed_callback (self, callback):
def add_instances_removed_callback(self, callback):
"""Register callback for removed instances.
Event is triggered when instances are already removed from context.
@ -933,7 +933,7 @@ class CreateContext:
"""
self._event_hub.add_callback(VALUE_CHANGED_TOPIC, callback)
def add_pre_create_attr_defs_change_callback (self, callback):
def add_pre_create_attr_defs_change_callback(self, callback):
"""Register callback to listen pre-create attribute changes.
Create plugin can trigger refresh of pre-create attributes. Usage of
@ -961,7 +961,7 @@ class CreateContext:
PRE_CREATE_ATTR_DEFS_CHANGED_TOPIC, callback
)
def add_create_attr_defs_change_callback (self, callback):
def add_create_attr_defs_change_callback(self, callback):
"""Register callback to listen create attribute changes.
Create plugin changed attribute definitions of instance.
@ -986,7 +986,7 @@ class CreateContext:
"""
self._event_hub.add_callback(CREATE_ATTR_DEFS_CHANGED_TOPIC, callback)
def add_publish_attr_defs_change_callback (self, callback):
def add_publish_attr_defs_change_callback(self, callback):
"""Register callback to listen publish attribute changes.
Publish plugin changed attribute definitions of instance of context.
@ -2303,10 +2303,16 @@ class CreateContext:
for plugin_name, plugin_value in item_changes.pop(
"publish_attributes"
).items():
if plugin_value is None:
current_publish[plugin_name] = None
continue
plugin_changes = current_publish.setdefault(
plugin_name, {}
)
plugin_changes.update(plugin_value)
if plugin_changes is None:
current_publish[plugin_name] = plugin_value
else:
plugin_changes.update(plugin_value)
item_values.update(item_changes)

View file

@ -52,15 +52,15 @@ def get_product_name_template(
# TODO remove formatting keys replacement
template = (
matching_profile["template"]
.replace("{task[name]}", "{task}")
.replace("{Task[name]}", "{Task}")
.replace("{TASK[NAME]}", "{TASK}")
.replace("{product[type]}", "{family}")
.replace("{Product[type]}", "{Family}")
.replace("{PRODUCT[TYPE]}", "{FAMILY}")
.replace("{folder[name]}", "{asset}")
.replace("{Folder[name]}", "{Asset}")
.replace("{FOLDER[NAME]}", "{ASSET}")
.replace("{task}", "{task[name]}")
.replace("{Task}", "{Task[name]}")
.replace("{TASK}", "{TASK[NAME]}")
.replace("{family}", "{product[type]}")
.replace("{Family}", "{Product[type]}")
.replace("{FAMILY}", "{PRODUCT[TYPE]}")
.replace("{asset}", "{folder[name]}")
.replace("{Asset}", "{Folder[name]}")
.replace("{ASSET}", "{FOLDER[NAME]}")
)
# Make sure template is set (matching may have empty string)

View file

@ -160,29 +160,26 @@ class AttributeValues:
return self._attr_defs_by_key.get(key, default)
def update(self, value):
changes = {}
for _key, _value in dict(value).items():
if _key in self._data and self._data.get(_key) == _value:
continue
self._data[_key] = _value
changes[_key] = _value
changes = self._update(value)
if changes:
self._parent.attribute_value_changed(self._key, changes)
def pop(self, key, default=None):
has_key = key in self._data
value = self._data.pop(key, default)
# Remove attribute definition if is 'UnknownDef'
# - gives option to get rid of unknown values
attr_def = self._attr_defs_by_key.get(key)
if isinstance(attr_def, UnknownDef):
self._attr_defs_by_key.pop(key)
self._attr_defs.remove(attr_def)
elif has_key:
self._parent.attribute_value_changed(self._key, {key: None})
value, changes = self._pop(key, default)
if changes:
self._parent.attribute_value_changed(self._key, changes)
return value
def set_value(self, value):
pop_keys = set(value.keys()) - set(self._data.keys())
changes = self._update(value)
for key in pop_keys:
_, key_changes = self._pop(key, None)
changes.update(key_changes)
if changes:
self._parent.attribute_value_changed(self._key, changes)
def reset_values(self):
self._data = {}
@ -228,6 +225,29 @@ class AttributeValues:
return serialize_attr_defs(self._attr_defs)
def _update(self, value):
changes = {}
for key, value in dict(value).items():
if key in self._data and self._data.get(key) == value:
continue
self._data[key] = value
changes[key] = value
return changes
def _pop(self, key, default):
has_key = key in self._data
value = self._data.pop(key, default)
# Remove attribute definition if is 'UnknownDef'
# - gives option to get rid of unknown values
attr_def = self._attr_defs_by_key.get(key)
changes = {}
if isinstance(attr_def, UnknownDef):
self._attr_defs_by_key.pop(key)
self._attr_defs.remove(attr_def)
elif has_key:
changes[key] = None
return value, changes
class CreatorAttributeValues(AttributeValues):
"""Creator specific attribute values of an instance."""
@ -270,6 +290,23 @@ class PublishAttributes:
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
"""Set value for plugin.
Args:
key (str): Plugin name.
value (dict[str, Any]): Value to set.
"""
current_value = self._data.get(key)
if isinstance(current_value, PublishAttributeValues):
current_value.set_value(value)
else:
self._data[key] = value
def __delitem__(self, key):
self.pop(key)
def __contains__(self, key):
return key in self._data
@ -332,7 +369,7 @@ class PublishAttributes:
return copy.deepcopy(self._origin_data)
def attribute_value_changed(self, key, changes):
self._parent.publish_attribute_value_changed(key, changes)
self._parent.publish_attribute_value_changed(key, changes)
def set_publish_plugin_attr_defs(
self,

View file

@ -255,7 +255,7 @@ def deliver_sequence(
report_items[""].append(msg)
return report_items, 0
dir_path, file_name = os.path.split(str(src_path))
dir_path, _file_name = os.path.split(str(src_path))
context = repre["context"]
ext = context.get("ext", context.get("representation"))
@ -270,7 +270,7 @@ def deliver_sequence(
# context.representation could be .psd
ext = ext.replace("..", ".")
src_collections, remainder = clique.assemble(os.listdir(dir_path))
src_collections, _remainder = clique.assemble(os.listdir(dir_path))
src_collection = None
for col in src_collections:
if col.tail != ext:

View file

@ -1,4 +1,4 @@
from __future__ import annotations
from __future__ import annotations
import copy
import os
import re
@ -660,14 +660,6 @@ def _get_legacy_product_name_and_group(
warnings.warn("Using legacy product name for renders",
DeprecationWarning)
if not source_product_name.startswith(product_type):
resulting_group_name = '{}{}{}{}{}'.format(
product_type,
task_name[0].upper(), task_name[1:],
source_product_name[0].upper(), source_product_name[1:])
else:
resulting_group_name = source_product_name
# create product name `<product type><Task><Product name>`
if not source_product_name.startswith(product_type):
resulting_group_name = '{}{}{}{}{}'.format(
@ -1168,7 +1160,7 @@ def prepare_cache_representations(skeleton_data, exp_files, anatomy):
"""
representations = []
collections, remainders = clique.assemble(exp_files)
collections, _remainders = clique.assemble(exp_files)
log = Logger.get_logger("farm_publishing")

View file

@ -221,19 +221,6 @@ class LoaderPlugin(list):
"""
return cls.options or []
@property
def fname(self):
"""Backwards compatibility with deprecation warning"""
self.log.warning((
"DEPRECATION WARNING: Source - Loader plugin {}."
" The 'fname' property on the Loader plugin will be removed in"
" future versions of OpenPype. Planned version to drop the support"
" is 3.16.6 or 3.17.0."
).format(self.__class__.__name__))
if hasattr(self, "_fname"):
return self._fname
@classmethod
def get_representation_name_aliases(cls, representation_name: str):
"""Return representation names to which switching is allowed from

View file

@ -316,12 +316,6 @@ def load_with_repre_context(
)
loader = Loader()
# Backwards compatibility: Originally the loader's __init__ required the
# representation context to set `fname` attribute to the filename to load
# Deprecated - to be removed in OpenPype 3.16.6 or 3.17.0.
loader._fname = get_representation_path_from_context(repre_context)
return loader.load(repre_context, name, namespace, options)

View file

@ -41,7 +41,7 @@ def validate(data, schema=None):
if not _CACHED:
_precache()
root, schema = data["schema"].rsplit(":", 1)
_root, schema = data["schema"].rsplit(":", 1)
if isinstance(schema, str):
schema = _cache[schema + ".json"]

View file

@ -209,7 +209,7 @@ def get_staging_dir_info(
staging_dir_config = get_staging_dir_config(
project_entity["name"],
task_type,
task_name ,
task_name,
product_type,
product_name,
host_name,

View file

@ -226,11 +226,26 @@ class _CacheItems:
thumbnails_cache = ThumbnailsCache()
def get_thumbnail_path(project_name, thumbnail_id):
def get_thumbnail_path(
project_name: str,
entity_type: str,
entity_id: str,
thumbnail_id: str
):
"""Get path to thumbnail image.
Thumbnail is cached by thumbnail id but is received using entity type and
entity id.
Notes:
Function 'get_thumbnail_by_id' can't be used because it does not work
for artists. The endpoint can't validate artist permissions.
Args:
project_name (str): Project where thumbnail belongs to.
entity_type (str): Entity type "folder", "task", "version"
and "workfile".
entity_id (str): Entity id.
thumbnail_id (Union[str, None]): Thumbnail id.
Returns:
@ -251,7 +266,7 @@ def get_thumbnail_path(project_name, thumbnail_id):
# 'get_thumbnail_by_id' did not return output of
# 'ServerAPI' method.
con = ayon_api.get_server_api_connection()
result = con.get_thumbnail_by_id(project_name, thumbnail_id)
result = con.get_thumbnail(project_name, entity_type, entity_id)
if result is not None and result.is_valid:
return _CacheItems.thumbnails_cache.store_thumbnail(

View file

@ -16,6 +16,7 @@ from .path_resolving import (
from .utils import (
should_use_last_workfile_on_launch,
should_open_workfiles_tool_on_launch,
MissingWorkdirError,
)
from .build_workfile import BuildWorkfile
@ -46,6 +47,7 @@ __all__ = (
"should_use_last_workfile_on_launch",
"should_open_workfiles_tool_on_launch",
"MissingWorkdirError",
"BuildWorkfile",

View file

@ -329,9 +329,9 @@ def get_last_workfile(
Returns:
str: Last or first workfile as filename of full path to filename.
"""
filename, version = get_last_workfile_with_version(
"""
filename, _version = get_last_workfile_with_version(
workdir, file_template, fill_data, extensions
)
if filename is None:

View file

@ -2,6 +2,11 @@ from ayon_core.lib import filter_profiles
from ayon_core.settings import get_project_settings
class MissingWorkdirError(Exception):
    """Error for cases when a required work directory is not on disk."""
def should_use_last_workfile_on_launch(
project_name,
host_name,

View file

@ -211,7 +211,7 @@ class DeleteOldVersions(load.ProductLoaderPlugin):
f"This will keep only the last {versions_to_keep} "
f"versions for the {num_contexts} selected product{s}."
)
informative_text="Warning: This will delete files from disk"
informative_text = "Warning: This will delete files from disk"
detailed_text = (
f"Keep only {versions_to_keep} versions for:\n{contexts_list}"
)

View file

@ -22,6 +22,7 @@ from ayon_core.tools.utils import show_message_dialog
OTIO = None
FRAME_SPLITTER = "__frame_splitter__"
def _import_otio():
global OTIO
if OTIO is None:

View file

@ -1,11 +1,14 @@
# -*- coding: utf-8 -*-
"""Cleanup leftover files from publish."""
import os
import shutil
import pyblish.api
import re
import shutil
import tempfile
import pyblish.api
from ayon_core.lib import is_in_tests
from ayon_core.pipeline import PublishError
class CleanUp(pyblish.api.InstancePlugin):
@ -48,17 +51,15 @@ class CleanUp(pyblish.api.InstancePlugin):
if is_in_tests():
# let automatic test process clean up temporary data
return
# Get the errored instances
failed = []
# If instance has errors, do not clean up
for result in instance.context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
assert instance not in failed, (
"Result of '{}' instance were not success".format(
instance.data["name"]
)
)
if result["error"] is not None and result["instance"] is instance:
raise PublishError(
"Result of '{}' instance were not success".format(
instance.data["name"]
)
)
_skip_cleanup_filepaths = instance.context.data.get(
"skipCleanupFilepaths"
@ -71,10 +72,17 @@ class CleanUp(pyblish.api.InstancePlugin):
self.log.debug("Cleaning renders new...")
self.clean_renders(instance, skip_cleanup_filepaths)
if [ef for ef in self.exclude_families
if instance.data["productType"] in ef]:
# TODO: Figure out whether this could be refactored to just a
# product_type in self.exclude_families check.
product_type = instance.data["productType"]
if any(
product_type in exclude_family
for exclude_family in self.exclude_families
):
self.log.debug(
"Skipping cleanup for instance because product "
f"type is excluded from cleanup: {product_type}")
return
import tempfile
temp_root = tempfile.gettempdir()
staging_dir = instance.data.get("stagingDir", None)

View file

@ -394,7 +394,6 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
if aov:
anatomy_data["aov"] = aov
def _fill_folder_data(self, instance, project_entity, anatomy_data):
# QUESTION: should we make sure that all folder data are popped if
# folder data cannot be found?

View file

@ -39,6 +39,7 @@ class CollectAudio(pyblish.api.ContextPlugin):
"blender",
"houdini",
"max",
"circuit",
]
audio_product_name = "audioMain"

View file

@ -0,0 +1,106 @@
import pyblish.api
from ayon_core.lib import EnumDef
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import PublishError
class CollectExplicitResolution(
    pyblish.api.InstancePlugin,
    publish.AYONPyblishPluginMixin,
):
    """Collect user defined explicit resolution override for instances.

    Exposes an enum attribute definition with resolution choices coming
    from settings. When the artist picks a concrete resolution, the
    instance data is updated with width, height and pixel aspect.
    """

    label = "Choose Explicit Resolution"
    order = pyblish.api.CollectorOrder - 0.091
    settings_category = "core"
    enabled = False

    # First enum item -> no override is applied
    default_resolution_item = (None, "Don't override")

    # Settings
    product_types = []
    options = []

    # Cache of resolution items by their enum label
    # NOTE(review): 'EnumDef' below uses default="Don't override" while the
    #   item value is 'None' — confirm the default matches by label.
    resolution_items = None

    def process(self, instance):
        """Apply the selected resolution override to instance data."""
        attr_values = self.get_attr_values_from_data(instance.data)
        selected = attr_values.get("explicit_resolution", None)
        # 'None' means "Don't override" was selected (or nothing stored)
        if selected is None:
            return

        # Translate the enum text into width/height/pixel aspect and store
        instance.data.update(self._get_resolution_values(selected))

    def _get_resolution_values(self, resolution_value):
        """Translate enum text value into instance resolution data.

        Arguments:
            resolution_value (str): Selected enum item text.

        Returns:
            dict: Dictionary with 'resolutionWidth', 'resolutionHeight'
                and 'pixelAspect' keys.

        Raises:
            PublishError: When the value does not match any known option.
        """
        items_by_text = self._get_resolution_items()

        # Make sure the stored value is one of the expected choices
        item = items_by_text.get(resolution_value)
        if not item:
            raise PublishError(
                f"Invalid resolution value: {resolution_value} "
                f"expected choices: {items_by_text}"
            )
        return {
            "resolutionWidth": item["width"],
            "resolutionHeight": item["height"],
            "pixelAspect": item["pixel_aspect"],
        }

    @classmethod
    def _get_resolution_items(cls):
        """Return mapping of enum label to resolution option (cached)."""
        if cls.resolution_items is None:
            cls.resolution_items = {
                "{}x{} ({})".format(
                    option["width"],
                    option["height"],
                    option["pixel_aspect"],
                ): option
                for option in cls.options
            }
        return cls.resolution_items

    @classmethod
    def get_attr_defs_for_instance(
        cls, create_context, instance,
    ):
        """Expose the resolution enum only for configured product types."""
        if instance.product_type not in cls.product_types:
            return []

        # "Don't override" first, then all configured resolutions
        items = [cls.default_resolution_item]
        items.extend(
            (label, label)
            for label in cls._get_resolution_items()
        )

        return [
            EnumDef(
                "explicit_resolution",
                items,
                default="Don't override",
                label="Force product resolution",
            ),
        ]

View file

@ -43,4 +43,3 @@ class CollectCoreJobEnvVars(pyblish.api.ContextPlugin):
if value:
self.log.debug(f"Setting job env: {key}: {value}")
env[key] = value

View file

@ -50,7 +50,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
"comments": instance.data.get("comments", []),
}
shot_data["attributes"] = {}
shot_data["attributes"] = {}
SHOT_ATTRS = (
"handleStart",
"handleEnd",

View file

@ -32,16 +32,16 @@ class CollectManagedStagingDir(pyblish.api.InstancePlugin):
label = "Collect Managed Staging Directory"
order = pyblish.api.CollectorOrder + 0.4990
def process(self, instance):
def process(self, instance: pyblish.api.Instance):
""" Collect the staging data and stores it to the instance.
Args:
instance (object): The instance to inspect.
"""
staging_dir_path = get_instance_staging_dir(instance)
persistance = instance.data.get("stagingDir_persistent", False)
persistence: bool = instance.data.get("stagingDir_persistent", False)
self.log.info((
self.log.debug(
f"Instance staging dir was set to `{staging_dir_path}` "
f"and persistence is set to `{persistance}`"
))
f"and persistence is set to `{persistence}`"
)

View file

@ -194,7 +194,6 @@ class CollectOtioSubsetResources(
repre = self._create_representation(
frame_start, frame_end, file=filename)
else:
_trim = False
dirname, filename = os.path.split(media_ref.target_url)
@ -209,7 +208,6 @@ class CollectOtioSubsetResources(
repre = self._create_representation(
frame_start, frame_end, file=filename, trim=_trim)
instance.data["originalDirname"] = self.staging_dir
# add representation to instance data
@ -221,7 +219,6 @@ class CollectOtioSubsetResources(
instance.data["representations"].append(repre)
self.log.debug(instance.data)
def _create_representation(self, start, end, **kwargs):

View file

@ -31,6 +31,9 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
# Keep "filesequence" for backwards compatibility of older jobs
targets = ["filesequence", "farm"]
label = "Collect rendered frames"
settings_category = "core"
remove_files = False
_context = None
@ -120,7 +123,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
self._fill_staging_dir(repre_data, anatomy)
representations.append(repre_data)
if not staging_dir_persistent:
if self.remove_files and not staging_dir_persistent:
add_repre_files_for_cleanup(instance, repre_data)
instance.data["representations"] = representations
@ -170,7 +173,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
os.environ.update(session_data)
staging_dir_persistent = self._process_path(data, anatomy)
if not staging_dir_persistent:
if self.remove_files and not staging_dir_persistent:
context.data["cleanupFullPaths"].append(path)
context.data["cleanupEmptyDirs"].append(
os.path.dirname(path)

View file

@ -54,7 +54,8 @@ class ExtractBurnin(publish.Extractor):
"houdini",
"max",
"blender",
"unreal"
"unreal",
"circuit",
]
optional = True

View file

@ -280,10 +280,14 @@ class ExtractOIIOTranscode(publish.Extractor):
collection = collections[0]
frames = list(collection.indexes)
if collection.holes():
if collection.holes().indexes:
return files_to_convert
frame_str = "{}-{}#".format(frames[0], frames[-1])
# Get the padding from the collection
# This is the number of digits used in the frame numbers
padding = collection.padding
frame_str = "{}-{}%0{}d".format(frames[0], frames[-1], padding)
file_name = "{}{}{}".format(collection.head, frame_str,
collection.tail)

View file

@ -54,7 +54,7 @@ class ExtractOTIOReview(
# plugin default attributes
to_width = 1280
to_height = 720
output_ext = ".jpg"
output_ext = ".png"
def process(self, instance):
# Not all hosts can import these modules.
@ -510,6 +510,12 @@ class ExtractOTIOReview(
"-tune", "stillimage"
])
if video or sequence:
command.extend([
"-vf", f"scale={self.to_width}:{self.to_height}:flags=lanczos",
"-compression_level", "5",
])
# add output attributes
command.extend([
"-start_number", str(out_frame_start)
@ -520,9 +526,10 @@ class ExtractOTIOReview(
input_extension
and self.output_ext == input_extension
):
command.extend([
"-c", "copy"
])
command.extend(["-c", "copy"])
else:
# For lossy formats, force re-encode
command.extend(["-pix_fmt", "rgba"])
# add output path at the end
command.append(output_path)

View file

@ -1,3 +1,4 @@
from __future__ import annotations
import os
import re
import copy
@ -5,11 +6,16 @@ import json
import shutil
import subprocess
from abc import ABC, abstractmethod
from typing import Any, Optional
from dataclasses import dataclass, field
import tempfile
import clique
import speedcopy
import pyblish.api
from ayon_api import get_last_version_by_product_name, get_representations
from ayon_core.lib import (
get_ffmpeg_tool_args,
filter_profiles,
@ -31,6 +37,39 @@ from ayon_core.pipeline.publish import (
from ayon_core.pipeline.publish.lib import add_repre_files_for_cleanup
@dataclass
class TempData:
    """Temporary data used across extractor's process.

    Collected in 'prepare_temp_data' and passed between helper methods
    instead of a loose dictionary.
    """

    # Timing information of the processed instance
    fps: float
    frame_start: int
    frame_end: int
    handle_start: int
    handle_end: int
    frame_start_handle: int
    frame_end_handle: int
    output_frame_start: int
    output_frame_end: int
    # Resolution of the instance
    pixel_aspect: float
    resolution_width: int
    resolution_height: int
    # Source representation the review output is created from
    origin_repre: dict[str, Any]
    input_is_sequence: bool
    first_sequence_frame: int
    # Input files have an extension with alpha support -> BG can be added
    input_allow_bg: bool
    with_audio: bool
    without_handles: bool
    handles_are_set: bool
    input_ext: str
    # Absolute paths to rendered files (used by 'only_rendered' fill mode)
    explicit_input_paths: list[str]
    # Temporary files to be removed at the end of the process
    paths_to_remove: list[str]

    # Set later
    full_output_path: str = ""
    filled_files: dict[int, str] = field(default_factory=dict)
    output_ext_is_image: bool = True
    output_is_sequence: bool = True
def frame_to_timecode(frame: int, fps: float) -> str:
"""Convert a frame number and FPS to editorial timecode (HH:MM:SS:FF).
@ -91,7 +130,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
"webpublisher",
"aftereffects",
"flame",
"unreal"
"unreal",
"circuit",
]
# Supported extensions
@ -196,7 +236,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
).format(repre_name))
continue
input_ext = repre["ext"]
input_ext = repre["ext"].lower()
if input_ext.startswith("."):
input_ext = input_ext[1:]
@ -399,15 +439,73 @@ class ExtractReview(pyblish.api.InstancePlugin):
)
temp_data = self.prepare_temp_data(instance, repre, output_def)
files_to_clean = []
if temp_data["input_is_sequence"]:
new_frame_files = {}
if temp_data.input_is_sequence:
self.log.debug("Checking sequence to fill gaps in sequence..")
files_to_clean = self.fill_sequence_gaps(
files=temp_data["origin_repre"]["files"],
staging_dir=new_repre["stagingDir"],
start_frame=temp_data["frame_start"],
end_frame=temp_data["frame_end"]
)
files = temp_data.origin_repre["files"]
collections = clique.assemble(
files,
)[0]
if len(collections) != 1:
raise KnownPublishError(
"Multiple collections {} found.".format(collections))
collection = collections[0]
fill_missing_frames = _output_def["fill_missing_frames"]
if fill_missing_frames == "closest_existing":
new_frame_files = self.fill_sequence_gaps_from_existing(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data.frame_start,
end_frame=temp_data.frame_end,
)
elif fill_missing_frames == "blank":
new_frame_files = self.fill_sequence_gaps_with_blanks(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data.frame_start,
end_frame=temp_data.frame_end,
resolution_width=temp_data.resolution_width,
resolution_height=temp_data.resolution_height,
extension=temp_data.input_ext,
temp_data=temp_data
)
elif fill_missing_frames == "previous_version":
new_frame_files = self.fill_sequence_gaps_with_previous(
collection=collection,
staging_dir=new_repre["stagingDir"],
instance=instance,
current_repre_name=repre["name"],
start_frame=temp_data.frame_start,
end_frame=temp_data.frame_end,
)
# fallback to original workflow
if new_frame_files is None:
new_frame_files = (
self.fill_sequence_gaps_from_existing(
collection=collection,
staging_dir=new_repre["stagingDir"],
start_frame=temp_data.frame_start,
end_frame=temp_data.frame_end,
))
elif fill_missing_frames == "only_rendered":
temp_data.explicit_input_paths = [
os.path.join(
new_repre["stagingDir"], file
).replace("\\", "/")
for file in files
]
frame_start = min(collection.indexes)
frame_end = max(collection.indexes)
# modify range for burnins
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
temp_data.frame_start = frame_start
temp_data.frame_end = frame_end
temp_data.filled_files = new_frame_files
# create or update outputName
output_name = new_repre.get("outputName", "")
@ -415,7 +513,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
if output_name:
output_name += "_"
output_name += output_def["filename_suffix"]
if temp_data["without_handles"]:
if temp_data.without_handles:
output_name += "_noHandles"
# add outputName to anatomy format fill_data
@ -428,7 +526,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
# like Resolve or Premiere can detect the start frame for e.g.
# review output files
"timecode": frame_to_timecode(
frame=temp_data["frame_start_handle"],
frame=temp_data.frame_start_handle,
fps=float(instance.data["fps"])
)
})
@ -445,7 +543,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
except ZeroDivisionError:
# TODO recalculate width and height using OIIO before
# conversion
if 'exr' in temp_data["origin_repre"]["ext"]:
if 'exr' in temp_data.origin_repre["ext"]:
self.log.warning(
(
"Unsupported compression on input files."
@ -464,17 +562,20 @@ class ExtractReview(pyblish.api.InstancePlugin):
run_subprocess(subprcs_cmd, shell=True, logger=self.log)
# delete files added to fill gaps
if files_to_clean:
for f in files_to_clean:
os.unlink(f)
if new_frame_files:
for filepath in new_frame_files.values():
os.unlink(filepath)
for filepath in temp_data.paths_to_remove:
os.unlink(filepath)
new_repre.update({
"fps": temp_data["fps"],
"fps": temp_data.fps,
"name": "{}_{}".format(output_name, output_ext),
"outputName": output_name,
"outputDef": output_def,
"frameStartFtrack": temp_data["output_frame_start"],
"frameEndFtrack": temp_data["output_frame_end"],
"frameStartFtrack": temp_data.output_frame_start,
"frameEndFtrack": temp_data.output_frame_end,
"ffmpeg_cmd": subprcs_cmd
})
@ -500,7 +601,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
# - there can be more than one collection
return isinstance(repre["files"], (list, tuple))
def prepare_temp_data(self, instance, repre, output_def):
def prepare_temp_data(self, instance, repre, output_def) -> TempData:
"""Prepare dictionary with values used across extractor's process.
All data are collected from instance, context, origin representation
@ -516,7 +617,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
output_def (dict): Definition of output of this plugin.
Returns:
dict: All data which are used across methods during process.
TempData: All data which are used across methods during process.
Their values should not change during process but new keys
with values may be added.
"""
@ -559,6 +660,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
input_is_sequence = self.input_is_sequence(repre)
input_allow_bg = False
first_sequence_frame = None
if input_is_sequence and repre["files"]:
# Calculate first frame that should be used
cols, _ = clique.assemble(repre["files"])
@ -577,28 +679,33 @@ class ExtractReview(pyblish.api.InstancePlugin):
ext = os.path.splitext(repre["files"][0])[1].replace(".", "")
if ext.lower() in self.alpha_exts:
input_allow_bg = True
else:
ext = os.path.splitext(repre["files"])[1].replace(".", "")
return {
"fps": float(instance.data["fps"]),
"frame_start": frame_start,
"frame_end": frame_end,
"handle_start": handle_start,
"handle_end": handle_end,
"frame_start_handle": frame_start_handle,
"frame_end_handle": frame_end_handle,
"output_frame_start": int(output_frame_start),
"output_frame_end": int(output_frame_end),
"pixel_aspect": instance.data.get("pixelAspect", 1),
"resolution_width": instance.data.get("resolutionWidth"),
"resolution_height": instance.data.get("resolutionHeight"),
"origin_repre": repre,
"input_is_sequence": input_is_sequence,
"first_sequence_frame": first_sequence_frame,
"input_allow_bg": input_allow_bg,
"with_audio": with_audio,
"without_handles": without_handles,
"handles_are_set": handles_are_set
}
return TempData(
fps=float(instance.data["fps"]),
frame_start=frame_start,
frame_end=frame_end,
handle_start=handle_start,
handle_end=handle_end,
frame_start_handle=frame_start_handle,
frame_end_handle=frame_end_handle,
output_frame_start=int(output_frame_start),
output_frame_end=int(output_frame_end),
pixel_aspect=instance.data.get("pixelAspect", 1),
resolution_width=instance.data.get("resolutionWidth"),
resolution_height=instance.data.get("resolutionHeight"),
origin_repre=repre,
input_is_sequence=input_is_sequence,
first_sequence_frame=first_sequence_frame,
input_allow_bg=input_allow_bg,
with_audio=with_audio,
without_handles=without_handles,
handles_are_set=handles_are_set,
input_ext=ext,
explicit_input_paths=[], # absolute paths to rendered files
paths_to_remove=[]
)
def _ffmpeg_arguments(
self,
@ -619,7 +726,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
instance (Instance): Currently processed instance.
new_repre (dict): Representation representing output of this
process.
temp_data (dict): Base data for successful process.
temp_data (TempData): Base data for successful process.
"""
# Get FFmpeg arguments from profile presets
@ -661,31 +768,32 @@ class ExtractReview(pyblish.api.InstancePlugin):
# Set output frames len to 1 when output is single image
if (
temp_data["output_ext_is_image"]
and not temp_data["output_is_sequence"]
temp_data.output_ext_is_image
and not temp_data.output_is_sequence
):
output_frames_len = 1
else:
output_frames_len = (
temp_data["output_frame_end"]
- temp_data["output_frame_start"]
temp_data.output_frame_end
- temp_data.output_frame_start
+ 1
)
duration_seconds = float(output_frames_len / temp_data["fps"])
duration_seconds = float(output_frames_len / temp_data.fps)
# Define which layer should be used
if layer_name:
ffmpeg_input_args.extend(["-layer", layer_name])
if temp_data["input_is_sequence"]:
explicit_input_paths = temp_data.explicit_input_paths
if temp_data.input_is_sequence and not explicit_input_paths:
# Set start frame of input sequence (just frame in filename)
# - definition of input filepath
# - add handle start if output should be without handles
start_number = temp_data["first_sequence_frame"]
if temp_data["without_handles"] and temp_data["handles_are_set"]:
start_number += temp_data["handle_start"]
start_number = temp_data.first_sequence_frame
if temp_data.without_handles and temp_data.handles_are_set:
start_number += temp_data.handle_start
ffmpeg_input_args.extend([
"-start_number", str(start_number)
])
@ -698,32 +806,32 @@ class ExtractReview(pyblish.api.InstancePlugin):
# }
# Add framerate to input when input is sequence
ffmpeg_input_args.extend([
"-framerate", str(temp_data["fps"])
"-framerate", str(temp_data.fps)
])
# Add duration of an input sequence if output is video
if not temp_data["output_is_sequence"]:
if not temp_data.output_is_sequence:
ffmpeg_input_args.extend([
"-to", "{:0.10f}".format(duration_seconds)
])
if temp_data["output_is_sequence"]:
if temp_data.output_is_sequence and not explicit_input_paths:
# Set start frame of output sequence (just frame in filename)
# - this is definition of an output
ffmpeg_output_args.extend([
"-start_number", str(temp_data["output_frame_start"])
"-start_number", str(temp_data.output_frame_start)
])
# Change output's duration and start point if should not contain
# handles
if temp_data["without_handles"] and temp_data["handles_are_set"]:
if temp_data.without_handles and temp_data.handles_are_set:
# Set output duration in seconds
ffmpeg_output_args.extend([
"-t", "{:0.10}".format(duration_seconds)
])
# Add -ss (start offset in seconds) if input is not sequence
if not temp_data["input_is_sequence"]:
start_sec = float(temp_data["handle_start"]) / temp_data["fps"]
if not temp_data.input_is_sequence:
start_sec = float(temp_data.handle_start) / temp_data.fps
# Set start time without handles
# - Skip if start sec is 0.0
if start_sec > 0.0:
@ -732,18 +840,42 @@ class ExtractReview(pyblish.api.InstancePlugin):
])
# Set frame range of output when input or output is sequence
elif temp_data["output_is_sequence"]:
elif temp_data.output_is_sequence:
ffmpeg_output_args.extend([
"-frames:v", str(output_frames_len)
])
# Add video/image input path
ffmpeg_input_args.extend([
"-i", path_to_subprocess_arg(temp_data["full_input_path"])
])
if not explicit_input_paths:
# Add video/image input path
ffmpeg_input_args.extend([
"-i", path_to_subprocess_arg(temp_data.full_input_path)
])
else:
frame_duration = 1 / temp_data.fps
explicit_frames_meta = tempfile.NamedTemporaryFile(
mode="w", prefix="explicit_frames", suffix=".txt", delete=False
)
explicit_frames_meta.close()
explicit_frames_path = explicit_frames_meta.name
with open(explicit_frames_path, "w") as fp:
lines = [
f"file '{path}'{os.linesep}duration {frame_duration}"
for path in temp_data.explicit_input_paths
]
fp.write("\n".join(lines))
temp_data.paths_to_remove.append(explicit_frames_path)
# let ffmpeg use only rendered files, might have gaps
ffmpeg_input_args.extend([
"-f", "concat",
"-safe", "0",
"-i", path_to_subprocess_arg(explicit_frames_path),
"-r", str(temp_data.fps)
])
# Add audio arguments if there are any. Skipped when output are images.
if not temp_data["output_ext_is_image"] and temp_data["with_audio"]:
if not temp_data.output_ext_is_image and temp_data.with_audio:
audio_in_args, audio_filters, audio_out_args = self.audio_args(
instance, temp_data, duration_seconds
)
@ -765,7 +897,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
bg_red, bg_green, bg_blue, bg_alpha = bg_color
if bg_alpha > 0.0:
if not temp_data["input_allow_bg"]:
if not temp_data.input_allow_bg:
self.log.info((
"Output definition has defined BG color input was"
" resolved as does not support adding BG."
@ -796,7 +928,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
# NOTE This must be latest added item to output arguments.
ffmpeg_output_args.append(
path_to_subprocess_arg(temp_data["full_output_path"])
path_to_subprocess_arg(temp_data.full_output_path)
)
return self.ffmpeg_full_args(
@ -880,8 +1012,159 @@ class ExtractReview(pyblish.api.InstancePlugin):
return all_args
def fill_sequence_gaps(self, files, staging_dir, start_frame, end_frame):
# type: (list, str, int, int) -> list
    def fill_sequence_gaps_with_previous(
        self,
        collection: "clique.Collection",
        staging_dir: str,
        instance: pyblish.plugin.Instance,
        current_repre_name: str,
        start_frame: int,
        end_frame: int
    ) -> Optional[dict[int, str]]:
        """Fill missing frames with frames of the last published version.

        Args:
            collection (clique.Collection): Collection of currently
                rendered files of the processed representation.
            staging_dir (str): Directory where missing frames are created.
            instance (pyblish.plugin.Instance): Processed instance used to
                query the last published version.
            current_repre_name (str): Representation name to look for in
                the last version.
            start_frame (int): First frame of the expected sequence.
            end_frame (int): Last frame of the expected sequence
                (inclusive).

        Returns:
            Optional[dict[int, str]]: Mapping of frame number to copied
                file path. 'None' when previous version files cannot be
                used and caller should fall back to another fill method.
        """
        repre_file_paths = self._get_last_version_files(
            instance, current_repre_name)
        if repre_file_paths is None:
            # issues in getting last version files, falling back
            return None

        prev_collection = clique.assemble(
            repre_file_paths,
            patterns=[clique.PATTERNS["frames"]],
            minimum_items=1
        )[0][0]
        prev_col_format = prev_collection.format("{head}{padding}{tail}")

        added_files = {}
        anatomy = instance.context.data["anatomy"]
        col_format = collection.format("{head}{padding}{tail}")
        for frame in range(start_frame, end_frame + 1):
            if frame in collection.indexes:
                continue
            hole_fpath = os.path.join(staging_dir, col_format % frame)

            previous_version_path = prev_col_format % frame
            # Stored representation paths are rootless -> fill the root
            # for the current platform
            previous_version_path = anatomy.fill_root(previous_version_path)
            if not os.path.exists(previous_version_path):
                self.log.warning(
                    "Missing frame should be replaced from "
                    f"'{previous_version_path}' but that doesn't exist. "
                    "Falling back to filling from currently last rendered."
                )
                return None

            self.log.warning(
                f"Replacing missing '{hole_fpath}' with "
                f"'{previous_version_path}'"
            )
            speedcopy.copyfile(previous_version_path, hole_fpath)
            added_files[frame] = hole_fpath

        return added_files
def _get_last_version_files(
self,
instance: pyblish.plugin.Instance,
current_repre_name: str,
):
product_name = instance.data["productName"]
project_name = instance.data["projectEntity"]["name"]
folder_entity = instance.data["folderEntity"]
version_entity = get_last_version_by_product_name(
project_name,
product_name,
folder_entity["id"],
fields={"id"}
)
if not version_entity:
return None
matching_repres = get_representations(
project_name,
version_ids=[version_entity["id"]],
representation_names=[current_repre_name],
fields={"files"}
)
if not matching_repres:
return None
matching_repre = list(matching_repres)[0]
repre_file_paths = [
file_info["path"]
for file_info in matching_repre["files"]
]
return repre_file_paths
    def fill_sequence_gaps_with_blanks(
        self,
        collection: "clique.Collection",
        staging_dir: str,
        start_frame: int,
        end_frame: int,
        resolution_width: int,
        resolution_height: int,
        extension: str,
        temp_data: TempData
    ) -> Optional[dict[int, str]]:
        """Fill missing frames in sequence with a single black frame.

        The blank frame is rendered only once (lazily, when the first hole
        is found) and then copied to every missing frame path.

        Args:
            collection (clique.Collection): Collection of currently
                rendered files of the processed representation.
            staging_dir (str): Directory where missing frames are created.
            start_frame (int): First frame of the expected sequence.
            end_frame (int): Last frame of the expected sequence
                (inclusive).
            resolution_width (int): Width of the created blank frame.
            resolution_height (int): Height of the created blank frame.
            extension (str): Extension (without dot) of created files.
            temp_data (TempData): Shared data; the blank source frame is
                appended to 'paths_to_remove' for later cleanup.

        Returns:
            Optional[dict[int, str]]: Mapping of frame number to created
                file path.
        """
        blank_frame_path = None
        added_files = {}
        col_format = collection.format("{head}{padding}{tail}")
        for frame in range(start_frame, end_frame + 1):
            if frame in collection.indexes:
                continue
            hole_fpath = os.path.join(staging_dir, col_format % frame)
            # Render the shared blank source frame only once
            if blank_frame_path is None:
                blank_frame_path = self._create_blank_frame(
                    staging_dir, extension, resolution_width, resolution_height
                )
                # Blank source frame is temporary -> remove after encoding
                temp_data.paths_to_remove.append(blank_frame_path)
            speedcopy.copyfile(blank_frame_path, hole_fpath)
            added_files[frame] = hole_fpath

        return added_files
def _create_blank_frame(
self,
staging_dir,
extension,
resolution_width,
resolution_height
):
blank_frame_path = os.path.join(staging_dir, f"blank.{extension}")
command = get_ffmpeg_tool_args(
"ffmpeg",
"-f", "lavfi",
"-i", "color=c=black:s={}x{}:d=1".format(
resolution_width, resolution_height
),
"-tune", "stillimage",
"-frames:v", "1",
blank_frame_path
)
self.log.debug("Executing: {}".format(" ".join(command)))
output = run_subprocess(
command, logger=self.log
)
self.log.debug("Output: {}".format(output))
return blank_frame_path
def fill_sequence_gaps_from_existing(
self,
collection,
staging_dir: str,
start_frame: int,
end_frame: int
) -> dict[int, str]:
"""Fill missing files in sequence by duplicating existing ones.
This will take the nearest frame file and copy it so as to fill
@ -889,40 +1172,33 @@ class ExtractReview(pyblish.api.InstancePlugin):
hole ahead.
Args:
files (list): List of representation files.
collection (clique.collection)
staging_dir (str): Path to staging directory.
start_frame (int): Sequence start (no matter what files are there)
end_frame (int): Sequence end (no matter what files are there)
Returns:
list of added files. Those should be cleaned after work
dict[int, str] of added files. Those should be cleaned after work
is done.
Raises:
KnownPublishError: if more than one collection is obtained.
"""
collections = clique.assemble(files)[0]
if len(collections) != 1:
raise KnownPublishError(
"Multiple collections {} found.".format(collections))
col = collections[0]
# Prepare which hole is filled with what frame
# - the frame is filled only with already existing frames
prev_frame = next(iter(col.indexes))
prev_frame = next(iter(collection.indexes))
hole_frame_to_nearest = {}
for frame in range(int(start_frame), int(end_frame) + 1):
if frame in col.indexes:
if frame in collection.indexes:
prev_frame = frame
else:
# Use previous frame as source for hole
hole_frame_to_nearest[frame] = prev_frame
# Calculate paths
added_files = []
col_format = col.format("{head}{padding}{tail}")
added_files = {}
col_format = collection.format("{head}{padding}{tail}")
for hole_frame, src_frame in hole_frame_to_nearest.items():
hole_fpath = os.path.join(staging_dir, col_format % hole_frame)
src_fpath = os.path.join(staging_dir, col_format % src_frame)
@ -931,11 +1207,11 @@ class ExtractReview(pyblish.api.InstancePlugin):
"Missing previously detected file: {}".format(src_fpath))
speedcopy.copyfile(src_fpath, hole_fpath)
added_files.append(hole_fpath)
added_files[hole_frame] = hole_fpath
return added_files
def input_output_paths(self, new_repre, output_def, temp_data):
def input_output_paths(self, new_repre, output_def, temp_data: TempData):
"""Deduce input and output file paths based on entered data.
Input may be sequence of images, video file or single image file and
@ -948,11 +1224,11 @@ class ExtractReview(pyblish.api.InstancePlugin):
"sequence_file" (if output is sequence) keys to new representation.
"""
repre = temp_data["origin_repre"]
repre = temp_data.origin_repre
src_staging_dir = repre["stagingDir"]
dst_staging_dir = new_repre["stagingDir"]
if temp_data["input_is_sequence"]:
if temp_data.input_is_sequence:
collections = clique.assemble(repre["files"])[0]
full_input_path = os.path.join(
src_staging_dir,
@ -977,6 +1253,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
# Make sure to have full path to one input file
full_input_path_single_file = full_input_path
filled_files = temp_data.filled_files
if filled_files:
first_frame, first_file = next(iter(filled_files.items()))
if first_file < full_input_path_single_file:
self.log.warning(f"Using filled frame: '{first_file}'")
full_input_path_single_file = first_file
temp_data.first_sequence_frame = first_frame
filename_suffix = output_def["filename_suffix"]
output_ext = output_def.get("ext")
@ -1003,8 +1287,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
)
if output_is_sequence:
new_repre_files = []
frame_start = temp_data["output_frame_start"]
frame_end = temp_data["output_frame_end"]
frame_start = temp_data.output_frame_start
frame_end = temp_data.output_frame_end
filename_base = "{}_{}".format(filename, filename_suffix)
# Temporary template for frame filling. Example output:
@ -1041,18 +1325,18 @@ class ExtractReview(pyblish.api.InstancePlugin):
new_repre["stagingDir"] = dst_staging_dir
# Store paths to temp data
temp_data["full_input_path"] = full_input_path
temp_data["full_input_path_single_file"] = full_input_path_single_file
temp_data["full_output_path"] = full_output_path
temp_data.full_input_path = full_input_path
temp_data.full_input_path_single_file = full_input_path_single_file
temp_data.full_output_path = full_output_path
# Store information about output
temp_data["output_ext_is_image"] = output_ext_is_image
temp_data["output_is_sequence"] = output_is_sequence
temp_data.output_ext_is_image = output_ext_is_image
temp_data.output_is_sequence = output_is_sequence
self.log.debug("Input path {}".format(full_input_path))
self.log.debug("Output path {}".format(full_output_path))
def audio_args(self, instance, temp_data, duration_seconds):
def audio_args(self, instance, temp_data: TempData, duration_seconds):
"""Prepares FFMpeg arguments for audio inputs."""
audio_in_args = []
audio_filters = []
@ -1069,7 +1353,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
frame_start_ftrack = instance.data.get("frameStartFtrack")
if frame_start_ftrack is not None:
offset_frames = frame_start_ftrack - audio["offset"]
offset_seconds = offset_frames / temp_data["fps"]
offset_seconds = offset_frames / temp_data.fps
if offset_seconds > 0:
audio_in_args.append(
@ -1253,7 +1537,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
return output
def rescaling_filters(self, temp_data, output_def, new_repre):
def rescaling_filters(self, temp_data: TempData, output_def, new_repre):
"""Prepare video filters based on tags in new representation.
It is possible to add letterboxes to output video or rescale to
@ -1273,7 +1557,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
self.log.debug("reformat_in_baking: `{}`".format(reformat_in_baking))
# NOTE Skipped using instance's resolution
full_input_path_single_file = temp_data["full_input_path_single_file"]
full_input_path_single_file = temp_data.full_input_path_single_file
try:
streams = get_ffprobe_streams(
full_input_path_single_file, self.log
@ -1298,7 +1582,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
break
# Get instance data
pixel_aspect = temp_data["pixel_aspect"]
pixel_aspect = temp_data.pixel_aspect
if reformat_in_baking:
self.log.debug((
"Using resolution from input. It is already "
@ -1332,7 +1616,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
bg_red, bg_green, bg_blue = overscan_color
else:
# Backwards compatibility
bg_red, bg_green, bg_blue, _ = overscan_color
bg_red, bg_green, bg_blue, _ = overscan_color
overscan_color_value = "#{0:0>2X}{1:0>2X}{2:0>2X}".format(
bg_red, bg_green, bg_blue
@ -1393,8 +1677,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
# - use instance resolution only if there were not scale changes
# that may massively affect output 'use_input_res'
if not use_input_res and output_width is None or output_height is None:
output_width = temp_data["resolution_width"]
output_height = temp_data["resolution_height"]
output_width = temp_data.resolution_width
output_height = temp_data.resolution_height
# Use source's input resolution instance does not have set it.
if output_width is None or output_height is None:

View file

@ -17,7 +17,7 @@ from ayon_core.lib import (
)
from ayon_core.lib.transcoding import convert_colorspace
from ayon_core.lib.transcoding import VIDEO_EXTENSIONS
from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
class ExtractThumbnail(pyblish.api.InstancePlugin):
@ -39,7 +39,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
"nuke",
"aftereffects",
"unreal",
"houdini"
"houdini",
"circuit",
]
enabled = False
@ -162,9 +163,12 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# Store new staging to cleanup paths
instance.context.data["cleanupFullPaths"].append(dst_staging)
thumbnail_created = False
oiio_supported = is_oiio_supported()
thumbnail_created = False
for repre in filtered_repres:
# Reset for each iteration to handle cases where multiple
# reviewable thumbnails are needed
repre_thumb_created = False
repre_files = repre["files"]
src_staging = os.path.normpath(repre["stagingDir"])
if not isinstance(repre_files, (list, tuple)):
@ -213,7 +217,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
)
# If the input can read by OIIO then use OIIO method for
# conversion otherwise use ffmpeg
thumbnail_created = self._create_thumbnail_oiio(
repre_thumb_created = self._create_thumbnail_oiio(
full_input_path,
full_output_path,
colorspace_data
@ -222,21 +226,22 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# Try to use FFMPEG if OIIO is not supported or for cases when
# oiiotool isn't available or representation is not having
# colorspace data
if not thumbnail_created:
if not repre_thumb_created:
if oiio_supported:
self.log.debug(
"Converting with FFMPEG because input"
" can't be read by OIIO."
)
thumbnail_created = self._create_thumbnail_ffmpeg(
repre_thumb_created = self._create_thumbnail_ffmpeg(
full_input_path, full_output_path
)
# Skip representation and try next one if wasn't created
if not thumbnail_created:
if not repre_thumb_created:
continue
thumbnail_created = True
if len(explicit_repres) > 1:
repre_name = "thumbnail_{}".format(repre["outputName"])
else:
@ -331,7 +336,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
return need_thumb_repres
def _get_filtered_repres(self, instance):
filtered_repres = []
review_repres = []
other_repres = []
src_repres = instance.data.get("representations") or []
for repre in src_repres:
@ -343,17 +349,36 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# to be published locally
continue
if "review" not in tags:
continue
if not repre.get("files"):
self.log.debug((
"Representation \"{}\" doesn't have files. Skipping"
).format(repre["name"]))
continue
filtered_repres.append(repre)
return filtered_repres
if "review" in tags:
review_repres.append(repre)
elif self._is_valid_images_repre(repre):
other_repres.append(repre)
return review_repres + other_repres
def _is_valid_images_repre(self, repre):
    """Check if representation contains valid image files

    Args:
        repre (dict): representation

    Returns:
        bool: whether the representation has the valid image content
    """
    # 'files' may be a single filename or a sequence of filenames;
    # only the first file's extension is inspected
    files = repre["files"]
    if isinstance(files, (list, tuple)):
        filename = files[0]
    else:
        filename = files
    extension = os.path.splitext(filename)[1].lower()
    return extension in IMAGE_EXTENSIONS or extension in VIDEO_EXTENSIONS
def _create_thumbnail_oiio(
self,
@ -449,7 +474,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# output arguments from presets
jpeg_items.extend(ffmpeg_args.get("output") or [])
# we just want one frame from movie files
jpeg_items.extend(["-vframes", "1"])
jpeg_items.extend(["-frames:v", "1"])
if resolution_arg:
jpeg_items.extend(resolution_arg)
@ -481,27 +506,36 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# Set video input attributes
max_int = str(2147483647)
video_data = get_ffprobe_data(video_file_path, logger=self.log)
# Use duration of the individual streams since it is returned with
# higher decimal precision than 'format.duration'. We need this
# more precise value for calculating the correct amount of frames
# for higher FPS ranges or decimal ranges, e.g. 29.97 FPS
duration = max(
float(stream.get("duration", 0))
for stream in video_data["streams"]
if stream.get("codec_type") == "video"
)
cmd_args = [
"-y",
"-ss", str(duration * self.duration_split),
# Get duration or use a safe default (single frame)
duration = 0
for stream in video_data["streams"]:
if stream.get("codec_type") == "video":
stream_duration = float(stream.get("duration", 0))
if stream_duration > duration:
duration = stream_duration
# For very short videos, just use the first frame
# Calculate seek position safely
seek_position = 0.0
# Only use timestamp calculation for videos longer than 0.1 seconds
if duration > 0.1:
seek_position = duration * self.duration_split
# Build command args
cmd_args = []
if seek_position > 0.0:
cmd_args.extend(["-ss", str(seek_position)])
# Add generic ffmpeg commands
cmd_args.extend([
"-i", video_file_path,
"-analyzeduration", max_int,
"-probesize", max_int,
"-vframes", "1"
]
# add output file path
cmd_args.append(output_thumb_file_path)
"-y",
"-frames:v", "1",
output_thumb_file_path
])
# create ffmpeg command
cmd = get_ffmpeg_tool_args(
@ -512,15 +546,53 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# run subprocess
self.log.debug("Executing: {}".format(" ".join(cmd)))
run_subprocess(cmd, logger=self.log)
self.log.debug(
"Thumbnail created: {}".format(output_thumb_file_path))
return output_thumb_file_path
# Verify the output file was created
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) > 0
):
self.log.debug(
"Thumbnail created: {}".format(output_thumb_file_path))
return output_thumb_file_path
self.log.warning("Output file was not created or is empty")
# Try to create thumbnail without offset
# - skip if offset did not happen
if "-ss" not in cmd_args:
return None
self.log.debug("Trying fallback without offset")
# Remove -ss and its value
ss_index = cmd_args.index("-ss")
cmd_args.pop(ss_index) # Remove -ss
cmd_args.pop(ss_index) # Remove the timestamp value
# Create new command and try again
cmd = get_ffmpeg_tool_args("ffmpeg", *cmd_args)
self.log.debug("Fallback command: {}".format(" ".join(cmd)))
run_subprocess(cmd, logger=self.log)
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) > 0
):
self.log.debug("Fallback thumbnail created")
return output_thumb_file_path
return None
except RuntimeError as error:
self.log.warning(
"Failed intermediate thumb source using ffmpeg: {}".format(
error)
)
return None
finally:
# Remove output file if is empty
if (
os.path.exists(output_thumb_file_path)
and os.path.getsize(output_thumb_file_path) == 0
):
os.remove(output_thumb_file_path)
def _get_resolution_arg(
self,

View file

@ -170,7 +170,7 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
"-analyzeduration", max_int,
"-probesize", max_int,
"-i", src_path,
"-vframes", "1",
"-frames:v", "1",
dst_path
)

View file

@ -619,8 +619,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# used for all represe
# from temp to final
original_directory = (
instance.data.get("originalDirname") or instance_stagingdir)
instance.data.get("originalDirname") or stagingdir)
_rootless = self.get_rootless_path(anatomy, original_directory)
if _rootless == original_directory:
raise KnownPublishError((
@ -684,7 +683,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
elif is_sequence_representation:
# Collection of files (sequence)
src_collections, remainders = clique.assemble(files)
src_collections, _remainders = clique.assemble(files)
src_collection = src_collections[0]
destination_indexes = list(src_collection.indexes)

View file

@ -0,0 +1,138 @@
import copy
import pyblish.api
from typing import List
from ayon_core.lib import EnumDef
from ayon_core.pipeline import OptionalPyblishPluginMixin
class AttachReviewables(
    pyblish.api.InstancePlugin, OptionalPyblishPluginMixin
):
    """Attach reviewable to other instances

    This pre-integrator plugin allows instances to be 'attached to' other
    instances by moving all its representations over to the other
    instance.

    Even though this technically could work for any representation the
    current intent is to use for reviewables only, like e.g. `review` or
    `render` product type.

    When the reviewable is attached to another instance, the instance
    itself will not be published as a separate entity. Instead, the
    representations will be copied/moved to the instances it is attached
    to and the source instance is excluded from integration.
    """

    families = ["render", "review"]
    order = pyblish.api.IntegratorOrder - 0.499
    label = "Attach reviewables"

    settings_category = "core"

    def process(self, instance):
        # TODO: Support farm.
        #   If instance is being submitted to the farm we should pass
        #   through the 'attached reviewables' metadata to the farm job
        # TODO: Reviewable frame range and resolutions
        #   Because we are attaching the data to another instance, how do
        #   we correctly propagate the resolution + frame rate to the
        #   other instance? Do we even need to?
        # TODO: If this were to attach 'renders' to another instance that
        #   would mean there wouldn't necessarily be a render publish
        #   separate as a result. Is that correct expected behavior?
        attr_values = self.get_attr_values_from_data(instance.data)
        attach_to = attr_values.get("attach", [])
        if not attach_to:
            self.log.debug(
                "Reviewable is not set to attach to another instance."
            )
            return

        attach_instances = self._filter_attach_instances(
            instance.context, attach_to
        )
        if not attach_instances:
            # FIX: all requested targets were missing, inactive or
            # farm-bound. Previously the representations were still marked
            # for deletion and integration disabled, silently losing the
            # reviewable. Keep publishing it as-is instead.
            self.log.warning(
                "No valid instances found to attach the reviewable to."
                " Publishing it as a separate instance instead."
            )
            return

        instances_names = ", ".join(
            attach_instance.name for attach_instance in attach_instances
        )
        self.log.info(
            f"Attaching reviewable to other instances: {instances_names}"
        )

        # Copy the representations of this reviewable instance to the other
        # instance
        representations = instance.data.get("representations", [])
        for attach_instance in attach_instances:
            self.log.info(f"Attaching to {attach_instance.name}")
            attach_instance.data.setdefault("representations", []).extend(
                copy.deepcopy(representations)
            )

        # Delete representations on the reviewable instance itself
        for repre in representations:
            self.log.debug(
                "Marking representation as deleted because it was "
                f"attached to other instances instead: {repre}"
            )
            repre.setdefault("tags", []).append("delete")

        # Stop integrator from trying to integrate this instance
        instance.data["integrate"] = False

    def _filter_attach_instances(self, context, attach_to):
        """Resolve attach target ids to valid pyblish instances.

        Skips ids without a matching instance, inactive instances and
        farm instances (attaching on the farm is not supported yet).
        """
        attach_instances: List[pyblish.api.Instance] = []
        for attach_instance_id in attach_to:
            # Find the `pyblish.api.Instance` matching the
            # `CreatedInstance.id` in the `attach_to` list
            attach_instance = next(
                (
                    _inst
                    for _inst in context
                    if _inst.data.get("instance_id") == attach_instance_id
                ),
                None,
            )
            if attach_instance is None:
                continue

            # Skip inactive instances
            if not attach_instance.data.get("active", True):
                continue

            # For now do not support attaching to 'farm' instances until we
            # can pass the 'attaching' on to the farm jobs.
            if attach_instance.data.get("farm"):
                self.log.warning(
                    "Attaching to farm instances is not supported yet."
                )
                continue
            attach_instances.append(attach_instance)
        return attach_instances

    @classmethod
    def get_attr_defs_for_instance(cls, create_context, instance):
        # TODO: Check if instance is actually a 'reviewable'
        # Filtering of instance, if needed, can be customized
        if not cls.instance_matches_plugin_families(instance):
            return []

        items = []
        for other_instance in create_context.instances:
            if other_instance == instance:
                continue

            # Do not allow attaching to other reviewable instances
            if other_instance.data["productType"] in cls.families:
                continue

            items.append(
                {
                    "label": other_instance.label,
                    "value": str(other_instance.id),
                }
            )

        return [
            EnumDef(
                "attach",
                label="Attach reviewable",
                multiselection=True,
                items=items,
                tooltip="Attach this reviewable to another instance",
            )
        ]

View file

@ -1,7 +1,11 @@
import os
import copy
import errno
import itertools
import shutil
from concurrent.futures import ThreadPoolExecutor
from speedcopy import copyfile
import clique
import pyblish.api
@ -13,6 +17,7 @@ from ayon_api.operations import (
from ayon_api.utils import create_entity_id
from ayon_core.lib import create_hard_link, source_hash
from ayon_core.lib.file_transaction import wait_for_future_errors
from ayon_core.pipeline.publish import (
get_publish_template_name,
OptionalPyblishPluginMixin,
@ -415,11 +420,14 @@ class IntegrateHeroVersion(
# Copy(hardlink) paths of source and destination files
# TODO should we *only* create hardlinks?
# TODO should we keep files for deletion until this is successful?
for src_path, dst_path in src_to_dst_file_paths:
self.copy_file(src_path, dst_path)
for src_path, dst_path in other_file_paths_mapping:
self.copy_file(src_path, dst_path)
with ThreadPoolExecutor(max_workers=8) as executor:
futures = [
executor.submit(self.copy_file, src_path, dst_path)
for src_path, dst_path in itertools.chain(
src_to_dst_file_paths, other_file_paths_mapping
)
]
wait_for_future_errors(executor, futures)
# Update prepared representation entity data with files
# and integrate it to server.
@ -648,7 +656,7 @@ class IntegrateHeroVersion(
src_path, dst_path
))
shutil.copy(src_path, dst_path)
copyfile(src_path, dst_path)
def version_from_representations(self, project_name, repres):
for repre in repres:

View file

@ -7,7 +7,7 @@ class IntegrateResourcesPath(pyblish.api.InstancePlugin):
label = "Integrate Resources Path"
order = pyblish.api.IntegratorOrder - 0.05
families = ["clip", "projectfile", "plate"]
families = ["clip", "projectfile", "plate"]
def process(self, instance):
resources = instance.data.get("resources") or []

View file

@ -27,8 +27,10 @@ import collections
import pyblish.api
import ayon_api
from ayon_api import RequestTypes
from ayon_api.operations import OperationsSession
InstanceFilterResult = collections.namedtuple(
"InstanceFilterResult",
["instance", "thumbnail_path", "version_id"]
@ -161,6 +163,30 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
return None
return os.path.normpath(filled_path)
def _create_thumbnail(self, project_name: str, src_filepath: str) -> str:
    """Upload thumbnail to AYON and return its id.

    This is temporary fix of 'create_thumbnail' function in ayon_api to
    fix jpeg mime type.
    """
    # JPEG files start with the magic bytes FF D8 FF
    with open(src_filepath, "rb") as stream:
        is_jpeg = stream.read(3) == b"\xff\xd8\xff"

    if not is_jpeg:
        # Non-jpeg content is handled by the stock ayon_api helper
        return ayon_api.create_thumbnail(project_name, src_filepath)

    # Upload manually with an explicit jpeg content type
    response = ayon_api.upload_file(
        f"projects/{project_name}/thumbnails",
        src_filepath,
        request_type=RequestTypes.post,
        headers={"Content-Type": "image/jpeg"},
    )
    response.raise_for_status()
    return response.json()["id"]
def _integrate_thumbnails(
self,
filtered_instance_items,
@ -179,7 +205,7 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
).format(instance_label))
continue
thumbnail_id = ayon_api.create_thumbnail(
thumbnail_id = self._create_thumbnail(
project_name, thumbnail_path
)

View file

@ -173,7 +173,6 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if frame_end is not None:
options["frame_end"] = frame_end
options["label"] = align
self._add_burnin(text, align, options, DRAWTEXT)

View file

@ -175,7 +175,7 @@ class BaseObj:
self.log.warning("Invalid range '{}'".format(part))
continue
for idx in range(sub_parts[0], sub_parts[1]+1):
for idx in range(sub_parts[0], sub_parts[1] + 1):
indexes.append(idx)
return indexes
@ -353,7 +353,6 @@ class BaseObj:
self.items[item.id] = item
item.fill_data_format()
def reset(self):
for item in self.items.values():
item.reset()

View file

@ -282,7 +282,7 @@ class ItemTable(BaseItem):
value.draw(image, drawer)
def value_width(self):
row_heights, col_widths = self.size_values
_row_heights, col_widths = self.size_values
width = 0
for _width in col_widths:
width += _width
@ -292,7 +292,7 @@ class ItemTable(BaseItem):
return width
def value_height(self):
row_heights, col_widths = self.size_values
row_heights, _col_widths = self.size_values
height = 0
for _height in row_heights:
height += _height
@ -569,21 +569,21 @@ class TableField(BaseItem):
@property
def item_pos_x(self):
pos_x, pos_y, width, height = (
pos_x, _pos_y, _width, _height = (
self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
)
return pos_x
@property
def item_pos_y(self):
pos_x, pos_y, width, height = (
_pos_x, pos_y, _width, _height = (
self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
)
return pos_y
@property
def value_pos_x(self):
pos_x, pos_y, width, height = (
pos_x, _pos_y, width, _height = (
self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
)
alignment_hor = self.style["alignment-horizontal"].lower()
@ -605,7 +605,7 @@ class TableField(BaseItem):
@property
def value_pos_y(self):
pos_x, pos_y, width, height = (
_pos_x, pos_y, _width, height = (
self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
)

View file

@ -88,14 +88,9 @@ class _AyonSettingsCache:
@classmethod
def _get_variant(cls):
if _AyonSettingsCache.variant is None:
from ayon_core.lib import is_staging_enabled, is_dev_mode_enabled
variant = "production"
if is_dev_mode_enabled():
variant = cls._get_studio_bundle_name()
elif is_staging_enabled():
variant = "staging"
from ayon_core.lib import get_settings_variant
variant = get_settings_variant()
# Cache variant
_AyonSettingsCache.variant = variant

View file

@ -829,6 +829,37 @@ HintedLineEditButton {
}
/* Launcher specific stylesheets */
ActionsView[mode="icon"] {
/* font size can't be set on items */
font-size: 9pt;
border: 0px;
padding: 0px;
margin: 0px;
}
ActionsView[mode="icon"]::item {
padding-top: 8px;
padding-bottom: 4px;
border: 0px;
border-radius: 0.3em;
}
ActionsView[mode="icon"]::item:hover {
color: {color:font-hover};
background: #424A57;
}
ActionsView[mode="icon"]::icon {}
ActionMenuPopup #Wrapper {
border-radius: 0.3em;
background: #353B46;
}
ActionMenuPopup ActionsView[mode="icon"] {
background: transparent;
border: none;
}
#IconView[mode="icon"] {
/* font size can't be set on items */
font-size: 9pt;

View file

@ -1,22 +1,58 @@
from qtpy import QtWidgets
from __future__ import annotations
from typing import Optional
from qtpy import QtWidgets, QtGui
from ayon_core.style import load_stylesheet
from ayon_core.resources import get_ayon_icon_filepath
from ayon_core.lib import AbstractAttrDef
from .widgets import AttributeDefinitionsWidget
class AttributeDefinitionsDialog(QtWidgets.QDialog):
def __init__(self, attr_defs, parent=None):
super(AttributeDefinitionsDialog, self).__init__(parent)
def __init__(
self,
attr_defs: list[AbstractAttrDef],
title: Optional[str] = None,
submit_label: Optional[str] = None,
cancel_label: Optional[str] = None,
submit_icon: Optional[QtGui.QIcon] = None,
cancel_icon: Optional[QtGui.QIcon] = None,
parent: Optional[QtWidgets.QWidget] = None,
):
super().__init__(parent)
if title:
self.setWindowTitle(title)
icon = QtGui.QIcon(get_ayon_icon_filepath())
self.setWindowIcon(icon)
self.setStyleSheet(load_stylesheet())
attrs_widget = AttributeDefinitionsWidget(attr_defs, self)
if submit_label is None:
submit_label = "OK"
if cancel_label is None:
cancel_label = "Cancel"
btns_widget = QtWidgets.QWidget(self)
ok_btn = QtWidgets.QPushButton("OK", btns_widget)
cancel_btn = QtWidgets.QPushButton("Cancel", btns_widget)
cancel_btn = QtWidgets.QPushButton(cancel_label, btns_widget)
submit_btn = QtWidgets.QPushButton(submit_label, btns_widget)
if submit_icon is not None:
submit_btn.setIcon(submit_icon)
if cancel_icon is not None:
cancel_btn.setIcon(cancel_icon)
btns_layout = QtWidgets.QHBoxLayout(btns_widget)
btns_layout.setContentsMargins(0, 0, 0, 0)
btns_layout.addStretch(1)
btns_layout.addWidget(ok_btn, 0)
btns_layout.addWidget(submit_btn, 0)
btns_layout.addWidget(cancel_btn, 0)
main_layout = QtWidgets.QVBoxLayout(self)
@ -24,10 +60,33 @@ class AttributeDefinitionsDialog(QtWidgets.QDialog):
main_layout.addStretch(1)
main_layout.addWidget(btns_widget, 0)
ok_btn.clicked.connect(self.accept)
submit_btn.clicked.connect(self.accept)
cancel_btn.clicked.connect(self.reject)
self._attrs_widget = attrs_widget
self._submit_btn = submit_btn
self._cancel_btn = cancel_btn
def get_values(self):
return self._attrs_widget.current_value()
def set_values(self, values):
self._attrs_widget.set_value(values)
def set_submit_label(self, text: str):
self._submit_btn.setText(text)
def set_submit_icon(self, icon: QtGui.QIcon):
self._submit_btn.setIcon(icon)
def set_submit_visible(self, visible: bool):
self._submit_btn.setVisible(visible)
def set_cancel_label(self, text: str):
self._cancel_btn.setText(text)
def set_cancel_icon(self, icon: QtGui.QIcon):
self._cancel_btn.setIcon(icon)
def set_cancel_visible(self, visible: bool):
self._cancel_btn.setVisible(visible)

View file

@ -22,6 +22,7 @@ from ayon_core.tools.utils import (
FocusSpinBox,
FocusDoubleSpinBox,
MultiSelectionComboBox,
MarkdownLabel,
PlaceholderLineEdit,
PlaceholderPlainTextEdit,
set_style_property,
@ -247,12 +248,10 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
def set_value(self, value):
new_value = copy.deepcopy(value)
unused_keys = set(new_value.keys())
for widget in self._widgets_by_id.values():
attr_def = widget.attr_def
if attr_def.key not in new_value:
continue
unused_keys.remove(attr_def.key)
widget_value = new_value[attr_def.key]
if widget_value is None:
@ -350,7 +349,7 @@ class SeparatorAttrWidget(_BaseAttrDefWidget):
class LabelAttrWidget(_BaseAttrDefWidget):
def _ui_init(self):
input_widget = QtWidgets.QLabel(self)
input_widget = MarkdownLabel(self)
label = self.attr_def.label
if label:
input_widget.setText(str(label))

View file

@ -227,6 +227,9 @@ class HierarchyModel(object):
self._tasks_by_id = NestedCacheItem(
levels=2, default_factory=dict, lifetime=self.lifetime)
self._entity_ids_by_assignee = NestedCacheItem(
levels=2, default_factory=dict, lifetime=self.lifetime)
self._folders_refreshing = set()
self._tasks_refreshing = set()
self._controller = controller
@ -238,6 +241,8 @@ class HierarchyModel(object):
self._task_items.reset()
self._tasks_by_id.reset()
self._entity_ids_by_assignee.reset()
def refresh_project(self, project_name):
"""Force to refresh folder items for a project.
@ -461,6 +466,54 @@ class HierarchyModel(object):
output = self.get_task_entities(project_name, {task_id})
return output[task_id]
def get_entity_ids_for_assignees(
    self, project_name: str, assignees: list[str]
) -> dict[str, set[str]]:
    """Collect folder and task ids related to given assignees.

    Results are cached per project and assignee in
    'self._entity_ids_by_assignee'; only assignees without a valid
    cache entry trigger a server query.

    Args:
        project_name (str): Project to look into.
        assignees (list[str]): Assignee usernames to resolve.

    Returns:
        dict[str, set[str]]: Keys 'folder_ids' and 'task_ids' holding
            matching entity ids merged across all passed assignees.
    """
    folder_ids = set()
    task_ids = set()
    # 'output' aliases the two sets above; they are filled in-place
    output = {
        "folder_ids": folder_ids,
        "task_ids": task_ids,
    }
    assignees = set(assignees)
    # Serve cached assignees first; iterate a copy because the set is
    # mutated during iteration
    for assignee in tuple(assignees):
        cache = self._entity_ids_by_assignee[project_name][assignee]
        if cache.is_valid:
            assignees.discard(assignee)
            assignee_data = cache.get_data()
            folder_ids.update(assignee_data["folder_ids"])
            task_ids.update(assignee_data["task_ids"])

    # Everything was served from cache
    if not assignees:
        return output

    # NOTE(review): 'assignees_all' looks like an "all of" filter -
    # confirm that is intended over an "any of" filter when more than
    # one assignee is uncached.
    tasks = ayon_api.get_tasks(
        project_name,
        assignees_all=assignees,
        fields={"id", "folderId", "assignees"},
    )
    # Group fetched tasks by each of their assignees to fill the cache
    tasks_assignee = {}
    for task in tasks:
        folder_ids.add(task["folderId"])
        task_ids.add(task["id"])
        for assignee in task["assignees"]:
            tasks_assignee.setdefault(assignee, []).append(task)

    # Store per-assignee entity ids to cache
    # NOTE(review): assignees that matched no task never receive a
    # cache entry and will be re-queried next call - confirm intended.
    for assignee, tasks in tasks_assignee.items():
        cache = self._entity_ids_by_assignee[project_name][assignee]
        assignee_folder_ids = set()
        assignee_task_ids = set()
        assignee_data = {
            "folder_ids": assignee_folder_ids,
            "task_ids": assignee_task_ids,
        }
        for task in tasks:
            assignee_folder_ids.add(task["folderId"])
            assignee_task_ids.add(task["id"])
        cache.update_data(assignee_data)
    return output
@contextlib.contextmanager
def _folder_refresh_event_manager(self, project_name, sender):
self._folders_refreshing.add(project_name)

View file

@ -21,8 +21,49 @@ class ThumbnailsModel:
self._folders_cache.reset()
self._versions_cache.reset()
def get_thumbnail_path(self, project_name, thumbnail_id):
return self._get_thumbnail_path(project_name, thumbnail_id)
def get_thumbnail_paths(
    self,
    project_name,
    entity_type,
    entity_ids,
):
    """Resolve thumbnail filepaths for multiple entities at once.

    Entities sharing the same thumbnail id are resolved with a single
    path lookup. Entities without a thumbnail map to 'None'.
    """
    paths_by_entity_id = dict.fromkeys(entity_ids)
    if not (project_name and entity_type and entity_ids):
        return paths_by_entity_id

    if entity_type == "folder":
        thumbnail_ids = self.get_folder_thumbnail_ids(
            project_name, entity_ids
        )
    elif entity_type == "version":
        thumbnail_ids = self.get_version_thumbnail_ids(
            project_name, entity_ids
        )
    else:
        thumbnail_ids = {}

    if not thumbnail_ids:
        return paths_by_entity_id

    # Group entities by the thumbnail id they share
    grouped_entity_ids = collections.defaultdict(set)
    for entity_id, thumbnail_id in thumbnail_ids.items():
        if thumbnail_id:
            grouped_entity_ids[thumbnail_id].add(entity_id)

    for thumbnail_id, group in grouped_entity_ids.items():
        # One lookup per thumbnail id; any entity of the group works
        thumbnail_path = self._get_thumbnail_path(
            project_name, entity_type, next(iter(group)), thumbnail_id
        )
        if not thumbnail_path:
            continue
        for entity_id in group:
            paths_by_entity_id[entity_id] = thumbnail_path
    return paths_by_entity_id
def get_folder_thumbnail_ids(self, project_name, folder_ids):
project_cache = self._folders_cache[project_name]
@ -56,7 +97,13 @@ class ThumbnailsModel:
output[version_id] = cache.get_data()
return output
def _get_thumbnail_path(self, project_name, thumbnail_id):
def _get_thumbnail_path(
self,
project_name,
entity_type,
entity_id,
thumbnail_id
):
if not thumbnail_id:
return None
@ -64,7 +111,12 @@ class ThumbnailsModel:
if thumbnail_id in project_cache:
return project_cache[thumbnail_id]
filepath = get_thumbnail_path(project_name, thumbnail_id)
filepath = get_thumbnail_path(
project_name,
entity_type,
entity_id,
thumbnail_id
)
project_cache[thumbnail_id] = filepath
return filepath

View file

@ -248,4 +248,3 @@ class EnhancedTabBar(QtWidgets.QTabBar):
else:
super().mouseReleaseEvent(event)

View file

@ -492,7 +492,7 @@ def show(parent=None):
try:
module.window.close()
del(module.window)
del module.window
except (AttributeError, RuntimeError):
pass

View file

@ -32,7 +32,7 @@ from qtpy import QtWidgets, QtCore, QtGui
import pyblish.api
from ayon_core import style
TAB = 4* "&nbsp;"
TAB = 4 * "&nbsp;"
HEADER_SIZE = "15px"
KEY_COLOR = QtGui.QColor("#ffffff")
@ -243,7 +243,7 @@ class DebugUI(QtWidgets.QDialog):
self._set_window_title(plugin=result["plugin"])
print(10*"<", result["plugin"].__name__, 10*">")
print(10 * "<", result["plugin"].__name__, 10 * ">")
plugin_order = result["plugin"].order
plugin_name = result["plugin"].__name__

View file

@ -1,4 +1,59 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional, Any
from ayon_core.tools.common_models import (
ProjectItem,
FolderItem,
FolderTypeItem,
TaskItem,
TaskTypeItem,
)
@dataclass
class WebactionContext:
"""Context used for methods related to webactions."""
identifier: str
project_name: str
folder_id: str
task_id: str
addon_name: str
addon_version: str
@dataclass
class ActionItem:
"""Item representing single action to trigger.
Attributes:
action_type (Literal["webaction", "local"]): Type of action.
identifier (str): Unique identifier of action item.
order (int): Action ordering.
label (str): Action label.
variant_label (Union[str, None]): Variant label, full label is
concatenated with space. Actions are grouped under single
action if it has same 'label' and have set 'variant_label'.
full_label (str): Full label, if not set it is generated
from 'label' and 'variant_label'.
icon (dict[str, str]): Icon definition.
addon_name (Optional[str]): Addon name.
addon_version (Optional[str]): Addon version.
config_fields (list[dict]): Config fields for webaction.
"""
action_type: str
identifier: str
order: int
label: str
variant_label: Optional[str]
full_label: str
icon: Optional[dict[str, str]]
config_fields: list[dict]
addon_name: Optional[str] = None
addon_version: Optional[str] = None
class AbstractLauncherCommon(ABC):
@ -88,7 +143,9 @@ class AbstractLauncherBackend(AbstractLauncherCommon):
class AbstractLauncherFrontEnd(AbstractLauncherCommon):
# Entity items for UI
@abstractmethod
def get_project_items(self, sender=None):
def get_project_items(
self, sender: Optional[str] = None
) -> list[ProjectItem]:
"""Project items for all projects.
This function may trigger events 'projects.refresh.started' and
@ -106,7 +163,9 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_folder_type_items(self, project_name, sender=None):
def get_folder_type_items(
self, project_name: str, sender: Optional[str] = None
) -> list[FolderTypeItem]:
"""Folder type items for a project.
This function may trigger events with topics
@ -126,7 +185,9 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_task_type_items(self, project_name, sender=None):
def get_task_type_items(
self, project_name: str, sender: Optional[str] = None
) -> list[TaskTypeItem]:
"""Task type items for a project.
This function may trigger events with topics
@ -146,7 +207,9 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
pass
@abstractmethod
def get_folder_items(self, project_name, sender=None):
def get_folder_items(
self, project_name: str, sender: Optional[str] = None
) -> list[FolderItem]:
"""Folder items to visualize project hierarchy.
This function may trigger events 'folders.refresh.started' and
@ -160,12 +223,14 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
Returns:
list[FolderItem]: Minimum possible information needed
for visualisation of folder hierarchy.
"""
"""
pass
@abstractmethod
def get_task_items(self, project_name, folder_id, sender=None):
def get_task_items(
self, project_name: str, folder_id: str, sender: Optional[str] = None
) -> list[TaskItem]:
"""Task items.
This function may trigger events 'tasks.refresh.started' and
@ -180,52 +245,52 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
Returns:
list[TaskItem]: Minimum possible information needed
for visualisation of tasks.
"""
"""
pass
@abstractmethod
def get_selected_project_name(self):
def get_selected_project_name(self) -> Optional[str]:
"""Selected project name.
Returns:
Union[str, None]: Selected project name.
"""
"""
pass
@abstractmethod
def get_selected_folder_id(self):
def get_selected_folder_id(self) -> Optional[str]:
"""Selected folder id.
Returns:
Union[str, None]: Selected folder id.
"""
"""
pass
@abstractmethod
def get_selected_task_id(self):
def get_selected_task_id(self) -> Optional[str]:
"""Selected task id.
Returns:
Union[str, None]: Selected task id.
"""
"""
pass
@abstractmethod
def get_selected_task_name(self):
def get_selected_task_name(self) -> Optional[str]:
"""Selected task name.
Returns:
Union[str, None]: Selected task name.
"""
"""
pass
@abstractmethod
def get_selected_context(self):
def get_selected_context(self) -> dict[str, Optional[str]]:
"""Get whole selected context.
Example:
@ -238,34 +303,36 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
Returns:
dict[str, Union[str, None]]: Selected context.
"""
"""
pass
@abstractmethod
def set_selected_project(self, project_name):
def set_selected_project(self, project_name: Optional[str]):
"""Change selected folder.
Args:
project_name (Union[str, None]): Project nameor None if no project
is selected.
"""
"""
pass
@abstractmethod
def set_selected_folder(self, folder_id):
def set_selected_folder(self, folder_id: Optional[str]):
"""Change selected folder.
Args:
folder_id (Union[str, None]): Folder id or None if no folder
is selected.
"""
"""
pass
@abstractmethod
def set_selected_task(self, task_id, task_name):
def set_selected_task(
self, task_id: Optional[str], task_name: Optional[str]
):
"""Change selected task.
Args:
@ -273,13 +340,18 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
is selected.
task_name (Union[str, None]): Task name or None if no task
is selected.
"""
"""
pass
# Actions
@abstractmethod
def get_action_items(self, project_name, folder_id, task_id):
def get_action_items(
self,
project_name: Optional[str],
folder_id: Optional[str],
task_id: Optional[str],
) -> list[ActionItem]:
"""Get action items for given context.
Args:
@ -290,37 +362,74 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
Returns:
list[ActionItem]: List of action items that should be shown
for given context.
"""
"""
pass
@abstractmethod
def trigger_action(self, project_name, folder_id, task_id, action_id):
def trigger_action(
self,
action_id: str,
project_name: Optional[str],
folder_id: Optional[str],
task_id: Optional[str],
):
"""Trigger action on given context.
Args:
action_id (str): Action identifier.
project_name (Union[str, None]): Project name.
folder_id (Union[str, None]): Folder id.
task_id (Union[str, None]): Task id.
action_id (str): Action identifier.
"""
"""
pass
@abstractmethod
def set_application_force_not_open_workfile(
self, project_name, folder_id, task_id, action_ids, enabled
def trigger_webaction(
self,
context: WebactionContext,
action_label: str,
form_data: Optional[dict[str, Any]] = None,
):
"""This is application action related to force not open last workfile.
"""Trigger action on the given context.
Args:
project_name (Union[str, None]): Project name.
folder_id (Union[str, None]): Folder id.
task_id (Union[str, None]): Task id.
action_id (Iterable[str]): Action identifiers.
enabled (bool): New value of force not open workfile.
"""
context (WebactionContext): Webaction context.
action_label (str): Action label.
form_data (Optional[dict[str, Any]]): Form values of action.
"""
pass
@abstractmethod
def get_action_config_values(
self, context: WebactionContext
) -> dict[str, Any]:
"""Get action config values.
Args:
context (WebactionContext): Webaction context.
Returns:
dict[str, Any]: Action config values.
"""
pass
@abstractmethod
def set_action_config_values(
self,
context: WebactionContext,
values: dict[str, Any],
):
"""Set action config values.
Args:
context (WebactionContext): Webaction context.
values (dict[str, Any]): Action config values.
"""
pass
@abstractmethod
@ -340,5 +449,19 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
Triggers 'controller.refresh.actions.started' event at the beginning
and 'controller.refresh.actions.finished' at the end.
"""
pass
@abstractmethod
def get_my_tasks_entity_ids(
self, project_name: str
) -> dict[str, list[str]]:
"""Get entity ids for my tasks.
Args:
project_name (str): Project name.
Returns:
dict[str, list[str]]: Folder and task ids.
"""
pass

View file

@ -1,4 +1,4 @@
from ayon_core.lib import Logger
from ayon_core.lib import Logger, get_ayon_username
from ayon_core.lib.events import QueuedEventSystem
from ayon_core.settings import get_project_settings
from ayon_core.tools.common_models import ProjectsModel, HierarchyModel
@ -6,6 +6,8 @@ from ayon_core.tools.common_models import ProjectsModel, HierarchyModel
from .abstract import AbstractLauncherFrontEnd, AbstractLauncherBackend
from .models import LauncherSelectionModel, ActionsModel
NOT_SET = object()
class BaseLauncherController(
AbstractLauncherFrontEnd, AbstractLauncherBackend
@ -15,6 +17,8 @@ class BaseLauncherController(
self._event_system = None
self._log = None
self._username = NOT_SET
self._selection_model = LauncherSelectionModel(self)
self._projects_model = ProjectsModel(self)
self._hierarchy_model = HierarchyModel(self)
@ -28,7 +32,7 @@ class BaseLauncherController(
@property
def event_system(self):
"""Inner event system for workfiles tool controller.
"""Inner event system for launcher tool controller.
Is used for communication with UI. Event system is created on demand.
@ -131,16 +135,30 @@ class BaseLauncherController(
return self._actions_model.get_action_items(
project_name, folder_id, task_id)
def set_application_force_not_open_workfile(
self, project_name, folder_id, task_id, action_ids, enabled
def trigger_action(
self,
identifier,
project_name,
folder_id,
task_id,
):
self._actions_model.set_application_force_not_open_workfile(
project_name, folder_id, task_id, action_ids, enabled
self._actions_model.trigger_action(
identifier,
project_name,
folder_id,
task_id,
)
def trigger_action(self, project_name, folder_id, task_id, identifier):
self._actions_model.trigger_action(
project_name, folder_id, task_id, identifier)
def trigger_webaction(self, context, action_label, form_data=None):
self._actions_model.trigger_webaction(
context, action_label, form_data
)
def get_action_config_values(self, context):
return self._actions_model.get_action_config_values(context)
def set_action_config_values(self, context, values):
return self._actions_model.set_action_config_values(context, values)
# General methods
def refresh(self):
@ -168,5 +186,19 @@ class BaseLauncherController(
self._emit_event("controller.refresh.actions.finished")
def get_my_tasks_entity_ids(self, project_name: str):
username = self._get_my_username()
assignees = []
if username:
assignees.append(username)
return self._hierarchy_model.get_entity_ids_for_assignees(
project_name, assignees
)
def _get_my_username(self):
if self._username is NOT_SET:
self._username = get_ayon_username()
return self._username
def _emit_event(self, topic, data=None):
self.emit_event(topic, data, "controller")

View file

@ -1,219 +1,47 @@
import os
import uuid
from dataclasses import dataclass, asdict
from urllib.parse import urlencode, urlparse
from typing import Any, Optional
import webbrowser
import ayon_api
from ayon_core import resources
from ayon_core.lib import Logger, AYONSettingsRegistry
from ayon_core.lib import (
Logger,
NestedCacheItem,
CacheItem,
get_settings_variant,
run_detached_ayon_launcher_process,
)
from ayon_core.addon import AddonsManager
from ayon_core.pipeline.actions import (
discover_launcher_actions,
LauncherAction,
LauncherActionSelection,
register_launcher_action_path,
)
from ayon_core.pipeline.workfile import should_use_last_workfile_on_launch
try:
# Available since applications addon 0.2.4
from ayon_applications.action import ApplicationAction
except ImportError:
# Backwards compatibility from 0.3.3 (24/06/10)
# TODO: Remove in future releases
class ApplicationAction(LauncherAction):
"""Action to launch an application.
Application action based on 'ApplicationManager' system.
Handling of applications in launcher is not ideal and should be
completely redone from scratch. This is just a temporary solution
to keep backwards compatibility with AYON launcher.
Todos:
Move handling of errors to frontend.
"""
# Application object
application = None
# Action attributes
name = None
label = None
label_variant = None
group = None
icon = None
color = None
order = 0
data = {}
project_settings = {}
project_entities = {}
_log = None
@property
def log(self):
if self._log is None:
self._log = Logger.get_logger(self.__class__.__name__)
return self._log
def is_compatible(self, selection):
if not selection.is_task_selected:
return False
project_entity = self.project_entities[selection.project_name]
apps = project_entity["attrib"].get("applications")
if not apps or self.application.full_name not in apps:
return False
project_settings = self.project_settings[selection.project_name]
only_available = project_settings["applications"]["only_available"]
if only_available and not self.application.find_executable():
return False
return True
def _show_message_box(self, title, message, details=None):
from qtpy import QtWidgets, QtGui
from ayon_core import style
dialog = QtWidgets.QMessageBox()
icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
dialog.setWindowIcon(icon)
dialog.setStyleSheet(style.load_stylesheet())
dialog.setWindowTitle(title)
dialog.setText(message)
if details:
dialog.setDetailedText(details)
dialog.exec_()
def process(self, selection, **kwargs):
"""Process the full Application action"""
from ayon_applications import (
ApplicationExecutableNotFound,
ApplicationLaunchFailed,
)
try:
self.application.launch(
project_name=selection.project_name,
folder_path=selection.folder_path,
task_name=selection.task_name,
**self.data
)
except ApplicationExecutableNotFound as exc:
details = exc.details
msg = exc.msg
log_msg = str(msg)
if details:
log_msg += "\n" + details
self.log.warning(log_msg)
self._show_message_box(
"Application executable not found", msg, details
)
except ApplicationLaunchFailed as exc:
msg = str(exc)
self.log.warning(msg, exc_info=True)
self._show_message_box("Application launch failed", msg)
from ayon_core.tools.launcher.abstract import ActionItem, WebactionContext
# class Action:
# def __init__(self, label, icon=None, identifier=None):
# self._label = label
# self._icon = icon
# self._callbacks = []
# self._identifier = identifier or uuid.uuid4().hex
# self._checked = True
# self._checkable = False
#
# def set_checked(self, checked):
# self._checked = checked
#
# def set_checkable(self, checkable):
# self._checkable = checkable
#
# def set_label(self, label):
# self._label = label
#
# def add_callback(self, callback):
# self._callbacks = callback
#
#
# class Menu:
# def __init__(self, label, icon=None):
# self.label = label
# self.icon = icon
# self._actions = []
#
# def add_action(self, action):
# self._actions.append(action)
@dataclass
class WebactionForm:
fields: list[dict[str, Any]]
title: str
submit_label: str
submit_icon: str
cancel_label: str
cancel_icon: str
class ActionItem:
"""Item representing single action to trigger.
Todos:
Get rid of application specific logic.
Args:
identifier (str): Unique identifier of action item.
label (str): Action label.
variant_label (Union[str, None]): Variant label, full label is
concatenated with space. Actions are grouped under single
action if it has same 'label' and have set 'variant_label'.
icon (dict[str, str]): Icon definition.
order (int): Action ordering.
is_application (bool): Is action application action.
force_not_open_workfile (bool): Force not open workfile. Application
related.
full_label (Optional[str]): Full label, if not set it is generated
from 'label' and 'variant_label'.
"""
def __init__(
self,
identifier,
label,
variant_label,
icon,
order,
is_application,
force_not_open_workfile,
full_label=None
):
self.identifier = identifier
self.label = label
self.variant_label = variant_label
self.icon = icon
self.order = order
self.is_application = is_application
self.force_not_open_workfile = force_not_open_workfile
self._full_label = full_label
def copy(self):
return self.from_data(self.to_data())
@property
def full_label(self):
if self._full_label is None:
if self.variant_label:
self._full_label = " ".join([self.label, self.variant_label])
else:
self._full_label = self.label
return self._full_label
def to_data(self):
return {
"identifier": self.identifier,
"label": self.label,
"variant_label": self.variant_label,
"icon": self.icon,
"order": self.order,
"is_application": self.is_application,
"force_not_open_workfile": self.force_not_open_workfile,
"full_label": self._full_label,
}
@classmethod
def from_data(cls, data):
return cls(**data)
@dataclass
class WebactionResponse:
response_type: str
success: bool
message: Optional[str] = None
clipboard_text: Optional[str] = None
form: Optional[WebactionForm] = None
error_message: Optional[str] = None
def get_action_icon(action):
@ -264,8 +92,6 @@ class ActionsModel:
controller (AbstractLauncherBackend): Controller instance.
"""
_not_open_workfile_reg_key = "force_not_open_workfile"
def __init__(self, controller):
self._controller = controller
@ -274,11 +100,21 @@ class ActionsModel:
self._discovered_actions = None
self._actions = None
self._action_items = {}
self._launcher_tool_reg = AYONSettingsRegistry("launcher_tool")
self._webaction_items = NestedCacheItem(
levels=2, default_factory=list, lifetime=20,
)
self._addons_manager = None
self._variant = get_settings_variant()
@staticmethod
def calculate_full_label(label: str, variant_label: Optional[str]) -> str:
"""Calculate full label from label and variant_label."""
if variant_label:
return " ".join([label, variant_label])
return label
@property
def log(self):
if self._log is None:
@ -289,39 +125,12 @@ class ActionsModel:
self._discovered_actions = None
self._actions = None
self._action_items = {}
self._webaction_items.reset()
self._controller.emit_event("actions.refresh.started")
self._get_action_objects()
self._controller.emit_event("actions.refresh.finished")
def _should_start_last_workfile(
self,
project_name,
task_id,
identifier,
host_name,
not_open_workfile_actions
):
if identifier in not_open_workfile_actions:
return not not_open_workfile_actions[identifier]
task_name = None
task_type = None
if task_id is not None:
task_entity = self._controller.get_task_entity(
project_name, task_id
)
task_name = task_entity["name"]
task_type = task_entity["taskType"]
output = should_use_last_workfile_on_launch(
project_name,
host_name,
task_name,
task_type
)
return output
def get_action_items(self, project_name, folder_id, task_id):
"""Get actions for project.
@ -332,53 +141,31 @@ class ActionsModel:
Returns:
list[ActionItem]: List of actions.
"""
not_open_workfile_actions = self._get_no_last_workfile_for_context(
project_name, folder_id, task_id)
selection = self._prepare_selection(project_name, folder_id, task_id)
output = []
action_items = self._get_action_items(project_name)
for identifier, action in self._get_action_objects().items():
if not action.is_compatible(selection):
continue
if action.is_compatible(selection):
output.append(action_items[identifier])
output.extend(self._get_webactions(selection))
action_item = action_items[identifier]
# Handling of 'force_not_open_workfile' for applications
if action_item.is_application:
action_item = action_item.copy()
start_last_workfile = self._should_start_last_workfile(
project_name,
task_id,
identifier,
action.application.host_name,
not_open_workfile_actions
)
action_item.force_not_open_workfile = (
not start_last_workfile
)
output.append(action_item)
return output
def set_application_force_not_open_workfile(
self, project_name, folder_id, task_id, action_ids, enabled
def trigger_action(
self,
identifier,
project_name,
folder_id,
task_id,
):
no_workfile_reg_data = self._get_no_last_workfile_reg_data()
project_data = no_workfile_reg_data.setdefault(project_name, {})
folder_data = project_data.setdefault(folder_id, {})
task_data = folder_data.setdefault(task_id, {})
for action_id in action_ids:
task_data[action_id] = enabled
self._launcher_tool_reg.set_item(
self._not_open_workfile_reg_key, no_workfile_reg_data
)
def trigger_action(self, project_name, folder_id, task_id, identifier):
selection = self._prepare_selection(project_name, folder_id, task_id)
failed = False
error_message = None
action_label = identifier
action_items = self._get_action_items(project_name)
trigger_id = uuid.uuid4().hex
try:
action = self._actions[identifier]
action_item = action_items[identifier]
@ -386,22 +173,11 @@ class ActionsModel:
self._controller.emit_event(
"action.trigger.started",
{
"trigger_id": trigger_id,
"identifier": identifier,
"full_label": action_label,
}
)
if isinstance(action, ApplicationAction):
per_action = self._get_no_last_workfile_for_context(
project_name, folder_id, task_id
)
start_last_workfile = self._should_start_last_workfile(
project_name,
task_id,
identifier,
action.application.host_name,
per_action
)
action.data["start_last_workfile"] = start_last_workfile
action.process(selection)
except Exception as exc:
@ -412,6 +188,7 @@ class ActionsModel:
self._controller.emit_event(
"action.trigger.finished",
{
"trigger_id": trigger_id,
"identifier": identifier,
"failed": failed,
"error_message": error_message,
@ -419,32 +196,148 @@ class ActionsModel:
}
)
def trigger_webaction(self, context, action_label, form_data):
entity_type = None
entity_ids = []
identifier = context.identifier
folder_id = context.folder_id
task_id = context.task_id
project_name = context.project_name
addon_name = context.addon_name
addon_version = context.addon_version
if task_id:
entity_type = "task"
entity_ids.append(task_id)
elif folder_id:
entity_type = "folder"
entity_ids.append(folder_id)
query = {
"addonName": addon_name,
"addonVersion": addon_version,
"identifier": identifier,
"variant": self._variant,
}
url = f"actions/execute?{urlencode(query)}"
request_data = {
"projectName": project_name,
"entityType": entity_type,
"entityIds": entity_ids,
}
if form_data is not None:
request_data["formData"] = form_data
trigger_id = uuid.uuid4().hex
failed = False
try:
self._controller.emit_event(
"webaction.trigger.started",
{
"trigger_id": trigger_id,
"identifier": identifier,
"full_label": action_label,
}
)
conn = ayon_api.get_server_api_connection()
# Add 'referer' header to the request
# - ayon-api 1.1.1 adds the value to the header automatically
headers = conn.get_headers()
if "referer" in headers:
headers = None
else:
headers["referer"] = conn.get_base_url()
response = ayon_api.raw_post(
url, headers=headers, json=request_data
)
response.raise_for_status()
handle_response = self._handle_webaction_response(response.data)
except Exception:
failed = True
self.log.warning("Action trigger failed.", exc_info=True)
handle_response = WebactionResponse(
"unknown",
False,
error_message="Failed to trigger webaction.",
)
data = asdict(handle_response)
data.update({
"trigger_failed": failed,
"trigger_id": trigger_id,
"identifier": identifier,
"full_label": action_label,
"project_name": project_name,
"folder_id": folder_id,
"task_id": task_id,
"addon_name": addon_name,
"addon_version": addon_version,
})
self._controller.emit_event(
"webaction.trigger.finished",
data,
)
def get_action_config_values(self, context: WebactionContext):
selection = self._prepare_selection(
context.project_name, context.folder_id, context.task_id
)
if not selection.is_project_selected:
return {}
request_data = self._get_webaction_request_data(selection)
query = {
"addonName": context.addon_name,
"addonVersion": context.addon_version,
"identifier": context.identifier,
"variant": self._variant,
}
url = f"actions/config?{urlencode(query)}"
try:
response = ayon_api.post(url, **request_data)
response.raise_for_status()
except Exception:
self.log.warning(
"Failed to collect webaction config values.",
exc_info=True
)
return {}
return response.data
def set_action_config_values(self, context, values):
selection = self._prepare_selection(
context.project_name, context.folder_id, context.task_id
)
if not selection.is_project_selected:
return {}
request_data = self._get_webaction_request_data(selection)
request_data["value"] = values
query = {
"addonName": context.addon_name,
"addonVersion": context.addon_version,
"identifier": context.identifier,
"variant": self._variant,
}
url = f"actions/config?{urlencode(query)}"
try:
response = ayon_api.post(url, **request_data)
response.raise_for_status()
except Exception:
self.log.warning(
"Failed to store webaction config values.",
exc_info=True
)
def _get_addons_manager(self):
if self._addons_manager is None:
self._addons_manager = AddonsManager()
return self._addons_manager
def _get_no_last_workfile_reg_data(self):
try:
no_workfile_reg_data = self._launcher_tool_reg.get_item(
self._not_open_workfile_reg_key)
except ValueError:
no_workfile_reg_data = {}
self._launcher_tool_reg.set_item(
self._not_open_workfile_reg_key, no_workfile_reg_data)
return no_workfile_reg_data
def _get_no_last_workfile_for_context(
self, project_name, folder_id, task_id
):
not_open_workfile_reg_data = self._get_no_last_workfile_reg_data()
return (
not_open_workfile_reg_data
.get(project_name, {})
.get(folder_id, {})
.get(task_id, {})
)
def _prepare_selection(self, project_name, folder_id, task_id):
project_entity = None
if project_name:
@ -458,6 +351,179 @@ class ActionsModel:
project_settings=project_settings,
)
def _get_webaction_request_data(self, selection: LauncherActionSelection):
if not selection.is_project_selected:
return None
entity_type = None
entity_id = None
entity_subtypes = []
if selection.is_task_selected:
entity_type = "task"
entity_id = selection.task_entity["id"]
entity_subtypes = [selection.task_entity["taskType"]]
elif selection.is_folder_selected:
entity_type = "folder"
entity_id = selection.folder_entity["id"]
entity_subtypes = [selection.folder_entity["folderType"]]
entity_ids = []
if entity_id:
entity_ids.append(entity_id)
project_name = selection.project_name
return {
"projectName": project_name,
"entityType": entity_type,
"entitySubtypes": entity_subtypes,
"entityIds": entity_ids,
}
def _get_webactions(self, selection: LauncherActionSelection):
if not selection.is_project_selected:
return []
request_data = self._get_webaction_request_data(selection)
project_name = selection.project_name
entity_id = None
if request_data["entityIds"]:
entity_id = request_data["entityIds"][0]
cache: CacheItem = self._webaction_items[project_name][entity_id]
if cache.is_valid:
return cache.get_data()
try:
response = ayon_api.post("actions/list", **request_data)
response.raise_for_status()
except Exception:
self.log.warning("Failed to collect webactions.", exc_info=True)
return []
action_items = []
for action in response.data["actions"]:
# NOTE Settings variant may be important for triggering?
# - action["variant"]
icon = action.get("icon")
if icon and icon["type"] == "url":
if not urlparse(icon["url"]).scheme:
icon["type"] = "ayon_url"
config_fields = action.get("configFields") or []
variant_label = action["label"]
group_label = action.get("groupLabel")
if not group_label:
group_label = variant_label
variant_label = None
full_label = self.calculate_full_label(
group_label, variant_label
)
action_items.append(ActionItem(
action_type="webaction",
identifier=action["identifier"],
order=action["order"],
label=group_label,
variant_label=variant_label,
full_label=full_label,
icon=icon,
addon_name=action["addonName"],
addon_version=action["addonVersion"],
config_fields=config_fields,
# category=action["category"],
))
cache.update_data(action_items)
return cache.get_data()
def _handle_webaction_response(self, data) -> WebactionResponse:
response_type = data["type"]
# Backwards compatibility -> 'server' type is not available since
# AYON backend 1.8.3
if response_type == "server":
return WebactionResponse(
response_type,
False,
error_message="Please use AYON web UI to run the action.",
)
payload = data.get("payload") or {}
download_uri = payload.get("extra_download")
if download_uri is not None:
# Find out if is relative or absolute URL
if not urlparse(download_uri).scheme:
ayon_url = ayon_api.get_base_url().rstrip("/")
path = download_uri.lstrip("/")
download_uri = f"{ayon_url}/{path}"
# Use webbrowser to open file
webbrowser.open_new_tab(download_uri)
response = WebactionResponse(
response_type,
data["success"],
data.get("message"),
payload.get("extra_clipboard"),
)
if response_type == "simple":
pass
elif response_type == "redirect":
# NOTE unused 'newTab' key because we always have to
# open new tab from desktop app.
if not webbrowser.open_new_tab(payload["uri"]):
payload.error_message = "Failed to open web browser."
elif response_type == "form":
submit_icon = payload["submit_icon"] or None
cancel_icon = payload["cancel_icon"] or None
if submit_icon:
submit_icon = {
"type": "material-symbols",
"name": submit_icon,
}
if cancel_icon:
cancel_icon = {
"type": "material-symbols",
"name": cancel_icon,
}
response.form = WebactionForm(
fields=payload["fields"],
title=payload["title"],
submit_label=payload["submit_label"],
cancel_label=payload["cancel_label"],
submit_icon=submit_icon,
cancel_icon=cancel_icon,
)
elif response_type == "launcher":
# Run AYON launcher process with uri in arguments
# NOTE This does pass environment variables of current process
# to the subprocess.
# NOTE We could 'take action' directly and use the arguments here
if payload is not None:
uri = payload["uri"]
else:
uri = data["uri"]
run_detached_ayon_launcher_process(uri)
elif response_type in ("query", "navigate"):
response.error_message = (
"Please use AYON web UI to run the action."
)
else:
self.log.warning(
f"Unknown webaction response type '{response_type}'"
)
response.error_message = "Unknown webaction response type."
return response
def _get_discovered_action_classes(self):
if self._discovered_actions is None:
# NOTE We don't need to register the paths, but that would
@ -470,7 +536,6 @@ class ActionsModel:
register_launcher_action_path(path)
self._discovered_actions = (
discover_launcher_actions()
+ self._get_applications_action_classes()
)
return self._discovered_actions
@ -498,62 +563,29 @@ class ActionsModel:
action_items = {}
for identifier, action in self._get_action_objects().items():
is_application = isinstance(action, ApplicationAction)
# Backwards compatibility from 0.3.3 (24/06/10)
# TODO: Remove in future releases
if is_application and hasattr(action, "project_settings"):
if hasattr(action, "project_settings"):
action.project_entities[project_name] = project_entity
action.project_settings[project_name] = project_settings
label = action.label or identifier
variant_label = getattr(action, "label_variant", None)
full_label = self.calculate_full_label(
label, variant_label
)
icon = get_action_icon(action)
item = ActionItem(
identifier,
label,
variant_label,
icon,
action.order,
is_application,
False
action_type="local",
identifier=identifier,
order=action.order,
label=label,
variant_label=variant_label,
full_label=full_label,
icon=icon,
config_fields=[],
)
action_items[identifier] = item
self._action_items[project_name] = action_items
return action_items
def _get_applications_action_classes(self):
addons_manager = self._get_addons_manager()
applications_addon = addons_manager.get_enabled_addon("applications")
if hasattr(applications_addon, "get_applications_action_classes"):
return applications_addon.get_applications_action_classes()
# Backwards compatibility from 0.3.3 (24/06/10)
# TODO: Remove in future releases
actions = []
if applications_addon is None:
return actions
manager = applications_addon.get_applications_manager()
for full_name, application in manager.applications.items():
if not application.enabled:
continue
action = type(
"app_{}".format(full_name),
(ApplicationAction,),
{
"identifier": "application.{}".format(full_name),
"application": application,
"name": application.name,
"label": application.group.label,
"label_variant": application.label,
"group": None,
"icon": application.icon,
"color": getattr(application, "color", None),
"order": getattr(application, "order", None) or 0,
"data": {}
}
)
actions.append(action)
return actions

File diff suppressed because it is too large Load diff

View file

@ -5,17 +5,17 @@ from ayon_core.tools.utils import (
PlaceholderLineEdit,
SquareButton,
RefreshButton,
)
from ayon_core.tools.utils import (
ProjectsCombobox,
FoldersWidget,
TasksWidget,
NiceCheckbox,
)
from ayon_core.tools.utils.lib import checkstate_int_to_enum
class HierarchyPage(QtWidgets.QWidget):
def __init__(self, controller, parent):
super(HierarchyPage, self).__init__(parent)
super().__init__(parent)
# Header
header_widget = QtWidgets.QWidget(self)
@ -43,23 +43,36 @@ class HierarchyPage(QtWidgets.QWidget):
)
content_body.setOrientation(QtCore.Qt.Horizontal)
# - Folders widget with filter
folders_wrapper = QtWidgets.QWidget(content_body)
# - filters
filters_widget = QtWidgets.QWidget(self)
folders_filter_text = PlaceholderLineEdit(folders_wrapper)
folders_filter_text = PlaceholderLineEdit(filters_widget)
folders_filter_text.setPlaceholderText("Filter folders...")
folders_widget = FoldersWidget(controller, folders_wrapper)
my_tasks_tooltip = (
"Filter folders and task to only those you are assigned to."
)
my_tasks_label = QtWidgets.QLabel("My tasks", filters_widget)
my_tasks_label.setToolTip(my_tasks_tooltip)
folders_wrapper_layout = QtWidgets.QVBoxLayout(folders_wrapper)
folders_wrapper_layout.setContentsMargins(0, 0, 0, 0)
folders_wrapper_layout.addWidget(folders_filter_text, 0)
folders_wrapper_layout.addWidget(folders_widget, 1)
my_tasks_checkbox = NiceCheckbox(filters_widget)
my_tasks_checkbox.setChecked(False)
my_tasks_checkbox.setToolTip(my_tasks_tooltip)
filters_layout = QtWidgets.QHBoxLayout(filters_widget)
filters_layout.setContentsMargins(0, 0, 0, 0)
filters_layout.addWidget(folders_filter_text, 1)
filters_layout.addWidget(my_tasks_label, 0)
filters_layout.addWidget(my_tasks_checkbox, 0)
# - Folders widget
folders_widget = FoldersWidget(controller, content_body)
folders_widget.set_header_visible(True)
# - Tasks widget
tasks_widget = TasksWidget(controller, content_body)
content_body.addWidget(folders_wrapper)
content_body.addWidget(folders_widget)
content_body.addWidget(tasks_widget)
content_body.setStretchFactor(0, 100)
content_body.setStretchFactor(1, 65)
@ -67,20 +80,27 @@ class HierarchyPage(QtWidgets.QWidget):
main_layout = QtWidgets.QVBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.addWidget(header_widget, 0)
main_layout.addWidget(filters_widget, 0)
main_layout.addWidget(content_body, 1)
btn_back.clicked.connect(self._on_back_clicked)
refresh_btn.clicked.connect(self._on_refresh_clicked)
folders_filter_text.textChanged.connect(self._on_filter_text_changed)
my_tasks_checkbox.stateChanged.connect(
self._on_my_tasks_checkbox_state_changed
)
self._is_visible = False
self._controller = controller
self._btn_back = btn_back
self._projects_combobox = projects_combobox
self._my_tasks_checkbox = my_tasks_checkbox
self._folders_widget = folders_widget
self._tasks_widget = tasks_widget
self._project_name = None
# Post init
projects_combobox.set_listen_to_selection_change(self._is_visible)
@ -91,10 +111,14 @@ class HierarchyPage(QtWidgets.QWidget):
self._projects_combobox.set_listen_to_selection_change(visible)
if visible and project_name:
self._projects_combobox.set_selection(project_name)
self._project_name = project_name
def refresh(self):
self._folders_widget.refresh()
self._tasks_widget.refresh()
self._on_my_tasks_checkbox_state_changed(
self._my_tasks_checkbox.checkState()
)
def _on_back_clicked(self):
self._controller.set_selected_project(None)
@ -104,3 +128,16 @@ class HierarchyPage(QtWidgets.QWidget):
def _on_filter_text_changed(self, text):
self._folders_widget.set_name_filter(text)
def _on_my_tasks_checkbox_state_changed(self, state):
folder_ids = None
task_ids = None
state = checkstate_int_to_enum(state)
if state == QtCore.Qt.Checked:
entity_ids = self._controller.get_my_tasks_entity_ids(
self._project_name
)
folder_ids = entity_ids["folder_ids"]
task_ids = entity_ids["task_ids"]
self._folders_widget.set_folder_ids_filter(folder_ids)
self._tasks_widget.set_task_ids_filter(task_ids)

View file

@ -1,7 +0,0 @@
import os
RESOURCES_DIR = os.path.dirname(os.path.abspath(__file__))
def get_options_image_path():
return os.path.join(RESOURCES_DIR, "options.png")

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.7 KiB

View file

@ -1,9 +1,9 @@
from qtpy import QtWidgets, QtCore, QtGui
from ayon_core import style
from ayon_core import resources
from ayon_core import style, resources
from ayon_core.tools.launcher.control import BaseLauncherController
from ayon_core.tools.utils import MessageOverlayObject
from .projects_widget import ProjectsWidget
from .hierarchy_page import HierarchyPage
@ -17,7 +17,7 @@ class LauncherWindow(QtWidgets.QWidget):
page_side_anim_interval = 250
def __init__(self, controller=None, parent=None):
super(LauncherWindow, self).__init__(parent)
super().__init__(parent)
if controller is None:
controller = BaseLauncherController()
@ -41,6 +41,8 @@ class LauncherWindow(QtWidgets.QWidget):
self._controller = controller
overlay_object = MessageOverlayObject(self)
# Main content - Pages & Actions
content_body = QtWidgets.QSplitter(self)
@ -78,26 +80,18 @@ class LauncherWindow(QtWidgets.QWidget):
content_body.setSizes([580, 160])
# Footer
footer_widget = QtWidgets.QWidget(self)
# - Message label
message_label = QtWidgets.QLabel(footer_widget)
# footer_widget = QtWidgets.QWidget(self)
#
# action_history = ActionHistory(footer_widget)
# action_history.setStatusTip("Show Action History")
footer_layout = QtWidgets.QHBoxLayout(footer_widget)
footer_layout.setContentsMargins(0, 0, 0, 0)
footer_layout.addWidget(message_label, 1)
#
# footer_layout = QtWidgets.QHBoxLayout(footer_widget)
# footer_layout.setContentsMargins(0, 0, 0, 0)
# footer_layout.addWidget(action_history, 0)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(content_body, 1)
layout.addWidget(footer_widget, 0)
message_timer = QtCore.QTimer()
message_timer.setInterval(self.message_interval)
message_timer.setSingleShot(True)
# layout.addWidget(footer_widget, 0)
actions_refresh_timer = QtCore.QTimer()
actions_refresh_timer.setInterval(self.refresh_interval)
@ -109,7 +103,6 @@ class LauncherWindow(QtWidgets.QWidget):
page_slide_anim.setEasingCurve(QtCore.QEasingCurve.OutQuad)
projects_page.refreshed.connect(self._on_projects_refresh)
message_timer.timeout.connect(self._on_message_timeout)
actions_refresh_timer.timeout.connect(
self._on_actions_refresh_timeout)
page_slide_anim.valueChanged.connect(
@ -128,6 +121,16 @@ class LauncherWindow(QtWidgets.QWidget):
"action.trigger.finished",
self._on_action_trigger_finished,
)
controller.register_event_callback(
"webaction.trigger.started",
self._on_webaction_trigger_started,
)
controller.register_event_callback(
"webaction.trigger.finished",
self._on_webaction_trigger_finished,
)
self._overlay_object = overlay_object
self._controller = controller
@ -141,11 +144,8 @@ class LauncherWindow(QtWidgets.QWidget):
self._projects_page = projects_page
self._hierarchy_page = hierarchy_page
self._actions_widget = actions_widget
self._message_label = message_label
# self._action_history = action_history
self._message_timer = message_timer
self._actions_refresh_timer = actions_refresh_timer
self._page_slide_anim = page_slide_anim
@ -153,14 +153,14 @@ class LauncherWindow(QtWidgets.QWidget):
self.resize(520, 740)
def showEvent(self, event):
super(LauncherWindow, self).showEvent(event)
super().showEvent(event)
self._window_is_active = True
if not self._actions_refresh_timer.isActive():
self._actions_refresh_timer.start()
self._controller.refresh()
def closeEvent(self, event):
super(LauncherWindow, self).closeEvent(event)
super().closeEvent(event)
self._window_is_active = False
self._actions_refresh_timer.stop()
@ -176,7 +176,7 @@ class LauncherWindow(QtWidgets.QWidget):
self._on_actions_refresh_timeout()
self._actions_refresh_timer.start()
super(LauncherWindow, self).changeEvent(event)
super().changeEvent(event)
def _on_actions_refresh_timeout(self):
# Stop timer if widget is not visible
@ -185,13 +185,6 @@ class LauncherWindow(QtWidgets.QWidget):
else:
self._refresh_on_activate = True
def _echo(self, message):
self._message_label.setText(str(message))
self._message_timer.start()
def _on_message_timeout(self):
self._message_label.setText("")
def _on_project_selection_change(self, event):
project_name = event["project_name"]
self._selected_project_name = project_name
@ -215,13 +208,69 @@ class LauncherWindow(QtWidgets.QWidget):
self._hierarchy_page.refresh()
self._actions_widget.refresh()
def _show_toast_message(self, message, success=True, message_id=None):
message_type = None
if not success:
message_type = "error"
self._overlay_object.add_message(
message, message_type, message_id=message_id
)
def _on_action_trigger_started(self, event):
self._echo("Running action: {}".format(event["full_label"]))
self._show_toast_message(
"Running: {}".format(event["full_label"]),
message_id=event["trigger_id"],
)
def _on_action_trigger_finished(self, event):
if not event["failed"]:
action_label = event["full_label"]
if event["failed"]:
message = f"Failed to run: {action_label}"
else:
message = f"Finished: {action_label}"
self._show_toast_message(
message,
not event["failed"],
message_id=event["trigger_id"],
)
def _on_webaction_trigger_started(self, event):
self._show_toast_message(
"Running: {}".format(event["full_label"]),
message_id=event["trigger_id"],
)
def _on_webaction_trigger_finished(self, event):
clipboard_text = event["clipboard_text"]
if clipboard_text:
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(clipboard_text)
action_label = event["full_label"]
# Avoid to show exception message
if event["trigger_failed"]:
self._show_toast_message(
f"Failed to run: {action_label}",
message_id=event["trigger_id"]
)
return
self._echo("Failed: {}".format(event["error_message"]))
# Failed to run webaction, e.g. because of missing webaction handling
# - not reported by server
if event["error_message"]:
self._show_toast_message(
event["error_message"],
success=False,
message_id=event["trigger_id"]
)
return
if event["message"]:
self._show_toast_message(event["message"], event["success"])
if event["form"]:
self._actions_widget.handle_webaction_form_event(event)
def _is_page_slide_anim_running(self):
return (

View file

@ -733,7 +733,12 @@ class FrontendLoaderController(_BaseLoaderController):
pass
@abstractmethod
def get_thumbnail_path(self, project_name, thumbnail_id):
def get_thumbnail_paths(
self,
project_name,
entity_type,
entity_ids
):
"""Get thumbnail path for thumbnail id.
This method should get a path to a thumbnail based on thumbnail id.
@ -742,10 +747,11 @@ class FrontendLoaderController(_BaseLoaderController):
Args:
project_name (str): Project name.
thumbnail_id (str): Thumbnail id.
entity_type (str): Entity type.
entity_ids (set[str]): Entity ids.
Returns:
Union[str, None]: Thumbnail path or None if not found.
dict[str, Union[str, None]]: Thumbnail path by entity id.
"""
pass

View file

@ -259,9 +259,14 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
project_name, version_ids
)
def get_thumbnail_path(self, project_name, thumbnail_id):
return self._thumbnails_model.get_thumbnail_path(
project_name, thumbnail_id
def get_thumbnail_paths(
self,
project_name,
entity_type,
entity_ids,
):
return self._thumbnails_model.get_thumbnail_paths(
project_name, entity_type, entity_ids
)
def change_products_group(self, project_name, product_ids, group_name):

View file

@ -84,15 +84,17 @@ def _get_options(action, action_item, parent):
if not getattr(action, "optioned", False) or not options:
return {}
dialog_title = action.label + " Options"
if isinstance(options[0], AbstractAttrDef):
qargparse_options = False
dialog = AttributeDefinitionsDialog(options, parent)
dialog = AttributeDefinitionsDialog(
options, title=dialog_title, parent=parent
)
else:
qargparse_options = True
dialog = OptionDialog(parent)
dialog.create(options)
dialog.setWindowTitle(action.label + " Options")
dialog.setWindowTitle(dialog_title)
if not dialog.exec_():
return None

View file

@ -1,3 +1,4 @@
from __future__ import annotations
from qtpy import QtGui, QtCore
from ._multicombobox import (

View file

@ -501,38 +501,29 @@ class LoaderWindow(QtWidgets.QWidget):
self._update_thumbnails()
def _update_thumbnails(self):
# TODO make this threaded and show loading animation while running
project_name = self._selected_project_name
thumbnail_ids = set()
entity_type = None
entity_ids = set()
if self._selected_version_ids:
thumbnail_id_by_entity_id = (
self._controller.get_version_thumbnail_ids(
project_name,
self._selected_version_ids
)
)
thumbnail_ids = set(thumbnail_id_by_entity_id.values())
entity_ids = set(self._selected_version_ids)
entity_type = "version"
elif self._selected_folder_ids:
thumbnail_id_by_entity_id = (
self._controller.get_folder_thumbnail_ids(
project_name,
self._selected_folder_ids
)
)
thumbnail_ids = set(thumbnail_id_by_entity_id.values())
entity_ids = set(self._selected_folder_ids)
entity_type = "folder"
thumbnail_ids.discard(None)
if not thumbnail_ids:
self._thumbnails_widget.set_current_thumbnails(None)
return
thumbnail_paths = set()
for thumbnail_id in thumbnail_ids:
thumbnail_path = self._controller.get_thumbnail_path(
project_name, thumbnail_id)
thumbnail_paths.add(thumbnail_path)
thumbnail_path_by_entity_id = self._controller.get_thumbnail_paths(
project_name, entity_type, entity_ids
)
thumbnail_paths = set(thumbnail_path_by_entity_id.values())
thumbnail_paths.discard(None)
self._thumbnails_widget.set_current_thumbnail_paths(thumbnail_paths)
if thumbnail_paths:
self._thumbnails_widget.set_current_thumbnail_paths(
thumbnail_paths
)
else:
self._thumbnails_widget.set_current_thumbnails(None)
def _on_projects_refresh(self):
self._refresh_handler.set_project_refreshed()

View file

@ -1,4 +1,5 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass, asdict
from typing import (
Optional,
Dict,
@ -28,6 +29,19 @@ if TYPE_CHECKING:
from .models import CreatorItem, PublishErrorInfo, InstanceItem
@dataclass
class CommentDef:
"""Comment attribute definition."""
minimum_chars_required: int
def to_data(self):
return asdict(self)
@classmethod
def from_data(cls, data):
return cls(**data)
class CardMessageTypes:
standard = None
info = "info"
@ -135,6 +149,17 @@ class AbstractPublisherCommon(ABC):
pass
@abstractmethod
def get_comment_def(self) -> CommentDef:
"""Get comment attribute definition.
This can define how the Comment field should behave, like having
a minimum amount of required characters before being allowed to
publish.
"""
pass
class AbstractPublisherBackend(AbstractPublisherCommon):
@abstractmethod

View file

@ -20,7 +20,8 @@ from .models import (
from .abstract import (
AbstractPublisherBackend,
AbstractPublisherFrontend,
CardMessageTypes
CardMessageTypes,
CommentDef,
)
@ -601,3 +602,17 @@ class PublisherController(
def _start_publish(self, up_validation):
self._publish_model.set_publish_up_validation(up_validation)
self._publish_model.start_publish(wait=True)
def get_comment_def(self) -> CommentDef:
# Take the cached settings from the Create Context
settings = self.get_create_context().get_current_project_settings()
comment_minimum_required_chars: int = (
settings
.get("core", {})
.get("tools", {})
.get("publish", {})
.get("comment_minimum_required_chars", 0)
)
return CommentDef(
minimum_chars_required=comment_minimum_required_chars
)

View file

@ -461,19 +461,19 @@ class CreateModel:
self._create_context.add_instances_added_callback(
self._cc_added_instance
)
self._create_context.add_instances_removed_callback (
self._create_context.add_instances_removed_callback(
self._cc_removed_instance
)
self._create_context.add_value_changed_callback(
self._cc_value_changed
)
self._create_context.add_pre_create_attr_defs_change_callback (
self._create_context.add_pre_create_attr_defs_change_callback(
self._cc_pre_create_attr_changed
)
self._create_context.add_create_attr_defs_change_callback (
self._create_context.add_create_attr_defs_change_callback(
self._cc_create_attr_changed
)
self._create_context.add_publish_attr_defs_change_callback (
self._create_context.add_publish_attr_defs_change_callback(
self._cc_publish_attr_changed
)

View file

@ -358,7 +358,7 @@ class PublishReportMaker:
exception = result.get("error")
if exception:
fname, line_no, func, exc = exception.traceback
fname, line_no, func, _ = exception.traceback
# Conversion of exception into string may crash
try:

View file

@ -245,6 +245,13 @@ class PublisherWindow(QtWidgets.QDialog):
show_timer.setInterval(1)
show_timer.timeout.connect(self._on_show_timer)
comment_invalid_timer = QtCore.QTimer()
comment_invalid_timer.setSingleShot(True)
comment_invalid_timer.setInterval(2500)
comment_invalid_timer.timeout.connect(
self._on_comment_invalid_timeout
)
errors_dialog_message_timer = QtCore.QTimer()
errors_dialog_message_timer.setInterval(100)
errors_dialog_message_timer.timeout.connect(
@ -395,6 +402,7 @@ class PublisherWindow(QtWidgets.QDialog):
self._app_event_listener_installed = False
self._show_timer = show_timer
self._comment_invalid_timer = comment_invalid_timer
self._show_counter = 0
self._window_is_visible = False
@ -823,15 +831,45 @@ class PublisherWindow(QtWidgets.QDialog):
self._controller.set_comment(self._comment_input.text())
def _on_validate_clicked(self):
if self._save_changes(False):
if self._validate_comment() and self._save_changes(False):
self._set_publish_comment()
self._controller.validate()
def _on_publish_clicked(self):
if self._save_changes(False):
if self._validate_comment() and self._save_changes(False):
self._set_publish_comment()
self._controller.publish()
def _validate_comment(self) -> bool:
# Validate comment length
comment_def = self._controller.get_comment_def()
char_count = len(self._comment_input.text().strip())
if (
comment_def.minimum_chars_required
and char_count < comment_def.minimum_chars_required
):
self._overlay_object.add_message(
"Please enter a comment of at least "
f"{comment_def.minimum_chars_required} characters",
message_type="error"
)
self._invalidate_comment_field()
return False
return True
def _invalidate_comment_field(self):
self._comment_invalid_timer.start()
self._comment_input.setStyleSheet("border-color: #DD2020")
# Set focus so user can start typing and is pointed towards the field
self._comment_input.setFocus()
self._comment_input.setCursorPosition(
len(self._comment_input.text())
)
def _on_comment_invalid_timeout(self):
# Reset style
self._comment_input.setStyleSheet("")
def _set_footer_enabled(self, enabled):
self._save_btn.setEnabled(True)
self._reset_btn.setEnabled(True)

View file

@ -959,11 +959,13 @@ class SceneInventoryView(QtWidgets.QTreeView):
remove_container(container)
self.data_changed.emit()
def _show_version_error_dialog(self, version, item_ids):
def _show_version_error_dialog(self, version, item_ids, exception):
"""Shows QMessageBox when version switch doesn't work
Args:
version: str or int or None
item_ids (Iterable[str]): List of item ids to run the
exception (Exception): Exception that occurred
"""
if version == -1:
version_str = "latest"
@ -988,10 +990,11 @@ class SceneInventoryView(QtWidgets.QTreeView):
dialog.addButton(QtWidgets.QMessageBox.Cancel)
msg = (
"Version update to '{}' failed as representation doesn't exist."
"Version update to '{}' failed with the following error:\n"
"{}."
"\n\nPlease update to version with a valid representation"
" OR \n use 'Switch Folder' button to change folder."
).format(version_str)
).format(version_str, exception)
dialog.setText(msg)
dialog.exec_()
@ -1105,10 +1108,10 @@ class SceneInventoryView(QtWidgets.QTreeView):
container = containers_by_id[item_id]
try:
update_container(container, item_version)
except AssertionError:
except Exception as exc:
log.warning("Update failed", exc_info=True)
self._show_version_error_dialog(
item_version, [item_id]
item_version, [item_id], exc
)
finally:
# Always update the scene inventory view, even if errors occurred

View file

@ -1,12 +0,0 @@
import warnings
from .broker import StdOutBroker
warnings.warn(
(
"Import of 'StdOutBroker' from 'ayon_core.tools.stdout_broker.app'"
" is deprecated. Please use 'ayon_core.tools.stdout_broker' instead."
),
DeprecationWarning
)
__all__ = ("StdOutBroker", )

View file

@ -738,4 +738,3 @@ def main(force=False):
sys.exit(1)
main()

View file

@ -6,6 +6,7 @@ from .widgets import (
CustomTextComboBox,
PlaceholderLineEdit,
PlaceholderPlainTextEdit,
MarkdownLabel,
ElideLabel,
HintedLineEdit,
ExpandingTextEdit,
@ -91,6 +92,7 @@ __all__ = (
"CustomTextComboBox",
"PlaceholderLineEdit",
"PlaceholderPlainTextEdit",
"MarkdownLabel",
"ElideLabel",
"HintedLineEdit",
"ExpandingTextEdit",

View file

@ -14,3 +14,4 @@ except AttributeError:
DEFAULT_PROJECT_LABEL = "< Default >"
PROJECT_NAME_ROLE = QtCore.Qt.UserRole + 101
PROJECT_IS_ACTIVE_ROLE = QtCore.Qt.UserRole + 102
DEFAULT_WEB_ICON_COLOR = "#f4f5f5"

View file

@ -1,4 +1,6 @@
from __future__ import annotations
import collections
from typing import Optional
from qtpy import QtWidgets, QtGui, QtCore
@ -33,7 +35,10 @@ class FoldersQtModel(QtGui.QStandardItemModel):
refreshed = QtCore.Signal()
def __init__(self, controller):
super(FoldersQtModel, self).__init__()
super().__init__()
self.setColumnCount(1)
self.setHeaderData(0, QtCore.Qt.Horizontal, "Folders")
self._controller = controller
self._items_by_id = {}
@ -334,6 +339,29 @@ class FoldersQtModel(QtGui.QStandardItemModel):
self.refreshed.emit()
class FoldersProxyModel(RecursiveSortFilterProxyModel):
def __init__(self):
super().__init__()
self._folder_ids_filter = None
def set_folder_ids_filter(self, folder_ids: Optional[list[str]]):
if self._folder_ids_filter == folder_ids:
return
self._folder_ids_filter = folder_ids
self.invalidateFilter()
def filterAcceptsRow(self, row, parent_index):
if self._folder_ids_filter is not None:
if not self._folder_ids_filter:
return False
source_index = self.sourceModel().index(row, 0, parent_index)
folder_id = source_index.data(FOLDER_ID_ROLE)
if folder_id not in self._folder_ids_filter:
return False
return super().filterAcceptsRow(row, parent_index)
class FoldersWidget(QtWidgets.QWidget):
"""Folders widget.
@ -369,13 +397,13 @@ class FoldersWidget(QtWidgets.QWidget):
refreshed = QtCore.Signal()
def __init__(self, controller, parent, handle_expected_selection=False):
super(FoldersWidget, self).__init__(parent)
super().__init__(parent)
folders_view = TreeView(self)
folders_view.setHeaderHidden(True)
folders_model = FoldersQtModel(controller)
folders_proxy_model = RecursiveSortFilterProxyModel()
folders_proxy_model = FoldersProxyModel()
folders_proxy_model.setSourceModel(folders_model)
folders_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
@ -446,6 +474,18 @@ class FoldersWidget(QtWidgets.QWidget):
if name:
self._folders_view.expandAll()
def set_folder_ids_filter(self, folder_ids: Optional[list[str]]):
"""Set filter of folder ids.
Args:
folder_ids (list[str]): The list of folder ids.
"""
self._folders_proxy_model.set_folder_ids_filter(folder_ids)
def set_header_visible(self, visible: bool):
self._folders_view.setHeaderHidden(not visible)
def refresh(self):
"""Refresh folders model.

View file

@ -1,11 +1,14 @@
import os
import sys
import io
import contextlib
import collections
import traceback
import urllib.request
from functools import partial
from typing import Union, Any
import ayon_api
from qtpy import QtWidgets, QtCore, QtGui
import qtawesome
import qtmaterialsymbols
@ -17,7 +20,12 @@ from ayon_core.style import (
from ayon_core.resources import get_image_path
from ayon_core.lib import Logger
from .constants import CHECKED_INT, UNCHECKED_INT, PARTIALLY_CHECKED_INT
from .constants import (
CHECKED_INT,
UNCHECKED_INT,
PARTIALLY_CHECKED_INT,
DEFAULT_WEB_ICON_COLOR,
)
log = Logger.get_logger(__name__)
@ -480,11 +488,27 @@ class _IconsCache:
if icon_type == "path":
parts = [icon_type, icon_def["path"]]
elif icon_type in {"awesome-font", "material-symbols"}:
color = icon_def["color"] or ""
elif icon_type == "awesome-font":
color = icon_def.get("color") or ""
if isinstance(color, QtGui.QColor):
color = color.name()
parts = [icon_type, icon_def["name"] or "", color]
elif icon_type == "material-symbols":
color = icon_def.get("color") or DEFAULT_WEB_ICON_COLOR
if isinstance(color, QtGui.QColor):
color = color.name()
parts = [icon_type, icon_def["name"] or "", color]
elif icon_type in {"url", "ayon_url"}:
parts = [icon_type, icon_def["url"]]
elif icon_type == "transparent":
size = icon_def.get("size")
if size is None:
size = 256
parts = [icon_type, str(size)]
return "|".join(parts)
@classmethod
@ -505,7 +529,7 @@ class _IconsCache:
elif icon_type == "awesome-font":
icon_name = icon_def["name"]
icon_color = icon_def["color"]
icon_color = icon_def.get("color")
icon = cls.get_qta_icon_by_name_and_color(icon_name, icon_color)
if icon is None:
icon = cls.get_qta_icon_by_name_and_color(
@ -513,10 +537,40 @@ class _IconsCache:
elif icon_type == "material-symbols":
icon_name = icon_def["name"]
icon_color = icon_def["color"]
icon_color = icon_def.get("color") or DEFAULT_WEB_ICON_COLOR
if qtmaterialsymbols.get_icon_name_char(icon_name) is not None:
icon = qtmaterialsymbols.get_icon(icon_name, icon_color)
elif icon_type == "url":
url = icon_def["url"]
try:
content = urllib.request.urlopen(url).read()
pix = QtGui.QPixmap()
pix.loadFromData(content)
icon = QtGui.QIcon(pix)
except Exception:
log.warning(
"Failed to download image '%s'", url, exc_info=True
)
icon = None
elif icon_type == "ayon_url":
url = icon_def["url"].lstrip("/")
url = f"{ayon_api.get_base_url()}/{url}"
stream = io.BytesIO()
ayon_api.download_file_to_stream(url, stream)
pix = QtGui.QPixmap()
pix.loadFromData(stream.getvalue())
icon = QtGui.QIcon(pix)
elif icon_type == "transparent":
size = icon_def.get("size")
if size is None:
size = 256
pix = QtGui.QPixmap(size, size)
pix.fill(QtCore.Qt.transparent)
icon = QtGui.QIcon(pix)
if icon is None:
icon = cls.get_default()
cls._cache[cache_key] = icon

View file

@ -350,21 +350,21 @@ class ProjectSortFilterProxy(QtCore.QSortFilterProxyModel):
if project_name is None:
return True
string_pattern = self.filterRegularExpression().pattern()
if string_pattern:
return string_pattern.lower() in project_name.lower()
# Current project keep always visible
default = super(ProjectSortFilterProxy, self).filterAcceptsRow(
source_row, source_parent
)
if not default:
return default
# Make sure current project is visible
if index.data(PROJECT_IS_CURRENT_ROLE):
return True
default = super().filterAcceptsRow(source_row, source_parent)
if not default:
return default
string_pattern = self.filterRegularExpression().pattern()
if (
string_pattern
and string_pattern.lower() not in project_name.lower()
):
return False
if (
self._filter_inactive
and not index.data(PROJECT_IS_ACTIVE_ROLE)

View file

@ -1,3 +1,6 @@
from __future__ import annotations
from typing import Optional
from qtpy import QtWidgets, QtGui, QtCore
from ayon_core.style import (
@ -343,6 +346,29 @@ class TasksQtModel(QtGui.QStandardItemModel):
return self._has_content
class TasksProxyModel(QtCore.QSortFilterProxyModel):
def __init__(self):
super().__init__()
self._task_ids_filter: Optional[set[str]] = None
def set_task_ids_filter(self, task_ids: Optional[set[str]]):
if self._task_ids_filter == task_ids:
return
self._task_ids_filter = task_ids
self.invalidateFilter()
def filterAcceptsRow(self, row, parent_index):
if self._task_ids_filter is not None:
if not self._task_ids_filter:
return False
source_index = self.sourceModel().index(row, 0, parent_index)
task_id = source_index.data(ITEM_ID_ROLE)
if task_id is not None and task_id not in self._task_ids_filter:
return False
return super().filterAcceptsRow(row, parent_index)
class TasksWidget(QtWidgets.QWidget):
"""Tasks widget.
@ -364,7 +390,7 @@ class TasksWidget(QtWidgets.QWidget):
tasks_view.setIndentation(0)
tasks_model = TasksQtModel(controller)
tasks_proxy_model = QtCore.QSortFilterProxyModel()
tasks_proxy_model = TasksProxyModel()
tasks_proxy_model.setSourceModel(tasks_model)
tasks_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
@ -490,6 +516,15 @@ class TasksWidget(QtWidgets.QWidget):
)
return True
def set_task_ids_filter(self, task_ids: Optional[list[str]]):
"""Set filter of folder ids.
Args:
task_ids (list[str]): The list of folder ids.
"""
self._tasks_proxy_model.set_task_ids_filter(task_ids)
def _on_tasks_refresh_finished(self, event):
"""Tasks were refreshed in controller.
@ -540,7 +575,7 @@ class TasksWidget(QtWidgets.QWidget):
if self._tasks_model.is_refreshing:
return
parent_id, task_id, task_name, _ = self._get_selected_item_ids()
_parent_id, task_id, task_name, _ = self._get_selected_item_ids()
self._controller.set_selected_task(task_id, task_name)
self.selection_changed.emit()

Some files were not shown because too many files have changed in this diff Show more