Merge branch 'develop' into enhancement/AY-6086_adding-parents-to-template-keys

Jakub Trllo, 2024-11-21 18:28:06 +01:00, committed by GitHub
commit f661808ccd
160 changed files with 18737 additions and 5917 deletions


@ -1,6 +1,6 @@
name: Bug Report
description: File a bug report
title: 'Your issue title here'
title: Your issue title here
labels:
- 'type: bug'
body:
@ -36,6 +36,16 @@ body:
description: What version are you running? Look to AYON Tray
options:
- 1.0.0
- 0.4.4
- 0.4.3
- 0.4.2
- 0.4.1
- 0.4.0
- 0.3.2
- 0.3.1
- 0.3.0
- 0.2.1
- 0.2.0
validations:
required: true
- type: dropdown

.github/workflows/release_trigger.yml vendored Normal file (25 additions)

@ -0,0 +1,25 @@
name: 🚀 Release Trigger
on:
workflow_dispatch:
inputs:
draft:
type: boolean
description: "Create Release Draft"
required: false
default: false
release_overwrite:
type: string
description: "Set Version Release Tag"
required: false
jobs:
call-release-trigger:
uses: ynput/ops-repo-automation/.github/workflows/release_trigger.yml@main
with:
draft: ${{ inputs.draft }}
release_overwrite: ${{ inputs.release_overwrite }}
secrets:
token: ${{ secrets.YNPUT_BOT_TOKEN }}
email: ${{ secrets.CI_EMAIL }}
user: ${{ secrets.CI_USER }}


@ -0,0 +1,16 @@
name: 📤 Upload to Ynput Cloud
on:
workflow_dispatch:
release:
types: [published]
jobs:
call-upload-to-ynput-cloud:
uses: ynput/ops-repo-automation/.github/workflows/upload_to_ynput_cloud.yml@main
secrets:
CI_EMAIL: ${{ secrets.CI_EMAIL }}
CI_USER: ${{ secrets.CI_USER }}
YNPUT_BOT_TOKEN: ${{ secrets.YNPUT_BOT_TOKEN }}
YNPUT_CLOUD_URL: ${{ secrets.YNPUT_CLOUD_URL }}
YNPUT_CLOUD_TOKEN: ${{ secrets.YNPUT_CLOUD_TOKEN }}


@ -9,10 +9,6 @@ AYON_CORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# -------------------------
PACKAGE_DIR = AYON_CORE_ROOT
PLUGINS_DIR = os.path.join(AYON_CORE_ROOT, "plugins")
AYON_SERVER_ENABLED = True
# Indicate if AYON entities should be used instead of OpenPype entities
USE_AYON_ENTITIES = True
# -------------------------
@ -23,6 +19,4 @@ __all__ = (
"AYON_CORE_ROOT",
"PACKAGE_DIR",
"PLUGINS_DIR",
"AYON_SERVER_ENABLED",
"USE_AYON_ENTITIES",
)


@ -9,11 +9,18 @@ from .interfaces import (
)
from .base import (
ProcessPreparationError,
ProcessContext,
AYONAddon,
AddonsManager,
load_addons,
)
from .utils import (
ensure_addons_are_process_context_ready,
ensure_addons_are_process_ready,
)
__all__ = (
"click_wrap",
@ -24,7 +31,12 @@ __all__ = (
"ITrayService",
"IHostAddon",
"ProcessPreparationError",
"ProcessContext",
"AYONAddon",
"AddonsManager",
"load_addons",
"ensure_addons_are_process_context_ready",
"ensure_addons_are_process_ready",
)


@ -10,13 +10,18 @@ import threading
import collections
from uuid import uuid4
from abc import ABC, abstractmethod
from typing import Optional
import appdirs
import ayon_api
from semver import VersionInfo
from ayon_core import AYON_CORE_ROOT
from ayon_core.lib import Logger, is_dev_mode_enabled
from ayon_core.lib import (
Logger,
is_dev_mode_enabled,
get_launcher_storage_dir,
is_headless_mode_enabled,
)
from ayon_core.settings import get_studio_settings
from .interfaces import (
@ -31,9 +36,6 @@ IGNORED_FILENAMES = {
# Files ignored on addons import from "./ayon_core/modules"
IGNORED_DEFAULT_FILENAMES = {
"__init__.py",
"base.py",
"interfaces.py",
"click_wrap.py",
}
# When addon was moved from ayon-core codebase
@ -64,77 +66,65 @@ MOVED_ADDON_MILESTONE_VERSIONS = {
}
# Inherit from `object` for Python 2 hosts
class _ModuleClass(object):
"""Fake module class for storing AYON addons.
class ProcessPreparationError(Exception):
"""Exception that can be used when process preparation failed.
The message is shown to the user (either as a UI dialog or printed). If a
different error is raised, a "generic" error message is shown to the user
with an option to copy the error message to the clipboard.
Object of this class can be stored to `sys.modules` and used for storing
dynamically imported modules.
"""
pass
def __init__(self, name):
# Call setattr on super class
super(_ModuleClass, self).__setattr__("name", name)
super(_ModuleClass, self).__setattr__("__name__", name)
# Where modules and interfaces are stored
super(_ModuleClass, self).__setattr__("__attributes__", dict())
super(_ModuleClass, self).__setattr__("__defaults__", set())
class ProcessContext:
"""Hold context of process that is going to be started.
super(_ModuleClass, self).__setattr__("_log", None)
Right now the context is simple, holding information about the addon that
wants to trigger the preparation and possibly the project name for which it
should happen.
def __getattr__(self, attr_name):
if attr_name not in self.__attributes__:
if attr_name in ("__path__", "__file__"):
return None
raise AttributeError("'{}' has not attribute '{}'".format(
self.name, attr_name
))
return self.__attributes__[attr_name]
Preparation of a process can be required by ayon-core or any other addon.
It can be a change of environment variables, or a request to log in to
a project management tool.
def __iter__(self):
for module in self.values():
yield module
At the moment 'ProcessContext' is only a data holder, but that might change
in the future if there is a need.
def __setattr__(self, attr_name, value):
if attr_name in self.__attributes__:
self.log.warning(
"Duplicated name \"{}\" in {}. Overriding.".format(
attr_name, self.name
)
)
self.__attributes__[attr_name] = value
Args:
addon_name (str): Addon name which triggered process.
addon_version (str): Addon version which triggered process.
project_name (Optional[str]): Project name. Can be filled in case
process is triggered for specific project. Some addons can have
different behavior based on project. Value is NOT autofilled.
headless (Optional[bool]): Is process running in headless mode. Value
is filled with value based on state set in AYON launcher.
def __setitem__(self, key, value):
self.__setattr__(key, value)
"""
def __init__(
self,
addon_name: str,
addon_version: str,
project_name: Optional[str] = None,
headless: Optional[bool] = None,
**kwargs,
):
if headless is None:
headless = is_headless_mode_enabled()
self.addon_name: str = addon_name
self.addon_version: str = addon_version
self.project_name: Optional[str] = project_name
self.headless: bool = headless
def __getitem__(self, key):
return getattr(self, key)
@property
def log(self):
if self._log is None:
super(_ModuleClass, self).__setattr__(
"_log", Logger.get_logger(self.name)
)
return self._log
def get(self, key, default=None):
return self.__attributes__.get(key, default)
def keys(self):
return self.__attributes__.keys()
def values(self):
return self.__attributes__.values()
def items(self):
return self.__attributes__.items()
if kwargs:
unknown_keys = ", ".join([f'"{key}"' for key in kwargs.keys()])
print(f"Unknown keys in ProcessContext: {unknown_keys}")
class _LoadCache:
addons_lock = threading.Lock()
addons_loaded = False
addon_modules = []
def load_addons(force=False):
@ -248,7 +238,7 @@ def _handle_moved_addons(addon_name, milestone_version, log):
return addon_dir
def _load_ayon_addons(openpype_modules, modules_key, log):
def _load_ayon_addons(log):
"""Load AYON addons based on information from server.
This function should not trigger downloading of any addons but only use
@ -256,30 +246,18 @@ def _load_ayon_addons(openpype_modules, modules_key, log):
development).
Args:
openpype_modules (_ModuleClass): Module object where modules are
stored.
modules_key (str): Key under which will be modules imported in
`sys.modules`.
log (logging.Logger): Logger object.
Returns:
List[str]: List of v3 addons to skip to load because v4 alternative is
imported.
"""
addons_to_skip_in_core = []
all_addon_modules = []
bundle_info = _get_ayon_bundle_data()
addons_info = _get_ayon_addons_information(bundle_info)
if not addons_info:
return addons_to_skip_in_core
return all_addon_modules
addons_dir = os.environ.get("AYON_ADDONS_DIR")
if not addons_dir:
addons_dir = os.path.join(
appdirs.user_data_dir("AYON", "Ynput"),
"addons"
)
addons_dir = get_launcher_storage_dir("addons")
dev_mode_enabled = is_dev_mode_enabled()
dev_addons_info = {}
@ -298,7 +276,7 @@ def _load_ayon_addons(openpype_modules, modules_key, log):
addon_version = addon_info["version"]
# core addon does not have any addon object
if addon_name in ("openpype", "core"):
if addon_name == "core":
continue
dev_addon_info = dev_addons_info.get(addon_name, {})
@ -337,7 +315,7 @@ def _load_ayon_addons(openpype_modules, modules_key, log):
continue
sys.path.insert(0, addon_dir)
imported_modules = []
addon_modules = []
for name in os.listdir(addon_dir):
# Ignoring of files is implemented to be able to run code from sources,
# where there are usually more files than just the addon
@ -364,7 +342,7 @@ def _load_ayon_addons(openpype_modules, modules_key, log):
inspect.isclass(attr)
and issubclass(attr, AYONAddon)
):
imported_modules.append(mod)
addon_modules.append(mod)
break
except BaseException:
@ -373,50 +351,37 @@ def _load_ayon_addons(openpype_modules, modules_key, log):
exc_info=True
)
if not imported_modules:
if not addon_modules:
log.warning("Addon {} {} has no content to import".format(
addon_name, addon_version
))
continue
if len(imported_modules) > 1:
if len(addon_modules) > 1:
log.warning((
"Skipping addon '{}'."
" Multiple modules were found ({}) in dir {}."
"Multiple modules ({}) were found in addon '{}' in dir {}."
).format(
", ".join([m.__name__ for m in addon_modules]),
addon_name,
", ".join([m.__name__ for m in imported_modules]),
addon_dir,
))
continue
all_addon_modules.extend(addon_modules)
mod = imported_modules[0]
addon_alias = getattr(mod, "V3_ALIAS", None)
if not addon_alias:
addon_alias = addon_name
addons_to_skip_in_core.append(addon_alias)
new_import_str = "{}.{}".format(modules_key, addon_alias)
sys.modules[new_import_str] = mod
setattr(openpype_modules, addon_alias, mod)
return addons_to_skip_in_core
return all_addon_modules
def _load_addons_in_core(
ignore_addon_names, openpype_modules, modules_key, log
):
def _load_addons_in_core(log):
# Add current directory at first place
# - has small differences in import logic
addon_modules = []
modules_dir = os.path.join(AYON_CORE_ROOT, "modules")
if not os.path.exists(modules_dir):
log.warning(
f"Could not find path when loading AYON addons \"{modules_dir}\""
)
return
return addon_modules
ignored_filenames = IGNORED_FILENAMES | IGNORED_DEFAULT_FILENAMES
for filename in os.listdir(modules_dir):
# Ignore filenames
if filename in ignored_filenames:
@ -425,9 +390,6 @@ def _load_addons_in_core(
fullpath = os.path.join(modules_dir, filename)
basename, ext = os.path.splitext(filename)
if basename in ignore_addon_names:
continue
# Validations
if os.path.isdir(fullpath):
# Check existence of init file
@ -446,69 +408,43 @@ def _load_addons_in_core(
# - check manifest and content of manifest
try:
# Don't import dynamically current directory modules
new_import_str = f"{modules_key}.{basename}"
import_str = f"ayon_core.modules.{basename}"
default_module = __import__(import_str, fromlist=("", ))
sys.modules[new_import_str] = default_module
setattr(openpype_modules, basename, default_module)
addon_modules.append(default_module)
except Exception:
log.error(
f"Failed to import in-core addon '{basename}'.",
exc_info=True
)
return addon_modules
def _load_addons():
# Key under which will be modules imported in `sys.modules`
modules_key = "openpype_modules"
# Change `sys.modules`
sys.modules[modules_key] = openpype_modules = _ModuleClass(modules_key)
log = Logger.get_logger("AddonsLoader")
ignore_addon_names = _load_ayon_addons(
openpype_modules, modules_key, log
)
_load_addons_in_core(
ignore_addon_names, openpype_modules, modules_key, log
)
addon_modules = _load_ayon_addons(log)
# All addons in the 'modules' folder are tray actions and should be moved
# to the tray tool.
# TODO remove
addon_modules.extend(_load_addons_in_core(log))
_MARKING_ATTR = "_marking"
def mark_func(func):
"""Mark function to be used in report.
Args:
func (Callable): Function to mark.
Returns:
Callable: Marked function.
"""
setattr(func, _MARKING_ATTR, True)
return func
def is_func_marked(func):
return getattr(func, _MARKING_ATTR, False)
# Store modules to local cache
_LoadCache.addon_modules = addon_modules
class AYONAddon(ABC):
"""Base class of AYON addon.
Attributes:
id (UUID): Addon object id.
enabled (bool): Is addon enabled.
name (str): Addon name.
Args:
manager (AddonsManager): Manager object who discovered addon.
settings (dict[str, Any]): AYON settings.
"""
"""
enabled = True
_id = None
@ -528,8 +464,8 @@ class AYONAddon(ABC):
Returns:
str: Object id.
"""
"""
if self._id is None:
self._id = uuid4()
return self._id
@ -541,8 +477,8 @@ class AYONAddon(ABC):
Returns:
str: Addon name.
"""
"""
pass
@property
@ -573,18 +509,40 @@ class AYONAddon(ABC):
Args:
settings (dict[str, Any]): Settings.
"""
"""
pass
@mark_func
def connect_with_addons(self, enabled_addons):
"""Connect with other enabled addons.
Args:
enabled_addons (list[AYONAddon]): Addons that are enabled.
"""
"""
pass
def ensure_is_process_ready(
self, process_context: ProcessContext
):
"""Make sure addon is prepared for a process.
This method is called when some action makes sure that the addon has
the necessary data set. For example, if the user should be logged in,
with credentials filled in environment variables, this method should
ask the user for credentials.
Implementation of this method is optional.
Note:
The logic can be similar to the logic in tray, but the tray does not
require the user to be logged in.
Args:
process_context (ProcessContext): Context of child
process.
"""
pass
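A sketch of how an addon might implement 'ensure_is_process_ready', assuming a hypothetical credential stored in an environment variable:

import os

class MyAddon(AYONAddon):
    name = "my_addon"  # hypothetical addon

    def ensure_is_process_ready(self, process_context):
        # Raising 'ProcessPreparationError' shows the message to the
        # user instead of the generic error dialog
        if not os.environ.get("MY_ADDON_API_KEY"):
            raise ProcessPreparationError(
                "MY_ADDON_API_KEY is not set. Please log in first."
            )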
def get_global_environments(self):
@ -594,8 +552,8 @@ class AYONAddon(ABC):
Returns:
dict[str, str]: Environment variables.
"""
"""
return {}
def modify_application_launch_arguments(self, application, env):
@ -607,8 +565,8 @@ class AYONAddon(ABC):
Args:
application (Application): Application that is launched.
env (dict[str, str]): Current environment variables.
"""
"""
pass
def on_host_install(self, host, host_name, project_name):
@ -627,8 +585,8 @@ class AYONAddon(ABC):
host_name (str): Name of host.
project_name (str): Project name which is main part of host
context.
"""
"""
pass
def cli(self, addon_click_group):
@ -655,31 +613,11 @@ class AYONAddon(ABC):
Args:
addon_click_group (click.Group): Group to which can be added
commands.
"""
pass
class OpenPypeModule(AYONAddon):
"""Base class of OpenPype module.
Deprecated:
Use `AYONAddon` instead.
Args:
manager (AddonsManager): Manager object who discovered addon.
settings (dict[str, Any]): Module settings (OpenPype settings).
"""
# Disable by default
enabled = False
class OpenPypeAddOn(OpenPypeModule):
# Enable Addon by default
enabled = True
class _AddonReportInfo:
def __init__(
self, class_name, name, version, report_value_by_label
@ -711,8 +649,8 @@ class AddonsManager:
settings (Optional[dict[str, Any]]): AYON studio settings.
initialize (Optional[bool]): Initialize addons on init.
True by default.
"""
"""
# Helper attributes for report
_report_total_key = "Total"
_log = None
@ -748,8 +686,8 @@ class AddonsManager:
Returns:
Union[AYONAddon, Any]: Addon found by name or `default`.
"""
"""
return self._addons_by_name.get(addon_name, default)
@property
@ -776,8 +714,8 @@ class AddonsManager:
Returns:
Union[AYONAddon, None]: Enabled addon found by name or None.
"""
"""
addon = self.get(addon_name)
if addon is not None and addon.enabled:
return addon
@ -788,8 +726,8 @@ class AddonsManager:
Returns:
list[AYONAddon]: Initialized and enabled addons.
"""
"""
return [
addon
for addon in self._addons
@ -801,8 +739,6 @@ class AddonsManager:
# Make sure modules are loaded
load_addons()
import openpype_modules
self.log.debug("*** AYON addons initialization.")
# Prepare settings for addons
@ -810,14 +746,12 @@ class AddonsManager:
if settings is None:
settings = get_studio_settings()
modules_settings = {}
report = {}
time_start = time.time()
prev_start_time = time_start
addon_classes = []
for module in openpype_modules:
for module in _LoadCache.addon_modules:
# Go through globals in `ayon_core.modules`
for name in dir(module):
modules_item = getattr(module, name, None)
@ -826,8 +760,6 @@ class AddonsManager:
if (
not inspect.isclass(modules_item)
or modules_item is AYONAddon
or modules_item is OpenPypeModule
or modules_item is OpenPypeAddOn
or not issubclass(modules_item, AYONAddon)
):
continue
@ -853,33 +785,14 @@ class AddonsManager:
addon_classes.append(modules_item)
aliased_names = []
for addon_cls in addon_classes:
name = addon_cls.__name__
if issubclass(addon_cls, OpenPypeModule):
# TODO change to warning
self.log.debug((
"Addon '{}' is inherited from 'OpenPypeModule'."
" Please use 'AYONAddon'."
).format(name))
try:
# Try initialize module
if issubclass(addon_cls, OpenPypeModule):
addon = addon_cls(self, modules_settings)
else:
addon = addon_cls(self, settings)
addon = addon_cls(self, settings)
# Store initialized object
self._addons.append(addon)
self._addons_by_id[addon.id] = addon
self._addons_by_name[addon.name] = addon
# NOTE This will be removed with release 1.0.0 of ayon-core
# please use carefully.
# Gives option to use alias name for addon for cases when
# name in OpenPype was not the same as in AYON.
name_alias = getattr(addon, "openpype_alias", None)
if name_alias:
aliased_names.append((name_alias, addon))
now = time.time()
report[addon.__class__.__name__] = now - prev_start_time
@ -898,17 +811,6 @@ class AddonsManager:
f"[{enabled_str}] {addon.name} ({addon.version})"
)
for item in aliased_names:
name_alias, addon = item
if name_alias not in self._addons_by_name:
self._addons_by_name[name_alias] = addon
continue
self.log.warning(
"Alias name '{}' of addon '{}' is already assigned.".format(
name_alias, addon.name
)
)
if self._report is not None:
report[self._report_total_key] = time.time() - time_start
self._report["Initialization"] = report
@ -925,16 +827,7 @@ class AddonsManager:
self.log.debug("Has {} enabled addons.".format(len(enabled_addons)))
for addon in enabled_addons:
try:
if not is_func_marked(addon.connect_with_addons):
addon.connect_with_addons(enabled_addons)
elif hasattr(addon, "connect_with_modules"):
self.log.warning((
"DEPRECATION WARNING: Addon '{}' still uses"
" 'connect_with_modules' method. Please switch to use"
" 'connect_with_addons' method."
).format(addon.name))
addon.connect_with_modules(enabled_addons)
addon.connect_with_addons(enabled_addons)
except Exception:
self.log.error(
@ -1283,56 +1176,3 @@ class AddonsManager:
# Join rows with newline char and add new line at the end
output = "\n".join(formatted_rows) + "\n"
print(output)
# DEPRECATED - Module compatibility
@property
def modules(self):
self.log.warning(
"DEPRECATION WARNING: Used deprecated property"
" 'modules' please use 'addons' instead."
)
return self.addons
@property
def modules_by_id(self):
self.log.warning(
"DEPRECATION WARNING: Used deprecated property"
" 'modules_by_id' please use 'addons_by_id' instead."
)
return self.addons_by_id
@property
def modules_by_name(self):
self.log.warning(
"DEPRECATION WARNING: Used deprecated property"
" 'modules_by_name' please use 'addons_by_name' instead."
)
return self.addons_by_name
def get_enabled_module(self, *args, **kwargs):
self.log.warning(
"DEPRECATION WARNING: Used deprecated method"
" 'get_enabled_module' please use 'get_enabled_addon' instead."
)
return self.get_enabled_addon(*args, **kwargs)
def initialize_modules(self):
self.log.warning(
"DEPRECATION WARNING: Used deprecated method"
" 'initialize_modules' please use 'initialize_addons' instead."
)
self.initialize_addons()
def get_enabled_modules(self):
self.log.warning(
"DEPRECATION WARNING: Used deprecated method"
" 'get_enabled_modules' please use 'get_enabled_addons' instead."
)
return self.get_enabled_addons()
def get_host_module(self, host_name):
self.log.warning(
"DEPRECATION WARNING: Used deprecated method"
" 'get_host_module' please use 'get_host_addon' instead."
)
return self.get_host_addon(host_name)


@ -0,0 +1,132 @@
import sys
import json
from typing import Optional
from qtpy import QtWidgets, QtCore
from ayon_core.style import load_stylesheet
from ayon_core.tools.utils import get_ayon_qt_app
class DetailDialog(QtWidgets.QDialog):
def __init__(self, detail, parent):
super().__init__(parent)
self.setWindowTitle("Detail")
detail_input = QtWidgets.QPlainTextEdit(self)
detail_input.setPlainText(detail)
detail_input.setReadOnly(True)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(detail_input, 1)
def showEvent(self, event):
self.resize(600, 400)
super().showEvent(event)
class ErrorDialog(QtWidgets.QDialog):
def __init__(
self,
message: str,
detail: Optional[str],
parent: Optional[QtWidgets.QWidget] = None
):
super().__init__(parent)
self.setWindowTitle("Preparation failed")
self.setWindowFlags(
self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint
)
message_label = QtWidgets.QLabel(self)
detail_wrapper = QtWidgets.QWidget(self)
detail_label = QtWidgets.QLabel(detail_wrapper)
detail_layout = QtWidgets.QVBoxLayout(detail_wrapper)
detail_layout.setContentsMargins(0, 0, 0, 0)
detail_layout.addWidget(detail_label)
btns_wrapper = QtWidgets.QWidget(self)
copy_detail_btn = QtWidgets.QPushButton("Copy detail", btns_wrapper)
show_detail_btn = QtWidgets.QPushButton("Show detail", btns_wrapper)
confirm_btn = QtWidgets.QPushButton("Close", btns_wrapper)
btns_layout = QtWidgets.QHBoxLayout(btns_wrapper)
btns_layout.setContentsMargins(0, 0, 0, 0)
btns_layout.addWidget(copy_detail_btn, 0)
btns_layout.addWidget(show_detail_btn, 0)
btns_layout.addStretch(1)
btns_layout.addWidget(confirm_btn, 0)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(message_label, 0)
layout.addWidget(detail_wrapper, 1)
layout.addWidget(btns_wrapper, 0)
copy_detail_btn.clicked.connect(self._on_copy_clicked)
show_detail_btn.clicked.connect(self._on_show_detail_clicked)
confirm_btn.clicked.connect(self._on_confirm_clicked)
self._message_label = message_label
self._detail_wrapper = detail_wrapper
self._detail_label = detail_label
self._copy_detail_btn = copy_detail_btn
self._show_detail_btn = show_detail_btn
self._confirm_btn = confirm_btn
self._detail_dialog = None
self._detail = detail
self.set_message(message, detail)
def showEvent(self, event):
self.setStyleSheet(load_stylesheet())
self.resize(320, 140)
super().showEvent(event)
def set_message(self, message, detail):
self._message_label.setText(message)
self._detail = detail
for widget in (
self._copy_detail_btn,
self._show_detail_btn,
):
widget.setVisible(bool(detail))
def _on_copy_clicked(self):
if self._detail:
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(self._detail)
def _on_show_detail_clicked(self):
if self._detail_dialog is None:
self._detail_dialog = DetailDialog(self._detail, self)
self._detail_dialog.show()
def _on_confirm_clicked(self):
self.accept()
def main():
json_path = sys.argv[-1]
with open(json_path, "r") as stream:
data = json.load(stream)
message = data["message"]
detail = data["detail"]
app = get_ayon_qt_app()
dialog = ErrorDialog(message, detail)
dialog.show()
app.exec_()
if __name__ == "__main__":
main()
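For reference, a minimal sketch of the payload this dialog consumes, matching the keys read in 'main()' (values are hypothetical):

payload = {
    "message": "Preparation failed",
    "detail": "Traceback (most recent call last): ...",
}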


@ -0,0 +1,201 @@
import os
import sys
import contextlib
import tempfile
import json
import traceback
from io import StringIO
from typing import Optional
from ayon_core.lib import run_ayon_launcher_process
from .base import AddonsManager, ProcessContext, ProcessPreparationError
def _handle_error(
process_context: ProcessContext,
message: str,
detail: Optional[str],
):
"""Handle error in process ready preparation.
Shows a UI to inform the user about the error, or prints the message
to stdout if running in headless mode.
Todos:
Make this functionality, including the dialog, a unified function so
it can be used elsewhere.
Args:
process_context (ProcessContext): The context in which the
error occurred.
message (str): The message to show.
detail (Optional[str]): The detail message to show (usually
traceback).
"""
if process_context.headless:
if detail:
print(detail)
print(f"{10*'*'}\n{message}\n{10*'*'}")
return
current_dir = os.path.dirname(os.path.abspath(__file__))
script_path = os.path.join(current_dir, "ui", "process_ready_error.py")
with tempfile.NamedTemporaryFile("w", delete=False) as tmp:
tmp_path = tmp.name
json.dump(
{"message": message, "detail": detail},
tmp.file
)
try:
run_ayon_launcher_process(
"--skip-bootstrap",
script_path,
tmp_path,
add_sys_paths=True,
creationflags=0,
)
finally:
os.remove(tmp_path)
def _start_tray():
from ayon_core.tools.tray import make_sure_tray_is_running
make_sure_tray_is_running()
def ensure_addons_are_process_context_ready(
process_context: ProcessContext,
addons_manager: Optional[AddonsManager] = None,
exit_on_failure: bool = True,
) -> bool:
"""Ensure all enabled addons are ready to be used in the given context.
Call this method only in an AYON launcher process, and as the first thing,
to avoid possible clashes with the preparation. For example, a
'QApplication' should not be created yet.
Todos:
Run all preparations and allow failed preparations to be "ignored".
Right now a single addon can block the use of certain actions.
Args:
process_context (ProcessContext): The context in which the
addons should be prepared.
addons_manager (Optional[AddonsManager]): The addons
manager to use. If not provided, a new one will be created.
exit_on_failure (bool, optional): If True, the process will exit
if an error occurs. Defaults to True.
Returns:
bool: True if all addons are ready, False otherwise.
"""
if addons_manager is None:
addons_manager = AddonsManager()
message = None
failed = False
use_detail = False
# Wrap the output in StringIO to capture it for details on failure
# - but in case stdout was invalid at the start of the process, also
# store the tracebacks
tracebacks = []
output = StringIO()
with contextlib.redirect_stdout(output):
with contextlib.redirect_stderr(output):
for addon in addons_manager.get_enabled_addons():
addon_failed = True
try:
addon.ensure_is_process_ready(process_context)
addon_failed = False
except ProcessPreparationError as exc:
message = str(exc)
print(f"Addon preparation failed: '{addon.name}'")
print(message)
except BaseException:
use_detail = True
message = "An unexpected error occurred."
formatted_traceback = "".join(traceback.format_exception(
*sys.exc_info()
))
tracebacks.append(formatted_traceback)
print(f"Addon preparation failed: '{addon.name}'")
print(message)
# Print the traceback so it is in the stdout
print(formatted_traceback)
if addon_failed:
failed = True
break
output_str = output.getvalue()
# Print stdout/stderr to console as it was redirected
print(output_str)
if not failed:
if not process_context.headless:
_start_tray()
return True
detail = None
if use_detail:
# In case stdout was not captured, use the tracebacks as detail
if not output_str:
output_str = "\n".join(tracebacks)
detail = output_str
_handle_error(process_context, message, detail)
if exit_on_failure:
sys.exit(1)
return False
def ensure_addons_are_process_ready(
addon_name: str,
addon_version: str,
project_name: Optional[str] = None,
headless: Optional[bool] = None,
*,
addons_manager: Optional[AddonsManager] = None,
exit_on_failure: bool = True,
**kwargs,
) -> bool:
"""Ensure all enabled addons are ready to be used in the given context.
Call this method only in an AYON launcher process, and as the first thing,
to avoid possible clashes with the preparation. For example, a
'QApplication' should not be created yet.
Args:
addon_name (str): Addon name which triggered process.
addon_version (str): Addon version which triggered process.
project_name (Optional[str]): Project name. Can be filled in case
process is triggered for specific project. Some addons can have
different behavior based on project. Value is NOT autofilled.
headless (Optional[bool]): Is process running in headless mode. Value
is filled with value based on state set in AYON launcher.
addons_manager (Optional[AddonsManager]): The addons
manager to use. If not provided, a new one will be created.
exit_on_failure (bool, optional): If True, the process will exit
if an error occurs. Defaults to True.
kwargs: The keyword arguments to pass to the ProcessContext.
Returns:
bool: True if all addons are ready, False otherwise.
"""
context: ProcessContext = ProcessContext(
addon_name,
addon_version,
project_name,
headless,
**kwargs
)
return ensure_addons_are_process_context_ready(
context, addons_manager, exit_on_failure
)
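A sketch of the typical call at the start of an addon-triggered process (addon and project names are hypothetical):

from ayon_core.addon import ensure_addons_are_process_ready

# Exits the process with code 1 if any addon preparation fails
ensure_addons_are_process_ready(
    addon_name="applications",
    addon_version="1.0.0",
    project_name="my_project",
)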


@ -21,21 +21,7 @@ from ayon_core.lib import (
class AliasedGroup(click.Group):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._aliases = {}
def set_alias(self, src_name, dst_name):
self._aliases[dst_name] = src_name
def get_command(self, ctx, cmd_name):
if cmd_name in self._aliases:
cmd_name = self._aliases[cmd_name]
return super().get_command(ctx, cmd_name)
@click.group(cls=AliasedGroup, invoke_without_command=True)
@click.group(invoke_without_command=True)
@click.pass_context
@click.option("--use-staging", is_flag=True,
expose_value=False, help="use staging variants")
@ -86,10 +72,6 @@ def addon(ctx):
pass
# Add 'addon' as alias for module
main_cli.set_alias("addon", "module")
@main_cli.command()
@click.pass_context
@click.argument("output_json_path")


@ -28,7 +28,8 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
"substancepainter",
"aftereffects",
"wrap",
"openrv"
"openrv",
"cinema4d"
}
launch_types = {LaunchTypes.local}


@ -94,4 +94,4 @@ class GlobalHostDataHook(PreLaunchHook):
task_entity = get_task_by_name(
project_name, folder_entity["id"], task_name
)
self.data["task_entity"] = task_entity
self.data["task_entity"] = task_entity


@ -19,7 +19,8 @@ class OCIOEnvHook(PreLaunchHook):
"nuke",
"hiero",
"resolve",
"openrv"
"openrv",
"cinema4d"
}
launch_types = set()


@ -7,11 +7,10 @@ from .local_settings import (
JSONSettingRegistry,
AYONSecureRegistry,
AYONSettingsRegistry,
OpenPypeSecureRegistry,
OpenPypeSettingsRegistry,
get_launcher_local_dir,
get_launcher_storage_dir,
get_local_site_id,
get_ayon_username,
get_openpype_username,
)
from .ayon_connection import initialize_ayon_connection
from .cache import (
@ -57,13 +56,11 @@ from .env_tools import (
from .terminal import Terminal
from .execute import (
get_ayon_launcher_args,
get_openpype_execute_args,
get_linux_launcher_args,
execute,
run_subprocess,
run_detached_process,
run_ayon_launcher_process,
run_openpype_process,
path_to_subprocess_arg,
CREATE_NO_WINDOW
)
@ -130,6 +127,7 @@ from .ayon_info import (
is_in_ayon_launcher_process,
is_running_from_build,
is_using_ayon_console,
is_headless_mode_enabled,
is_staging_enabled,
is_dev_mode_enabled,
is_in_tests,
@ -142,11 +140,10 @@ __all__ = [
"JSONSettingRegistry",
"AYONSecureRegistry",
"AYONSettingsRegistry",
"OpenPypeSecureRegistry",
"OpenPypeSettingsRegistry",
"get_launcher_local_dir",
"get_launcher_storage_dir",
"get_local_site_id",
"get_ayon_username",
"get_openpype_username",
"initialize_ayon_connection",
@ -157,13 +154,11 @@ __all__ = [
"register_event_callback",
"get_ayon_launcher_args",
"get_openpype_execute_args",
"get_linux_launcher_args",
"execute",
"run_subprocess",
"run_detached_process",
"run_ayon_launcher_process",
"run_openpype_process",
"path_to_subprocess_arg",
"CREATE_NO_WINDOW",
@ -241,6 +236,7 @@ __all__ = [
"is_in_ayon_launcher_process",
"is_running_from_build",
"is_using_ayon_console",
"is_headless_mode_enabled",
"is_staging_enabled",
"is_dev_mode_enabled",
"is_in_tests",

File diff suppressed because it is too large.


@ -78,6 +78,10 @@ def is_using_ayon_console():
return "ayon_console" in executable_filename
def is_headless_mode_enabled():
return os.getenv("AYON_HEADLESS_MODE") == "1"
def is_staging_enabled():
return os.getenv("AYON_USE_STAGING") == "1"


@ -8,7 +8,6 @@ import logging
import weakref
from uuid import uuid4
from .python_2_comp import WeakMethod
from .python_module_tools import is_func_signature_supported
@ -18,7 +17,7 @@ class MissingEventSystem(Exception):
def _get_func_ref(func):
if inspect.ismethod(func):
return WeakMethod(func)
return weakref.WeakMethod(func)
return weakref.ref(func)
@ -123,7 +122,7 @@ class weakref_partial:
)
class EventCallback(object):
class EventCallback:
"""Callback registered to a topic.
The callback function is registered to a topic. Topic is a string which
@ -380,8 +379,7 @@ class EventCallback(object):
self._partial_func = None
# Inherit from 'object' for Python 2 hosts
class Event(object):
class Event:
"""Base event object.
Can be used for any event because is not specific. Only required argument
@ -488,7 +486,7 @@ class Event(object):
return obj
class EventSystem(object):
class EventSystem:
"""Encapsulate event handling into an object.
System wraps registered callbacks and triggered events into single object,
@ -568,6 +566,10 @@ class EventSystem(object):
self._process_event(event)
def clear_callbacks(self):
"""Clear all registered callbacks."""
self._registered_callbacks = []
def _process_event(self, event):
"""Process event topic and trigger callbacks.


@ -108,6 +108,20 @@ def run_subprocess(*args, **kwargs):
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
)
# Escape parentheses for bash
if (
kwargs.get("shell") is True
and len(args) == 1
and isinstance(args[0], str)
and os.getenv("SHELL") in ("/bin/bash", "/bin/sh")
):
new_arg = (
args[0]
.replace("(", "\\(")
.replace(")", "\\)")
)
args = (new_arg, )
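# e.g. the shell command 'echo (done)' becomes 'echo \(done\)'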
# Get environments from kwargs or use current process environment
# if not passed.
env = kwargs.get("env") or os.environ
@ -179,7 +193,7 @@ def clean_envs_for_ayon_process(env=None):
return env
def run_ayon_launcher_process(*args, **kwargs):
def run_ayon_launcher_process(*args, add_sys_paths=False, **kwargs):
"""Execute AYON process with passed arguments and wait.
Wrapper for 'run_process' which prepends AYON executable arguments
@ -209,29 +223,18 @@ def run_ayon_launcher_process(*args, **kwargs):
# - fill more if you find more
env = clean_envs_for_ayon_process(os.environ)
if add_sys_paths:
new_pythonpath = list(sys.path)
lookup_set = set(new_pythonpath)
for path in (env.get("PYTHONPATH") or "").split(os.pathsep):
if path and path not in lookup_set:
new_pythonpath.append(path)
lookup_set.add(path)
env["PYTHONPATH"] = os.pathsep.join(new_pythonpath)
return run_subprocess(args, env=env, **kwargs)
def run_openpype_process(*args, **kwargs):
"""Execute AYON process with passed arguments and wait.
Wrapper for 'run_process' which prepends AYON executable arguments
before passed arguments and define environments if are not passed.
Values from 'os.environ' are used for environments if are not passed.
They are cleaned using 'clean_envs_for_ayon_process' function.
Example:
>>> run_openpype_process("version")
Args:
*args (tuple): AYON cli arguments.
**kwargs (dict): Keyword arguments for subprocess.Popen.
"""
return run_ayon_launcher_process(*args, **kwargs)
def run_detached_process(args, **kwargs):
"""Execute process with passed arguments as separated process.
@ -318,14 +321,12 @@ def path_to_subprocess_arg(path):
def get_ayon_launcher_args(*args):
"""Arguments to run ayon-launcher process.
"""Arguments to run AYON launcher process.
Arguments for subprocess when need to spawn new pype process. Which may be
needed when new python process for pype scripts must be executed in build
pype.
Arguments for subprocess when a new AYON launcher process must be spawned.
Reasons:
Ayon-launcher started from code has different executable set to
AYON launcher started from code has a different executable, set to the
virtual env python, and must have the path to a script as the first
argument, which is not needed for the built application.
@ -333,7 +334,8 @@ def get_ayon_launcher_args(*args):
*args (str): Any arguments that will be added after executables.
Returns:
list[str]: List of arguments to run ayon-launcher process.
list[str]: List of arguments to run AYON launcher process.
"""
executable = os.environ["AYON_EXECUTABLE"]
launch_args = [executable]
@ -391,21 +393,3 @@ def get_linux_launcher_args(*args):
launch_args.extend(args)
return launch_args
def get_openpype_execute_args(*args):
"""Arguments to run pype command.
Arguments for subprocess when need to spawn new pype process. Which may be
needed when new python process for pype scripts must be executed in build
pype.
## Why is this needed?
Pype executed from code has different executable set to virtual env python
and must have path to script as first argument which is not needed for
build pype.
It is possible to pass any arguments that will be added after pype
executables.
"""
return get_ayon_launcher_args(*args)


@ -22,7 +22,7 @@ class DuplicateDestinationError(ValueError):
"""
class FileTransaction(object):
class FileTransaction:
"""File transaction with rollback options.
The file transaction is a three-step process.


@ -3,26 +3,11 @@
import os
import json
import platform
import configparser
import warnings
from datetime import datetime
from abc import ABC, abstractmethod
# disable lru cache in Python 2
try:
from functools import lru_cache
except ImportError:
def lru_cache(maxsize):
def max_size(func):
def wrapper(*args, **kwargs):
value = func(*args, **kwargs)
return value
return wrapper
return max_size
# ConfigParser was renamed in python3 to configparser
try:
import configparser
except ImportError:
import ConfigParser as configparser
from functools import lru_cache
import appdirs
import ayon_api
@ -30,6 +15,87 @@ import ayon_api
_PLACEHOLDER = object()
def _get_ayon_appdirs(*args):
return os.path.join(
appdirs.user_data_dir("AYON", "Ynput"),
*args
)
def get_ayon_appdirs(*args):
"""Local app data directory of AYON client.
Deprecated:
Use 'get_launcher_local_dir' or 'get_launcher_storage_dir' based on
use-case. Deprecation added 24/08/09 (0.4.4-dev.1).
Args:
*args (Iterable[str]): Subdirectories/files in local app data dir.
Returns:
str: Path to directory/file in local app data dir.
"""
warnings.warn(
(
"Function 'get_ayon_appdirs' is deprecated. Should be replaced"
" with 'get_launcher_local_dir' or 'get_launcher_storage_dir'"
" based on use-case."
),
DeprecationWarning
)
return _get_ayon_appdirs(*args)
def get_launcher_storage_dir(*subdirs: str) -> str:
"""Get storage directory for launcher.
Storage directory is used for storing shims, addons, dependencies, etc.
It is not recommended, but the location can be shared across
multiple machines.
Note:
This function should be called at least once on bootstrap.
Args:
*subdirs (str): Subdirectories relative to storage dir.
Returns:
str: Path to storage directory.
"""
storage_dir = os.getenv("AYON_LAUNCHER_STORAGE_DIR")
if not storage_dir:
storage_dir = _get_ayon_appdirs()
return os.path.join(storage_dir, *subdirs)
def get_launcher_local_dir(*subdirs: str) -> str:
"""Get local directory for launcher.
Local directory is used for storing machine or user specific data.
The location is user specific.
Note:
This function should be called at least once on bootstrap.
Args:
*subdirs (str): Subdirectories relative to local dir.
Returns:
str: Path to local directory.
"""
storage_dir = os.getenv("AYON_LAUNCHER_LOCAL_DIR")
if not storage_dir:
storage_dir = _get_ayon_appdirs()
return os.path.join(storage_dir, *subdirs)
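A small sketch of how these helpers are meant to be used (actual locations depend on the 'AYON_LAUNCHER_STORAGE_DIR'/'AYON_LAUNCHER_LOCAL_DIR' environment variables, with an appdirs fallback):

from ayon_core.lib import get_launcher_storage_dir, get_launcher_local_dir

addons_dir = get_launcher_storage_dir("addons")   # shareable data
site_id_path = get_launcher_local_dir("site_id")  # machine specific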
class AYONSecureRegistry:
"""Store information using keyring.
@ -470,55 +536,17 @@ class JSONSettingRegistry(ASettingRegistry):
class AYONSettingsRegistry(JSONSettingRegistry):
"""Class handling AYON general settings registry.
Attributes:
vendor (str): Name used for path construction.
product (str): Additional name used for path construction.
Args:
name (Optional[str]): Name of the registry.
"""
def __init__(self, name=None):
self.vendor = "Ynput"
self.product = "AYON"
if not name:
name = "AYON_settings"
path = appdirs.user_data_dir(self.product, self.vendor)
path = get_launcher_storage_dir()
super(AYONSettingsRegistry, self).__init__(name, path)
def _create_local_site_id(registry=None):
"""Create a local site identifier."""
from coolname import generate_slug
if registry is None:
registry = AYONSettingsRegistry()
new_id = generate_slug(3)
print("Created local site id \"{}\"".format(new_id))
registry.set_item("localId", new_id)
return new_id
def get_ayon_appdirs(*args):
"""Local app data directory of AYON client.
Args:
*args (Iterable[str]): Subdirectories/files in local app data dir.
Returns:
str: Path to directory/file in local app data dir.
"""
return os.path.join(
appdirs.user_data_dir("AYON", "Ynput"),
*args
)
def get_local_site_id():
"""Get local site identifier.
@ -529,7 +557,7 @@ def get_local_site_id():
if site_id:
return site_id
site_id_path = get_ayon_appdirs("site_id")
site_id_path = get_launcher_local_dir("site_id")
if os.path.exists(site_id_path):
with open(site_id_path, "r") as stream:
site_id = stream.read()
@ -556,11 +584,3 @@ def get_ayon_username():
"""
return ayon_api.get_user()["name"]
def get_openpype_username():
return get_ayon_username()
OpenPypeSecureRegistry = AYONSecureRegistry
OpenPypeSettingsRegistry = AYONSettingsRegistry


@ -1,6 +1,5 @@
import os
import sys
import uuid
import getpass
import logging
import platform
@ -11,12 +10,12 @@ import copy
from . import Terminal
# Check for `unicode` in builtins
USE_UNICODE = hasattr(__builtins__, "unicode")
class LogStreamHandler(logging.StreamHandler):
""" StreamHandler class designed to handle utf errors in python 2.x hosts.
"""StreamHandler class.
This was originally designed to handle UTF errors in Python 2.x hosts;
it currently remains solely for backwards compatibility.
"""
@ -25,49 +24,27 @@ class LogStreamHandler(logging.StreamHandler):
self.enabled = True
def enable(self):
""" Enable StreamHandler
"""Enable StreamHandler
Used to silence output
Make StreamHandler output again
"""
self.enabled = True
def disable(self):
""" Disable StreamHandler
"""Disable StreamHandler
Make StreamHandler output again
Used to silence output
"""
self.enabled = False
def emit(self, record):
if not self.enable:
if not self.enabled or self.stream is None:
return
try:
msg = self.format(record)
msg = Terminal.log(msg)
stream = self.stream
if stream is None:
return
fs = "%s\n"
# if no unicode support...
if not USE_UNICODE:
stream.write(fs % msg)
else:
try:
if (isinstance(msg, unicode) and # noqa: F821
getattr(stream, 'encoding', None)):
ufs = u'%s\n'
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
stream.write((ufs % msg).encode(stream.encoding))
else:
if (getattr(stream, 'encoding', 'utf-8')):
ufs = u'%s\n'
stream.write(ufs % unicode(msg)) # noqa: F821
else:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
stream.write(f"{msg}\n")
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
@ -141,8 +118,6 @@ class Logger:
process_data = None
# Cached process name or ability to set different process name
_process_name = None
# TODO Remove 'mongo_process_id' in 1.x.x
mongo_process_id = uuid.uuid4().hex
@classmethod
def get_logger(cls, name=None):


@ -38,7 +38,7 @@ class TemplateUnsolved(Exception):
)
class StringTemplate(object):
class StringTemplate:
"""String that can be formatted."""
def __init__(self, template):
if not isinstance(template, str):
@ -410,7 +410,7 @@ class TemplatePartResult:
self._invalid_types[key] = type(value)
class FormatObject(object):
class FormatObject:
"""Object that can be used for formatting.
This is base that is valid for to be used in 'StringTemplate' value.
@ -460,6 +460,34 @@ class FormattingPart:
return True
return False
@staticmethod
def validate_key_is_matched(key):
"""Validate that opening has closing at correct place.
Future-proof, only square brackets are currently used in keys.
Example:
>>> is_matched("[]()()(((([])))")
False
>>> is_matched("[](){{{[]}}}")
True
Returns:
bool: Openings and closing are valid.
"""
mapping = dict(zip("({[", ")}]"))
opening = set(mapping.keys())
closing = set(mapping.values())
queue = []
for letter in key:
if letter in opening:
queue.append(mapping[letter])
elif letter in closing:
if not queue or letter != queue.pop():
return False
return not queue
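A quick sanity check of the matcher, called as a static method on 'FormattingPart':

assert FormattingPart.validate_key_is_matched("project[name]")
assert not FormattingPart.validate_key_is_matched("project[name")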
def format(self, data, result):
"""Format the formattings string.
@ -472,6 +500,12 @@ class FormattingPart:
result.add_output(result.realy_used_values[key])
return result
# Ensure the key is properly formed, i.e. brackets [({})] are properly closed.
if not self.validate_key_is_matched(key):
result.add_missing_key(key)
result.add_output(self.template)
return result
# check if key expects subdictionary keys (e.g. project[name])
existence_check = key
key_padding = list(KEY_PADDING_PATTERN.findall(existence_check))

View file

@ -1,7 +1,6 @@
import os
import re
import logging
import platform
import clique
@ -38,31 +37,7 @@ def create_hard_link(src_path, dst_path):
dst_path(str): Full path to a file where a link of source will be
added.
"""
# Use `os.link` if is available
# - should be for all platforms with newer python versions
if hasattr(os, "link"):
os.link(src_path, dst_path)
return
# Windows implementation of hardlinks
# - used in Python 2
if platform.system().lower() == "windows":
import ctypes
from ctypes.wintypes import BOOL
CreateHardLink = ctypes.windll.kernel32.CreateHardLinkW
CreateHardLink.argtypes = [
ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p
]
CreateHardLink.restype = BOOL
res = CreateHardLink(dst_path, src_path, None)
if res == 0:
raise ctypes.WinError()
return
# Raises not implemented error if gets here
raise NotImplementedError(
"Implementation of hardlink for current environment is missing."
)
os.link(src_path, dst_path)
def collect_frames(files):
@ -81,7 +56,10 @@ def collect_frames(files):
dict: {'/folder/product_v001.0001.png': '0001', ....}
"""
patterns = [clique.PATTERNS["frames"]]
# clique.PATTERNS["frames"] supports only `.1001.exr` not `_1001.exr` so
# we use a customized pattern.
pattern = "[_.](?P<index>(?P<padding>0*)\\d+)\\.\\D+\\d?$"
patterns = [pattern]
collections, remainder = clique.assemble(
files, minimum_items=1, patterns=patterns)
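With the customized pattern, underscore-separated frames collect too; a sketch with hypothetical filenames:

from ayon_core.lib import collect_frames  # assuming it is exported here

collect_frames(["shot_v001_1001.exr", "shot_v001_1002.exr"])
# -> {"shot_v001_1001.exr": "1001", "shot_v001_1002.exr": "1002"}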
@ -207,7 +185,7 @@ def get_last_version_from_path(path_dir, filter):
assert isinstance(filter, list) and (
len(filter) != 0), "`filter` argument needs to be list and not empty"
filtred_files = list()
filtered_files = list()
# form regex for filtering
pattern = r".*".join(filter)
@ -215,10 +193,10 @@ def get_last_version_from_path(path_dir, filter):
for file in os.listdir(path_dir):
if not re.findall(pattern, file):
continue
filtred_files.append(file)
filtered_files.append(file)
if filtred_files:
sorted(filtred_files)
return filtred_files[-1]
if filtered_files:
filtered_files.sort()
return filtered_files[-1]
return None


@ -1,44 +1,17 @@
# Deprecated file
# - the file contained a 'WeakMethod' implementation for Python 2 which is not
# needed anymore.
import warnings
import weakref
WeakMethod = getattr(weakref, "WeakMethod", None)
WeakMethod = weakref.WeakMethod
if WeakMethod is None:
class _WeakCallable:
def __init__(self, obj, func):
self.im_self = obj
self.im_func = func
def __call__(self, *args, **kws):
if self.im_self is None:
return self.im_func(*args, **kws)
else:
return self.im_func(self.im_self, *args, **kws)
class WeakMethod:
""" Wraps a function or, more importantly, a bound method in
a way that allows a bound method's object to be GCed, while
providing the same interface as a normal weak reference. """
def __init__(self, fn):
try:
self._obj = weakref.ref(fn.im_self)
self._meth = fn.im_func
except AttributeError:
# It's not a bound method
self._obj = None
self._meth = fn
def __call__(self):
if self._dead():
return None
return _WeakCallable(self._getobj(), self._meth)
def _dead(self):
return self._obj is not None and self._obj() is None
def _getobj(self):
if self._obj is None:
return None
return self._obj()
warnings.warn(
(
"'ayon_core.lib.python_2_comp' is deprecated."
"Please use 'weakref.WeakMethod'."
),
DeprecationWarning,
stacklevel=2
)


@ -5,43 +5,30 @@ import importlib
import inspect
import logging
import six
log = logging.getLogger(__name__)
def import_filepath(filepath, module_name=None):
"""Import python file as python module.
Python 2 and Python 3 compatibility.
Args:
filepath(str): Path to python file.
module_name(str): Name of loaded module. Only for Python 3. By default
filepath (str): Path to python file.
module_name (str): Name of loaded module. Only for Python 3. By default
is filled with filename of filepath.
"""
if module_name is None:
module_name = os.path.splitext(os.path.basename(filepath))[0]
# Make sure it is not 'unicode' in Python 2
module_name = str(module_name)
# Prepare module object where content of file will be parsed
module = types.ModuleType(module_name)
module.__file__ = filepath
if six.PY3:
# Use loader so module has full specs
module_loader = importlib.machinery.SourceFileLoader(
module_name, filepath
)
module_loader.exec_module(module)
else:
# Execute module code and store content to module
with open(filepath) as _stream:
# Execute content and store it to module object
six.exec_(_stream.read(), module.__dict__)
# Use loader so module has full specs
module_loader = importlib.machinery.SourceFileLoader(
module_name, filepath
)
module_loader.exec_module(module)
return module
@ -139,35 +126,31 @@ def classes_from_module(superclass, module):
return classes
def _import_module_from_dirpath_py2(dirpath, module_name, dst_module_name):
"""Import passed dirpath as python module using `imp`."""
def import_module_from_dirpath(dirpath, folder_name, dst_module_name=None):
"""Import passed directory as a python module.
Imported module can be assigned as a child attribute of an already loaded
module from `sys.modules` if it supports `setattr`. That is not the default
behavior of python modules, so the parent module must be a custom module
with that ability.
It is not possible to reimport an already cached module. If you need to
reimport a module, you have to remove it from the caches manually.
Args:
dirpath (str): Parent directory path of loaded folder.
folder_name (str): Folder name which should be imported inside passed
directory.
dst_module_name (str): Parent module name under which can be loaded
module added.
"""
# Import passed dirpath as python module
if dst_module_name:
full_module_name = "{}.{}".format(dst_module_name, module_name)
full_module_name = "{}.{}".format(dst_module_name, folder_name)
dst_module = sys.modules[dst_module_name]
else:
full_module_name = module_name
dst_module = None
if full_module_name in sys.modules:
return sys.modules[full_module_name]
import imp
fp, pathname, description = imp.find_module(module_name, [dirpath])
module = imp.load_module(full_module_name, fp, pathname, description)
if dst_module is not None:
setattr(dst_module, module_name, module)
return module
def _import_module_from_dirpath_py3(dirpath, module_name, dst_module_name):
"""Import passed dirpath as python module using Python 3 modules."""
if dst_module_name:
full_module_name = "{}.{}".format(dst_module_name, module_name)
dst_module = sys.modules[dst_module_name]
else:
full_module_name = module_name
full_module_name = folder_name
dst_module = None
# Skip import if is already imported
@ -191,7 +174,7 @@ def _import_module_from_dirpath_py3(dirpath, module_name, dst_module_name):
# Store module to destination module and `sys.modules`
# WARNING this must be done before module execution
if dst_module is not None:
setattr(dst_module, module_name, module)
setattr(dst_module, folder_name, module)
sys.modules[full_module_name] = module
@ -201,37 +184,6 @@ def _import_module_from_dirpath_py3(dirpath, module_name, dst_module_name):
return module
def import_module_from_dirpath(dirpath, folder_name, dst_module_name=None):
"""Import passed directory as a python module.
Python 2 and 3 compatible.
Imported module can be assigned as a child attribute of already loaded
module from `sys.modules` if has support of `setattr`. That is not default
behavior of python modules so parent module must be a custom module with
that ability.
It is not possible to reimport already cached module. If you need to
reimport module you have to remove it from caches manually.
Args:
dirpath(str): Parent directory path of loaded folder.
folder_name(str): Folder name which should be imported inside passed
directory.
dst_module_name(str): Parent module name under which can be loaded
module added.
"""
if six.PY3:
module = _import_module_from_dirpath_py3(
dirpath, folder_name, dst_module_name
)
else:
module = _import_module_from_dirpath_py2(
dirpath, folder_name, dst_module_name
)
return module
def is_func_signature_supported(func, *args, **kwargs):
"""Check if a function signature supports passed args and kwargs.
@ -275,25 +227,12 @@ def is_func_signature_supported(func, *args, **kwargs):
Returns:
bool: Function can pass in arguments.
"""
if hasattr(inspect, "signature"):
# Python 3 using 'Signature' object where we try to bind arg
# or kwarg. Using signature is recommended approach based on
# documentation.
sig = inspect.signature(func)
try:
sig.bind(*args, **kwargs)
return True
except TypeError:
pass
else:
# In Python 2 'signature' is not available so 'getcallargs' is used
# - 'getcallargs' is marked as deprecated since Python 3.0
try:
inspect.getcallargs(func, *args, **kwargs)
return True
except TypeError:
pass
sig = inspect.signature(func)
try:
sig.bind(*args, **kwargs)
return True
except TypeError:
pass
return False
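For example, the simplified implementation answers whether a callback accepts given arguments:

def on_event(event):
    pass

is_func_signature_supported(on_event, "event")           # True
is_func_signature_supported(on_event, "event", "extra")  # False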


@ -1152,9 +1152,7 @@ def convert_colorspace(
input_arg, input_path,
# Tell oiiotool which channels should be put to top stack
# (and output)
"--ch", channels_arg,
# Use first subimage
"--subimage", "0"
"--ch", channels_arg
])
if all([target_colorspace, view, display]):
@ -1168,12 +1166,12 @@ def convert_colorspace(
oiio_cmd.extend(additional_command_args)
if target_colorspace:
oiio_cmd.extend(["--colorconvert",
oiio_cmd.extend(["--colorconvert:subimages=0",
source_colorspace,
target_colorspace])
if view and display:
oiio_cmd.extend(["--iscolorspace", source_colorspace])
oiio_cmd.extend(["--ociodisplay", display, view])
oiio_cmd.extend(["--ociodisplay:subimages=0", display, view])
oiio_cmd.extend(["-o", output_path])


@ -1,40 +0,0 @@
# -*- coding: utf-8 -*-
from . import click_wrap
from .interfaces import (
IPluginPaths,
ITrayAddon,
ITrayModule,
ITrayAction,
ITrayService,
IHostAddon,
)
from .base import (
AYONAddon,
OpenPypeModule,
OpenPypeAddOn,
load_modules,
ModulesManager,
)
__all__ = (
"click_wrap",
"IPluginPaths",
"ITrayAddon",
"ITrayModule",
"ITrayAction",
"ITrayService",
"IHostAddon",
"AYONAddon",
"OpenPypeModule",
"OpenPypeAddOn",
"load_modules",
"ModulesManager",
)


@ -1,25 +0,0 @@
# Backwards compatibility support
# - TODO should be removed before release 1.0.0
from ayon_core.addon import (
AYONAddon,
AddonsManager,
load_addons,
)
from ayon_core.addon.base import (
OpenPypeModule,
OpenPypeAddOn,
)
ModulesManager = AddonsManager
load_modules = load_addons
__all__ = (
"AYONAddon",
"AddonsManager",
"load_addons",
"OpenPypeModule",
"OpenPypeAddOn",
"ModulesManager",
"load_modules",
)


@ -1 +0,0 @@
from ayon_core.addon.click_wrap import *


@ -1,21 +0,0 @@
from ayon_core.addon.interfaces import (
IPluginPaths,
ITrayAddon,
ITrayAction,
ITrayService,
IHostAddon,
)
ITrayModule = ITrayAddon
ILaunchHookPaths = object
__all__ = (
"IPluginPaths",
"ITrayAddon",
"ITrayAction",
"ITrayService",
"IHostAddon",
"ITrayModule",
"ILaunchHookPaths",
)


@ -3,7 +3,6 @@ from .constants import (
AVALON_INSTANCE_ID,
AYON_CONTAINER_ID,
AYON_INSTANCE_ID,
HOST_WORKFILE_EXTENSIONS,
)
from .anatomy import Anatomy
@ -51,11 +50,11 @@ from .load import (
)
from .publish import (
KnownPublishError,
PublishError,
PublishValidationError,
PublishXmlValidationError,
KnownPublishError,
AYONPyblishPluginMixin,
OpenPypePyblishPluginMixin,
OptionalPyblishPluginMixin,
)
@ -77,7 +76,6 @@ from .actions import (
from .context_tools import (
install_ayon_plugins,
install_openpype_plugins,
install_host,
uninstall_host,
is_installed,
@ -115,7 +113,6 @@ __all__ = (
"AVALON_INSTANCE_ID",
"AYON_CONTAINER_ID",
"AYON_INSTANCE_ID",
"HOST_WORKFILE_EXTENSIONS",
# --- Anatomy ---
"Anatomy",
@ -164,11 +161,11 @@ __all__ = (
"get_repres_contexts",
# --- Publish ---
"KnownPublishError",
"PublishError",
"PublishValidationError",
"PublishXmlValidationError",
"KnownPublishError",
"AYONPyblishPluginMixin",
"OpenPypePyblishPluginMixin",
"OptionalPyblishPluginMixin",
# --- Actions ---
@ -187,7 +184,6 @@ __all__ = (
# --- Process context ---
"install_ayon_plugins",
"install_openpype_plugins",
"install_host",
"uninstall_host",
"is_installed",


@ -699,6 +699,34 @@ def get_ocio_config_views(config_path):
)
def _get_config_path_from_profile_data(
profile, profile_type, template_data
):
"""Get config path from profile data.
Args:
profile (dict[str, Any]): Profile data.
profile_type (str): Profile type.
template_data (dict[str, Any]): Template data.
Returns:
dict[str, str]: Config data with path and template.
"""
template = profile[profile_type]
result = StringTemplate.format_strict_template(
template, template_data
)
normalized_path = str(result.normalized())
if not os.path.exists(normalized_path):
log.warning(f"Path was not found '{normalized_path}'.")
return None
return {
"path": normalized_path,
"template": template
}
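A hedged usage sketch of the extracted helper; the profile and template data below are illustrative only:
```
# Illustrative call only; keys and paths are made up.
profile = {
    "type": "custom_path",
    "custom_path": "{root[work]}/{project[name]}/config.ocio",
}
template_data = {
    "root": {"work": "/mnt/projects"},
    "project": {"name": "demo"},
}
config_data = _get_config_path_from_profile_data(
    profile, profile["type"], template_data
)
# -> {"path": "/mnt/projects/demo/config.ocio", "template": "..."}
#    or None when the formatted path does not exist on disk
```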
def _get_global_config_data(
project_name,
host_name,
@ -717,7 +745,7 @@ def _get_global_config_data(
2. Custom path to ocio config.
3. Path to 'ocioconfig' representation on product. Name of product can be
defined in settings. Product name can be regex but exact match is
always preferred.
always preferred. Fallback can be defined in case no product is found.
None is returned when no profile is found, when path
@ -755,30 +783,36 @@ def _get_global_config_data(
profile_type = profile["type"]
if profile_type in ("builtin_path", "custom_path"):
template = profile[profile_type]
result = StringTemplate.format_strict_template(
template, template_data
)
normalized_path = str(result.normalized())
if not os.path.exists(normalized_path):
log.warning(f"Path was not found '{normalized_path}'.")
return None
return {
"path": normalized_path,
"template": template
}
return _get_config_path_from_profile_data(
profile, profile_type, template_data)
# TODO decide if this is the right name for representation
repre_name = "ocioconfig"
published_product_data = profile["published_product"]
product_name = published_product_data["product_name"]
fallback_data = published_product_data["fallback"]
if product_name == "":
log.error(
"Colorspace OCIO config path cannot be set. "
"Profile is set to published product but `Product name` is empty."
)
return None
folder_info = template_data.get("folder")
if not folder_info:
log.warning("Folder info is missing.")
return None
log.info("Using fallback data for ocio config path.")
# in case no product was found we need to use fallback
fallback_type = fallback_data["fallback_type"]
return _get_config_path_from_profile_data(
fallback_data, fallback_type, template_data
)
folder_path = folder_info["path"]
product_name = profile["product_name"]
if folder_id is None:
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path, fields={"id"}
@ -797,12 +831,13 @@ def _get_global_config_data(
fields={"id", "name"}
)
}
if not product_entities_by_name:
log.debug(
f"No product entities were found for folder '{folder_path}' with"
f" product name filter '{product_name}'."
# in case no product was found we need to use fallback
fallback_type = fallback_data["type"]
return _get_config_path_from_profile_data(
fallback_data, fallback_type, template_data
)
return None
# Try to use exact match first, otherwise use first available product
product_entity = product_entities_by_name.get(product_name)
@ -837,6 +872,7 @@ def _get_global_config_data(
path = get_representation_path_with_anatomy(repre_entity, anatomy)
template = repre_entity["attrib"]["template"]
return {
"path": path,
"template": template,

View file

@ -4,20 +4,3 @@ AYON_INSTANCE_ID = "ayon.create.instance"
# Backwards compatibility
AVALON_CONTAINER_ID = "pyblish.avalon.container"
AVALON_INSTANCE_ID = "pyblish.avalon.instance"
# TODO get extensions from host implementations
HOST_WORKFILE_EXTENSIONS = {
"blender": [".blend"],
"celaction": [".scn"],
"tvpaint": [".tvpp"],
"fusion": [".comp"],
"harmony": [".zip"],
"houdini": [".hip", ".hiplc", ".hipnc"],
"maya": [".ma", ".mb"],
"nuke": [".nk"],
"hiero": [".hrox"],
"photoshop": [".psd", ".psb"],
"premiere": [".prproj"],
"resolve": [".drp"],
"aftereffects": [".aep"]
}

View file

@ -132,7 +132,10 @@ def install_host(host):
def modified_emit(obj, record):
"""Method replacing `emit` in Pyblish's MessageHandler."""
record.msg = record.getMessage()
try:
record.msg = record.getMessage()
except Exception:
record.msg = str(record.msg)
obj.records.append(record)
MessageHandler.emit = modified_emit
@ -234,16 +237,6 @@ def install_ayon_plugins(project_name=None, host_name=None):
register_inventory_action_path(path)
def install_openpype_plugins(project_name=None, host_name=None):
"""Install AYON core plugins and make sure the core is initialized.
Deprecated:
Use `install_ayon_plugins` instead.
"""
install_ayon_plugins(project_name, host_name)
def uninstall_host():
"""Undo all of what `install()` did"""
host = registered_host()

View file

@ -4,21 +4,41 @@ from .constants import (
PRE_CREATE_THUMBNAIL_KEY,
DEFAULT_VARIANT_VALUE,
)
from .exceptions import (
UnavailableSharedData,
ImmutableKeyError,
HostMissRequiredMethod,
ConvertorsOperationFailed,
ConvertorsFindFailed,
ConvertorsConversionFailed,
CreatorError,
CreatorsCreateFailed,
CreatorsCollectionFailed,
CreatorsSaveFailed,
CreatorsRemoveFailed,
CreatorsOperationFailed,
TaskNotSetError,
TemplateFillError,
)
from .structures import (
CreatedInstance,
ConvertorItem,
AttributeValues,
CreatorAttributeValues,
PublishAttributeValues,
PublishAttributes,
)
from .utils import (
get_last_versions_for_instances,
get_next_versions_for_instances,
)
from .product_name import (
TaskNotSetError,
get_product_name,
get_product_name_template,
)
from .creator_plugins import (
CreatorError,
BaseCreator,
Creator,
AutoCreator,
@ -36,10 +56,7 @@ from .creator_plugins import (
cache_and_get_instances,
)
from .context import (
CreatedInstance,
CreateContext
)
from .context import CreateContext
from .legacy_create import (
LegacyCreator,
@ -53,10 +70,31 @@ __all__ = (
"PRE_CREATE_THUMBNAIL_KEY",
"DEFAULT_VARIANT_VALUE",
"UnavailableSharedData",
"ImmutableKeyError",
"HostMissRequiredMethod",
"ConvertorsOperationFailed",
"ConvertorsFindFailed",
"ConvertorsConversionFailed",
"CreatorError",
"CreatorsCreateFailed",
"CreatorsCollectionFailed",
"CreatorsSaveFailed",
"CreatorsRemoveFailed",
"CreatorsOperationFailed",
"TaskNotSetError",
"TemplateFillError",
"CreatedInstance",
"ConvertorItem",
"AttributeValues",
"CreatorAttributeValues",
"PublishAttributeValues",
"PublishAttributes",
"get_last_versions_for_instances",
"get_next_versions_for_instances",
"TaskNotSetError",
"get_product_name",
"get_product_name_template",
@ -78,7 +116,6 @@ __all__ = (
"cache_and_get_instances",
"CreatedInstance",
"CreateContext",
"LegacyCreator",

View file

@ -0,0 +1,313 @@
import copy
_EMPTY_VALUE = object()
class TrackChangesItem:
"""Helper object to track changes in data.
Has access to the full old and new data and creates a deep copy of both,
so it is not necessary to copy them before passing them in.
Can work as a dictionary if the old or new value is a dictionary. In
that case the received object is another 'TrackChangesItem'.
The goal is to be able to get the old or new value as it was, or only the
changed values, or information about removed/changed keys, and all of that
on any "dictionary level".
```
# Example of possible usages
>>> old_value = {
... "key_1": "value_1",
... "key_2": {
... "key_sub_1": 1,
... "key_sub_2": {
... "enabled": True
... }
... },
... "key_3": "value_2"
... }
>>> new_value = {
... "key_1": "value_1",
... "key_2": {
... "key_sub_2": {
... "enabled": False
... },
... "key_sub_3": 3
... },
... "key_3": "value_3"
... }
>>> changes = TrackChangesItem(old_value, new_value)
>>> changes.changed
True
>>> changes["key_2"]["key_sub_1"].new_value is None
True
>>> list(sorted(changes.changed_keys))
['key_2', 'key_3']
>>> changes["key_2"]["key_sub_2"]["enabled"].changed
True
>>> changes["key_2"].removed_keys
{'key_sub_1'}
>>> list(sorted(changes["key_2"].available_keys))
['key_sub_1', 'key_sub_2', 'key_sub_3']
>>> changes.new_value == new_value
True
# Get only changed values
only_changed_new_values = {
key: changes[key].new_value
for key in changes.changed_keys
}
```
Args:
old_value (Any): Old value.
new_value (Any): New value.
"""
def __init__(self, old_value, new_value):
self._changed = old_value != new_value
# Resolve if value is '_EMPTY_VALUE' after comparison of the values
if old_value is _EMPTY_VALUE:
old_value = None
if new_value is _EMPTY_VALUE:
new_value = None
self._old_value = copy.deepcopy(old_value)
self._new_value = copy.deepcopy(new_value)
self._old_is_dict = isinstance(old_value, dict)
self._new_is_dict = isinstance(new_value, dict)
self._old_keys = None
self._new_keys = None
self._available_keys = None
self._removed_keys = None
self._changed_keys = None
self._sub_items = None
def __getitem__(self, key):
"""Getter looks into subitems if object is dictionary."""
if self._sub_items is None:
self._prepare_sub_items()
return self._sub_items[key]
def __bool__(self):
"""Boolean of object is if old and new value are the same."""
return self._changed
def get(self, key, default=None):
"""Try to get sub item."""
if self._sub_items is None:
self._prepare_sub_items()
return self._sub_items.get(key, default)
@property
def old_value(self):
"""Get copy of old value.
Returns:
Any: Whatever old value was.
"""
return copy.deepcopy(self._old_value)
@property
def new_value(self):
"""Get copy of new value.
Returns:
Any: Whatever new value was.
"""
return copy.deepcopy(self._new_value)
@property
def changed(self):
"""Value changed.
Returns:
bool: If data changed.
"""
return self._changed
@property
def is_dict(self):
"""Object can be used as dictionary.
Returns:
bool: When can be used that way.
"""
return self._old_is_dict or self._new_is_dict
@property
def changes(self):
"""Get changes in raw data.
This method should be used only if 'is_dict' value is 'True'.
Returns:
Dict[str, Tuple[Any, Any]]: Changes are by key in tuple
(<old value>, <new value>). If 'is_dict' is 'False' then
output is always empty dictionary.
"""
output = {}
if not self.is_dict:
return output
old_value = self.old_value
new_value = self.new_value
for key in self.changed_keys:
_old = None
_new = None
if self._old_is_dict:
_old = old_value.get(key)
if self._new_is_dict:
_new = new_value.get(key)
output[key] = (_old, _new)
return output
# Methods/properties that can be used when 'is_dict' is 'True'
@property
def old_keys(self):
"""Keys from old value.
Empty set is returned if old value is not a dict.
Returns:
Set[str]: Keys from old value.
"""
if self._old_keys is None:
self._prepare_keys()
return set(self._old_keys)
@property
def new_keys(self):
"""Keys from new value.
Empty set is returned if new value is not a dict.
Returns:
Set[str]: Keys from new value.
"""
if self._new_keys is None:
self._prepare_keys()
return set(self._new_keys)
@property
def changed_keys(self):
"""Keys that has changed from old to new value.
Empty set is returned if both old and new value are not a dict.
Returns:
Set[str]: Keys of changed keys.
"""
if self._changed_keys is None:
self._prepare_sub_items()
return set(self._changed_keys)
@property
def available_keys(self):
"""All keys that are available in old and new value.
Empty set is returned if both old and new value are not a dict.
Output is Union of 'old_keys' and 'new_keys'.
Returns:
Set[str]: All keys from old and new value.
"""
if self._available_keys is None:
self._prepare_keys()
return set(self._available_keys)
@property
def removed_keys(self):
"""Key that are not available in new value but were in old value.
Returns:
Set[str]: All removed keys.
"""
if self._removed_keys is None:
self._prepare_sub_items()
return set(self._removed_keys)
def _prepare_keys(self):
old_keys = set()
new_keys = set()
if self._old_is_dict and self._new_is_dict:
old_keys = set(self._old_value.keys())
new_keys = set(self._new_value.keys())
elif self._old_is_dict:
old_keys = set(self._old_value.keys())
elif self._new_is_dict:
new_keys = set(self._new_value.keys())
self._old_keys = old_keys
self._new_keys = new_keys
self._available_keys = old_keys | new_keys
self._removed_keys = old_keys - new_keys
def _prepare_sub_items(self):
sub_items = {}
changed_keys = set()
old_keys = self.old_keys
new_keys = self.new_keys
new_value = self.new_value
old_value = self.old_value
if self._old_is_dict and self._new_is_dict:
for key in self.available_keys:
item = TrackChangesItem(
old_value.get(key), new_value.get(key)
)
sub_items[key] = item
if item.changed or key not in old_keys or key not in new_keys:
changed_keys.add(key)
elif self._old_is_dict:
old_keys = set(old_value.keys())
available_keys = set(old_keys)
changed_keys = set(available_keys)
for key in available_keys:
# NOTE Use '_EMPTY_VALUE' because old value could be 'None'
# which would result in "unchanged" item
sub_items[key] = TrackChangesItem(
old_value.get(key), _EMPTY_VALUE
)
elif self._new_is_dict:
new_keys = set(new_value.keys())
available_keys = set(new_keys)
changed_keys = set(available_keys)
for key in available_keys:
# NOTE Use '_EMPTY_VALUE' because new value could be 'None'
# which would result in "unchanged" item
sub_items[key] = TrackChangesItem(
_EMPTY_VALUE, new_value.get(key)
)
self._sub_items = sub_items
self._changed_keys = changed_keys
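Continuing the docstring example above, the 'changes' property returns raw (old, new) tuples per changed key:
```
changes = TrackChangesItem(old_value, new_value)
# Top-level raw changes as (<old value>, <new value>) tuples
assert changes.changes["key_3"] == ("value_2", "value_3")
# Nested levels are 'TrackChangesItem' objects as well
assert changes["key_2"].changes["key_sub_1"] == (1, None)
```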

File diff suppressed because it is too large Load diff

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
import copy
import collections
from typing import TYPE_CHECKING, Optional
from typing import TYPE_CHECKING, Optional, Dict, Any
from abc import ABC, abstractmethod
@ -19,21 +19,12 @@ from .constants import DEFAULT_VARIANT_VALUE
from .product_name import get_product_name
from .utils import get_next_versions_for_instances
from .legacy_create import LegacyCreator
from .structures import CreatedInstance
if TYPE_CHECKING:
from ayon_core.lib import AbstractAttrDef
# Avoid cyclic imports
from .context import CreateContext, CreatedInstance, UpdateData # noqa: F401
class CreatorError(Exception):
"""Should be raised when creator failed because of known issue.
Message of error should be user readable.
"""
def __init__(self, message):
super(CreatorError, self).__init__(message)
from .context import CreateContext, UpdateData # noqa: F401
class ProductConvertorPlugin(ABC):
@ -214,6 +205,7 @@ class BaseCreator(ABC):
self.headless = headless
self.apply_settings(project_settings)
self.register_callbacks()
@staticmethod
def _get_settings_values(project_settings, category_name, plugin_name):
@ -299,6 +291,14 @@ class BaseCreator(ABC):
))
setattr(self, key, value)
def register_callbacks(self):
"""Register callbacks for creator.
Default implementation does nothing. It can be overridden to register
callbacks for creator.
"""
pass
@property
def identifier(self):
"""Identifier of creator (must be unique).
@ -372,6 +372,35 @@ class BaseCreator(ABC):
self._log = Logger.get_logger(self.__class__.__name__)
return self._log
def _create_instance(
self,
product_name: str,
data: Dict[str, Any],
product_type: Optional[str] = None
) -> CreatedInstance:
"""Create instance and add instance to context.
Args:
product_name (str): Product name.
data (Dict[str, Any]): Instance data.
product_type (Optional[str]): Product type, object attribute
'product_type' is used if not passed.
Returns:
CreatedInstance: Created instance.
"""
if product_type is None:
product_type = self.product_type
instance = CreatedInstance(
product_type,
product_name,
data,
creator=self,
)
self._add_instance_to_context(instance)
return instance
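A hedged sketch of a create plugin using the new '_create_instance' helper; the plugin and its data are hypothetical:
```
# Hypothetical creator; real plugins implement more methods.
class ExampleCreator(Creator):
    identifier = "example.creator"
    product_type = "example"

    def create(self, product_name, instance_data, pre_create_data):
        # Creates the 'CreatedInstance' and registers it in the
        # 'CreateContext' in a single call.
        return self._create_instance(product_name, instance_data)
```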
def _add_instance_to_context(self, instance):
"""Helper method to add instance to create context.
@ -561,6 +590,16 @@ class BaseCreator(ABC):
return self.instance_attr_defs
def get_attr_defs_for_instance(self, instance):
"""Get attribute definitions for an instance.
Args:
instance (CreatedInstance): Instance for which to get
attribute definitions.
"""
return self.get_instance_attr_defs()
@property
def collection_shared_data(self):
"""Access to shared data that can be used during creator's collection.
@ -654,7 +693,7 @@ class Creator(BaseCreator):
cls._get_default_variant_wrap,
cls._set_default_variant_wrap
)
super(Creator, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
@property
def show_order(self):
@ -792,6 +831,16 @@ class Creator(BaseCreator):
"""
return self.pre_create_attr_defs
def _pre_create_attr_defs_changed(self):
"""Called when pre-create attribute definitions change.
Create plugin can call this method when it knows that
'get_pre_create_attr_defs' should be called again.
"""
self.create_context.create_plugin_pre_create_attr_defs_changed(
self.identifier
)
class HiddenCreator(BaseCreator):
@abstractmethod

View file

@ -0,0 +1,127 @@
import os
import inspect
class UnavailableSharedData(Exception):
"""Shared data are not available at the moment when are accessed."""
pass
class ImmutableKeyError(TypeError):
"""Accessed key is immutable so does not allow changes or removals."""
def __init__(self, key, msg=None):
self.immutable_key = key
if not msg:
msg = "Key \"{}\" is immutable and does not allow changes.".format(
key
)
super().__init__(msg)
class HostMissRequiredMethod(Exception):
"""Host does not have implemented required functions for creation."""
def __init__(self, host, missing_methods):
self.missing_methods = missing_methods
self.host = host
joined_methods = ", ".join(
['"{}"'.format(name) for name in missing_methods]
)
dirpath = os.path.dirname(
os.path.normpath(inspect.getsourcefile(host))
)
dirpath_parts = dirpath.split(os.path.sep)
host_name = dirpath_parts.pop(-1)
if host_name == "api":
host_name = dirpath_parts.pop(-1)
msg = "Host \"{}\" does not have implemented method/s {}".format(
host_name, joined_methods
)
super().__init__(msg)
class ConvertorsOperationFailed(Exception):
def __init__(self, msg, failed_info):
super().__init__(msg)
self.failed_info = failed_info
class ConvertorsFindFailed(ConvertorsOperationFailed):
def __init__(self, failed_info):
msg = "Failed to find incompatible products"
super().__init__(msg, failed_info)
class ConvertorsConversionFailed(ConvertorsOperationFailed):
def __init__(self, failed_info):
msg = "Failed to convert incompatible products"
super().__init__(msg, failed_info)
class CreatorError(Exception):
"""Should be raised when creator failed because of known issue.
Message of error should be artist friendly.
"""
pass
class CreatorsOperationFailed(Exception):
"""Raised when a creator process crashes in 'CreateContext'.
The exception contains information about the creator and error. The data
are prepared using 'prepare_failed_creator_operation_info' and can be
serialized using json.
Usage is for UI purposes which may not have access to exceptions directly
and would not be able to catch exceptions 'per creator'.
Args:
msg (str): General error message.
failed_info (list[dict[str, Any]]): List of failed creators with
exception message and optionally formatted traceback.
"""
def __init__(self, msg, failed_info):
super().__init__(msg)
self.failed_info = failed_info
class CreatorsCollectionFailed(CreatorsOperationFailed):
def __init__(self, failed_info):
msg = "Failed to collect instances"
super().__init__(msg, failed_info)
class CreatorsSaveFailed(CreatorsOperationFailed):
def __init__(self, failed_info):
msg = "Failed update instance changes"
super().__init__(msg, failed_info)
class CreatorsRemoveFailed(CreatorsOperationFailed):
def __init__(self, failed_info):
msg = "Failed to remove instances"
super().__init__(msg, failed_info)
class CreatorsCreateFailed(CreatorsOperationFailed):
def __init__(self, failed_info):
msg = "Failed to create instances"
super().__init__(msg, failed_info)
class TaskNotSetError(KeyError):
def __init__(self, msg=None):
if not msg:
msg = "Creator's product name template requires task name."
super().__init__(msg)
class TemplateFillError(Exception):
def __init__(self, msg=None):
if not msg:
msg = "Creator's product name template is missing key value."
super().__init__(msg)

View file

@ -14,7 +14,7 @@ from ayon_core.pipeline.constants import AVALON_INSTANCE_ID
from .product_name import get_product_name
class LegacyCreator(object):
class LegacyCreator:
"""Determine how assets are created"""
label = None
product_type = None

View file

@ -1,23 +1,9 @@
import ayon_api
from ayon_core.lib import StringTemplate, filter_profiles, prepare_template_data
from ayon_core.settings import get_project_settings
from ayon_core.lib import filter_profiles, prepare_template_data
from .constants import DEFAULT_PRODUCT_TEMPLATE
class TaskNotSetError(KeyError):
def __init__(self, msg=None):
if not msg:
msg = "Creator's product name template requires task name."
super(TaskNotSetError, self).__init__(msg)
class TemplateFillError(Exception):
def __init__(self, msg=None):
if not msg:
msg = "Creator's product name template is missing key value."
super(TemplateFillError, self).__init__(msg)
from .exceptions import TaskNotSetError, TemplateFillError
def get_product_name_template(
@ -183,7 +169,10 @@ def get_product_name(
fill_pairs[key] = value
try:
return template.format(**prepare_template_data(fill_pairs))
return StringTemplate.format_strict_template(
template=template,
data=prepare_template_data(fill_pairs)
)
except KeyError as exp:
raise TemplateFillError(
"Value for {} key is missing in template '{}'."

View file

@ -0,0 +1,872 @@
import copy
import collections
from uuid import uuid4
from typing import Optional, Dict, List, Any
from ayon_core.lib.attribute_definitions import (
AbstractAttrDef,
UnknownDef,
serialize_attr_defs,
deserialize_attr_defs,
)
from ayon_core.pipeline import (
AYON_INSTANCE_ID,
AVALON_INSTANCE_ID,
)
from .exceptions import ImmutableKeyError
from .changes import TrackChangesItem
class ConvertorItem:
"""Item representing convertor plugin.
Args:
identifier (str): Identifier of convertor.
label (str): Label which will be shown in UI.
"""
def __init__(self, identifier, label):
self._id = str(uuid4())
self.identifier = identifier
self.label = label
@property
def id(self):
return self._id
def to_data(self):
return {
"id": self.id,
"identifier": self.identifier,
"label": self.label
}
@classmethod
def from_data(cls, data):
obj = cls(data["identifier"], data["label"])
obj._id = data["id"]
return obj
class InstanceMember:
"""Representation of instance member.
TODO:
Implement and use!
"""
def __init__(self, instance, name):
self.instance = instance
instance.add_members(self)
self.name = name
self._actions = []
def add_action(self, label, callback):
self._actions.append({
"label": label,
"callback": callback
})
class AttributeValues:
"""Container which keep values of Attribute definitions.
The goal is to have one object which holds values of attribute definitions
for a single instance.
Has dictionary-like methods. Not all of them are allowed all the time.
Args:
parent (Union[CreatedInstance, PublishAttributes]): Parent object.
key (str): Key of attribute values.
attr_defs (List[AbstractAttrDef]): Definitions of value type
and properties.
values (dict): Values after possible conversion.
origin_data (dict): Values loaded from host before conversion.
"""
def __init__(self, parent, key, attr_defs, values, origin_data=None):
self._parent = parent
self._key = key
if origin_data is None:
origin_data = copy.deepcopy(values)
self._origin_data = origin_data
attr_defs_by_key = {
attr_def.key: attr_def
for attr_def in attr_defs
if attr_def.is_value_def
}
for key, value in values.items():
if key not in attr_defs_by_key:
new_def = UnknownDef(key, label=key, default=value)
attr_defs.append(new_def)
attr_defs_by_key[key] = new_def
self._attr_defs = attr_defs
self._attr_defs_by_key = attr_defs_by_key
self._data = {}
for attr_def in attr_defs:
value = values.get(attr_def.key)
if value is None:
continue
converted_value = attr_def.convert_value(value)
if converted_value == value:
self._data[attr_def.key] = value
def __setitem__(self, key, value):
if key not in self._attr_defs_by_key:
raise KeyError("Key \"{}\" was not found.".format(key))
self.update({key: value})
def __getitem__(self, key):
if key not in self._attr_defs_by_key:
return self._data[key]
return self._data.get(key, self._attr_defs_by_key[key].default)
def __contains__(self, key):
return key in self._attr_defs_by_key
def __iter__(self):
for key in self._attr_defs_by_key:
yield key
def get(self, key, default=None):
if key in self._attr_defs_by_key:
return self[key]
return default
def keys(self):
return self._attr_defs_by_key.keys()
def values(self):
for key in self._attr_defs_by_key.keys():
yield self._data.get(key)
def items(self):
for key in self._attr_defs_by_key.keys():
yield key, self._data.get(key)
def get_attr_def(self, key, default=None):
return self._attr_defs_by_key.get(key, default)
def update(self, value):
changes = {}
for _key, _value in dict(value).items():
if _key in self._data and self._data.get(_key) == _value:
continue
self._data[_key] = _value
changes[_key] = _value
if changes:
self._parent.attribute_value_changed(self._key, changes)
def pop(self, key, default=None):
has_key = key in self._data
value = self._data.pop(key, default)
# Remove attribute definition if it is 'UnknownDef'
# - gives option to get rid of unknown values
attr_def = self._attr_defs_by_key.get(key)
if isinstance(attr_def, UnknownDef):
self._attr_defs_by_key.pop(key)
self._attr_defs.remove(attr_def)
elif has_key:
self._parent.attribute_value_changed(self._key, {key: None})
return value
def reset_values(self):
self._data = {}
def mark_as_stored(self):
self._origin_data = copy.deepcopy(self._data)
@property
def attr_defs(self):
"""Pointer to attribute definitions.
Returns:
List[AbstractAttrDef]: Attribute definitions.
"""
return list(self._attr_defs)
@property
def origin_data(self):
return copy.deepcopy(self._origin_data)
def data_to_store(self):
"""Create new dictionary with data to store.
Returns:
Dict[str, Any]: Attribute values that should be stored.
"""
output = {}
for key in self._data:
output[key] = self[key]
for key, attr_def in self._attr_defs_by_key.items():
if key not in output:
output[key] = attr_def.default
return output
def get_serialized_attr_defs(self):
"""Serialize attribute definitions to json serializable types.
Returns:
List[Dict[str, Any]]: Serialized attribute definitions.
"""
return serialize_attr_defs(self._attr_defs)
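A hedged sketch of the dictionary-like access described above, using 'NumberDef' from 'ayon_core.lib.attribute_definitions'; the stub parent stands in for CreatedInstance/PublishAttributes:
```
from ayon_core.lib.attribute_definitions import NumberDef

class _StubParent:
    # Stand-in for the real parent object in this sketch
    def attribute_value_changed(self, key, changes):
        print(key, changes)

attr_defs = [NumberDef("review_width", default=1920)]
values = AttributeValues(_StubParent(), "creator_attributes", attr_defs, {})
values["review_width"]        # -> 1920, default from the definition
values["review_width"] = 960  # notifies the parent of the change
values.data_to_store()        # -> {"review_width": 960}
```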
class CreatorAttributeValues(AttributeValues):
"""Creator specific attribute values of an instance."""
@property
def instance(self):
return self._parent
class PublishAttributeValues(AttributeValues):
"""Publish plugin specific attribute values.
Values are for single plugin which can be on `CreatedInstance`
or context values stored on `CreateContext`.
"""
@property
def publish_attributes(self):
return self._parent
class PublishAttributes:
"""Wrapper for publish plugin attribute definitions.
Cares about handling attribute definitions of multiple publish plugins.
Keep information about attribute definitions and their values.
Args:
parent(CreatedInstance, CreateContext): Parent for which the data
are stored and from which the data are loaded.
origin_data(dict): Loaded data by plugin class name.
"""
def __init__(self, parent, origin_data):
self._parent = parent
self._origin_data = copy.deepcopy(origin_data)
self._data = copy.deepcopy(origin_data)
def __getitem__(self, key):
return self._data[key]
def __contains__(self, key):
return key in self._data
def keys(self):
return self._data.keys()
def values(self):
return self._data.values()
def items(self):
return self._data.items()
def get(self, key, default=None):
return self._data.get(key, default)
def pop(self, key, default=None):
"""Remove or reset value for plugin.
Plugin values are reset to defaults if the plugin is available,
but data of a plugin which was not found are removed.
Args:
key(str): Plugin name.
default: Default value if plugin was not found.
"""
if key not in self._data:
return default
value = self._data[key]
if not isinstance(value, AttributeValues):
self.attribute_value_changed(key, None)
return self._data.pop(key)
value_item = self._data[key]
# Prepare value to return
output = value_item.data_to_store()
# Reset values
value_item.reset_values()
self.attribute_value_changed(
key, value_item.data_to_store()
)
return output
def mark_as_stored(self):
self._origin_data = copy.deepcopy(self.data_to_store())
def data_to_store(self):
"""Convert attribute values to "data to store"."""
output = {}
for key, attr_value in self._data.items():
if isinstance(attr_value, AttributeValues):
output[key] = attr_value.data_to_store()
else:
output[key] = attr_value
return output
@property
def origin_data(self):
return copy.deepcopy(self._origin_data)
def attribute_value_changed(self, key, changes):
self._parent.publish_attribute_value_changed(key, changes)
def set_publish_plugin_attr_defs(
self,
plugin_name: str,
attr_defs: List[AbstractAttrDef],
value: Optional[Dict[str, Any]] = None
):
"""Set attribute definitions for plugin.
Args:
plugin_name (str): Name of plugin.
attr_defs (List[AbstractAttrDef]): Attribute definitions.
value (Optional[Dict[str, Any]]): Attribute values.
"""
# TODO what if 'attr_defs' is 'None'?
if value is None:
value = self._data.get(plugin_name)
if value is None:
value = {}
self._data[plugin_name] = PublishAttributeValues(
self, plugin_name, attr_defs, value, value
)
def serialize_attributes(self):
return {
"attr_defs": {
plugin_name: attrs_value.get_serialized_attr_defs()
for plugin_name, attrs_value in self._data.items()
},
}
def deserialize_attributes(self, data):
attr_defs = deserialize_attr_defs(data["attr_defs"])
origin_data = self._origin_data
data = self._data
self._data = {}
added_keys = set()
for plugin_name, attr_defs_data in attr_defs.items():
attr_defs = deserialize_attr_defs(attr_defs_data)
value = data.get(plugin_name) or {}
orig_value = copy.deepcopy(origin_data.get(plugin_name) or {})
self._data[plugin_name] = PublishAttributeValues(
self, plugin_name, attr_defs, value, orig_value
)
for key, value in data.items():
if key not in added_keys:
self._data[key] = value
class InstanceContextInfo:
def __init__(
self,
folder_path: Optional[str],
task_name: Optional[str],
folder_is_valid: bool,
task_is_valid: bool,
):
self.folder_path: Optional[str] = folder_path
self.task_name: Optional[str] = task_name
self.folder_is_valid: bool = folder_is_valid
self.task_is_valid: bool = task_is_valid
@property
def is_valid(self) -> bool:
return self.folder_is_valid and self.task_is_valid
class CreatedInstance:
"""Instance entity with data that will be stored to workfile.
The `data` argument must be a required argument containing the minimum
information about the instance, like "folderPath" and "task", and all data
used for filling the product name, as creators may have custom data for
product name filling.
Notes:
Object have 2 possible initialization. One using 'creator' object which
is recommended for api usage. Second by passing information about
creator.
Args:
product_type (str): Product type that will be created.
product_name (str): Name of product that will be created.
data (Dict[str, Any]): Data used for filling product name or override
data from already existing instance.
creator (BaseCreator): Creator responsible for instance.
"""
# Keys that can't be changed or removed from data after loading using
# creator.
# - 'creator_attributes' and 'publish_attributes' can change values of
# their individual children but not on their own
__immutable_keys = (
"id",
"instance_id",
"productType",
"creator_identifier",
"creator_attributes",
"publish_attributes"
)
# Keys that can be changed, but should not be removed from instance
__required_keys = {
"folderPath": None,
"task": None,
"productName": None,
"active": True,
}
def __init__(
self,
product_type,
product_name,
data,
creator,
):
self._creator = creator
creator_identifier = creator.identifier
group_label = creator.get_group_label()
creator_label = creator.label
self._creator_label = creator_label
self._group_label = group_label or creator_identifier
# Instance members may have actions on them
# TODO implement members logic
self._members = []
# Data that can be used for lifetime of object
self._transient_data = {}
# Create a copy of passed data to avoid changing them on the fly
data = copy.deepcopy(data or {})
# Pop dictionary values that will be converted to objects to be able
#   to catch changes
orig_creator_attributes = data.pop("creator_attributes", None) or {}
orig_publish_attributes = data.pop("publish_attributes", None) or {}
# Store original value of passed data
self._orig_data = copy.deepcopy(data)
# Pop 'productType' and 'productName' to prevent unexpected changes
data.pop("productType", None)
data.pop("productName", None)
# Backwards compatibility with OpenPype instances
data.pop("family", None)
data.pop("subset", None)
asset_name = data.pop("asset", None)
if "folderPath" not in data:
data["folderPath"] = asset_name
# QUESTION Does it make sense to have data stored as ordered dict?
self._data = collections.OrderedDict()
# QUESTION Do we need this "id" information on instance?
item_id = data.get("id")
# TODO use only 'AYON_INSTANCE_ID' when all hosts support it
if item_id not in {AYON_INSTANCE_ID, AVALON_INSTANCE_ID}:
item_id = AVALON_INSTANCE_ID
self._data["id"] = item_id
self._data["productType"] = product_type
self._data["productName"] = product_name
self._data["active"] = data.get("active", True)
self._data["creator_identifier"] = creator_identifier
# Pop from source data all keys that are defined in `_data` before
#   this moment and throw their values away
# - they should be the same, and if they are not, they should not
#   change already set values
for key in self._data.keys():
if key in data:
data.pop(key)
self._data["variant"] = self._data.get("variant") or ""
# Stored creator specific attribute values
# {key: value}
creator_values = copy.deepcopy(orig_creator_attributes)
self._data["creator_attributes"] = creator_values
# Stored publish specific attribute values
# {<plugin name>: {key: value}}
self._data["publish_attributes"] = PublishAttributes(
self, orig_publish_attributes
)
if data:
self._data.update(data)
for key, default in self.__required_keys.items():
self._data.setdefault(key, default)
if not self._data.get("instance_id"):
self._data["instance_id"] = str(uuid4())
creator_attr_defs = creator.get_attr_defs_for_instance(self)
self.set_create_attr_defs(
creator_attr_defs, creator_values
)
def __str__(self):
return (
"<CreatedInstance {product[name]}"
" ({product[type]}[{creator_identifier}])> {data}"
).format(
creator_identifier=self.creator_identifier,
product={"name": self.product_name, "type": self.product_type},
data=str(self._data)
)
# --- Dictionary like methods ---
def __getitem__(self, key):
return self._data[key]
def __contains__(self, key):
return key in self._data
def __setitem__(self, key, value):
# Validate immutable keys
if key in self.__immutable_keys:
if value == self._data.get(key):
return
# Raise exception if key is immutable and value has changed
raise ImmutableKeyError(key)
if key in self._data and self._data[key] == value:
return
self._data[key] = value
self._create_context.instance_values_changed(
self.id, {key: value}
)
def get(self, key, default=None):
return self._data.get(key, default)
def pop(self, key, *args, **kwargs):
# Raise exception when trying to pop a key which is immutable
if key in self.__immutable_keys:
raise ImmutableKeyError(key)
has_key = key in self._data
output = self._data.pop(key, *args, **kwargs)
if has_key:
if key in self.__required_keys:
self._data[key] = self.__required_keys[key]
self._create_context.instance_values_changed(
self.id, {key: None}
)
return output
def keys(self):
return self._data.keys()
def values(self):
return self._data.values()
def items(self):
return self._data.items()
# ------
@property
def product_type(self):
return self._data["productType"]
@property
def product_name(self):
return self._data["productName"]
@property
def label(self):
label = self._data.get("label")
if not label:
label = self.product_name
return label
@property
def group_label(self):
label = self._data.get("group")
if label:
return label
return self._group_label
@property
def origin_data(self):
output = copy.deepcopy(self._orig_data)
output["creator_attributes"] = self.creator_attributes.origin_data
output["publish_attributes"] = self.publish_attributes.origin_data
return output
@property
def creator_identifier(self):
return self._data["creator_identifier"]
@property
def creator_label(self):
return self._creator.label or self.creator_identifier
@property
def id(self):
"""Instance identifier.
Returns:
str: UUID of instance.
"""
return self._data["instance_id"]
@property
def data(self):
"""Legacy access to data.
Access to data is needed to modify values.
Returns:
CreatedInstance: Object can be used as dictionary but with
validations of immutable keys.
"""
return self
@property
def transient_data(self):
"""Data stored for lifetime of instance object.
These data are not stored to scene and will be lost on object
deletion.
Can be used to store objects. In some host implementations it is not
possible to reference an object in the scene with a unique identifier
(e.g. a node in Fusion). In that case it is handy to store the object
here. Should be used that way only if instance data are stored on the
node itself.
Returns:
Dict[str, Any]: Dictionary object where you can store data related
to instance for lifetime of instance object.
"""
return self._transient_data
def changes(self):
"""Calculate and return changes."""
return TrackChangesItem(self.origin_data, self.data_to_store())
def mark_as_stored(self):
"""Should be called when instance data are stored.
Origin data are replaced by current data so changes are cleared.
"""
orig_keys = set(self._orig_data.keys())
for key, value in self._data.items():
orig_keys.discard(key)
if key in ("creator_attributes", "publish_attributes"):
continue
self._orig_data[key] = copy.deepcopy(value)
for key in orig_keys:
self._orig_data.pop(key)
self.creator_attributes.mark_as_stored()
self.publish_attributes.mark_as_stored()
@property
def creator_attributes(self):
return self._data["creator_attributes"]
@property
def creator_attribute_defs(self):
"""Attribute definitions defined by creator plugin.
Returns:
List[AbstractAttrDef]: Attribute definitions.
"""
return self.creator_attributes.attr_defs
@property
def publish_attributes(self):
return self._data["publish_attributes"]
@property
def has_promised_context(self) -> bool:
"""Get context data that are promised to be set by creator.
Returns:
bool: Has context that won't be validated. Artist can't change
the value when set to True.
"""
return self._transient_data.get("has_promised_context", False)
def data_to_store(self):
"""Collect data that contain json parsable types.
It is possible to recreate the instance using these data.
Todos:
We probably don't need OrderedDict. When data are loaded they
are not ordered anymore.
Returns:
OrderedDict: Ordered dictionary with instance data.
"""
output = collections.OrderedDict()
for key, value in self._data.items():
if key in ("creator_attributes", "publish_attributes"):
continue
output[key] = value
if isinstance(self.creator_attributes, AttributeValues):
creator_attributes = self.creator_attributes.data_to_store()
else:
creator_attributes = copy.deepcopy(self.creator_attributes)
output["creator_attributes"] = creator_attributes
output["publish_attributes"] = self.publish_attributes.data_to_store()
return output
def set_create_attr_defs(self, attr_defs, value=None):
"""Create plugin updates create attribute definitions.
Method called by create plugin when attribute definitions should
be changed.
Args:
attr_defs (List[AbstractAttrDef]): Attribute definitions.
value (Optional[Dict[str, Any]]): Values of attribute definitions.
Current values are used if not passed in.
"""
if value is None:
value = self._data["creator_attributes"]
if isinstance(value, AttributeValues):
value = value.data_to_store()
if isinstance(self._data["creator_attributes"], AttributeValues):
origin_data = self._data["creator_attributes"].origin_data
else:
origin_data = copy.deepcopy(self._data["creator_attributes"])
self._data["creator_attributes"] = CreatorAttributeValues(
self,
"creator_attributes",
attr_defs,
value,
origin_data
)
self._create_context.instance_create_attr_defs_changed(self.id)
@classmethod
def from_existing(cls, instance_data, creator):
"""Convert instance data from workfile to CreatedInstance.
Args:
instance_data (Dict[str, Any]): Data in a structure ready for
'CreatedInstance' object.
creator (BaseCreator): Creator plugin which is creating the
instance, or to which the instance belongs.
"""
instance_data = copy.deepcopy(instance_data)
product_type = instance_data.get("productType")
if product_type is None:
product_type = instance_data.get("family")
if product_type is None:
product_type = creator.product_type
product_name = instance_data.get("productName")
if product_name is None:
product_name = instance_data.get("subset")
return cls(
product_type, product_name, instance_data, creator
)
def attribute_value_changed(self, key, changes):
"""A value changed.
Args:
key (str): Key of attribute values.
changes (Dict[str, Any]): Changes in values.
"""
self._create_context.instance_values_changed(self.id, {key: changes})
def set_publish_plugin_attr_defs(self, plugin_name, attr_defs):
"""Set attribute definitions for publish plugin.
Args:
plugin_name(str): Name of publish plugin.
attr_defs(List[AbstractAttrDef]): Attribute definitions.
"""
self.publish_attributes.set_publish_plugin_attr_defs(
plugin_name, attr_defs
)
self._create_context.instance_publish_attr_defs_changed(
self.id, plugin_name
)
def publish_attribute_value_changed(self, plugin_name, value):
"""Method called from PublishAttributes.
Args:
plugin_name (str): Plugin name.
value (Dict[str, Any]): Changes in values for the plugin.
"""
self._create_context.instance_values_changed(
self.id,
{
"publish_attributes": {
plugin_name: value,
},
},
)
def add_members(self, members):
"""Currently unused method."""
for member in members:
if member not in self._members:
self._members.append(member)
@property
def _create_context(self):
"""Get create context.
Returns:
CreateContext: Context object which wraps object.
"""
return self._creator.create_context

View file

@ -3,11 +3,20 @@ import os
import copy
import shutil
import glob
import clique
import collections
from typing import Dict, Any, Iterable
import clique
import ayon_api
from ayon_core.lib import create_hard_link
from .template_data import (
get_general_template_data,
get_folder_template_data,
get_task_template_data,
)
def _copy_file(src_path, dst_path):
"""Hardlink file if possible(to save space), copy if not.
@ -327,3 +336,82 @@ def deliver_sequence(
uploaded += 1
return report_items, uploaded
def _merge_data(data, new_data):
queue = collections.deque()
queue.append((data, new_data))
while queue:
q_data, q_new_data = queue.popleft()
for key, value in q_new_data.items():
if key in q_data and isinstance(value, dict):
queue.append((q_data[key], value))
continue
q_data[key] = value
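A small illustration of the in-place deep merge (made-up data):
```
data = {"folder": {"name": "sh010", "type": "Shot"}, "version": 1}
_merge_data(data, {"folder": {"name": "sh020"}, "task": {"name": "comp"}})
# 'data' is mutated in place:
# {"folder": {"name": "sh020", "type": "Shot"},
#  "version": 1, "task": {"name": "comp"}}
```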
def get_representations_delivery_template_data(
project_name: str,
representation_ids: Iterable[str],
) -> Dict[str, Dict[str, Any]]:
representation_ids = set(representation_ids)
output = {
repre_id: {}
for repre_id in representation_ids
}
if not representation_ids:
return output
project_entity = ayon_api.get_project(project_name)
general_template_data = get_general_template_data()
repres_hierarchy = ayon_api.get_representations_hierarchy(
project_name,
representation_ids,
project_fields=set(),
folder_fields={"path", "folderType"},
task_fields={"name", "taskType"},
product_fields={"name", "productType"},
version_fields={"version", "productId"},
representation_fields=None,
)
for repre_id, repre_hierarchy in repres_hierarchy.items():
repre_entity = repre_hierarchy.representation
if repre_entity is None:
continue
template_data = repre_entity["context"]
# Bug in 'ayon_api', 'get_representations_hierarchy' did not fully
# convert representation entity. Fixed in 'ayon_api' 1.0.10.
if isinstance(template_data, str):
con = ayon_api.get_server_api_connection()
repre_entity = con._representation_conversion(repre_entity)
template_data = repre_entity["context"]
template_data.update(copy.deepcopy(general_template_data))
template_data.update(get_folder_template_data(
repre_hierarchy.folder, project_name
))
if repre_hierarchy.task:
template_data.update(get_task_template_data(
project_entity, repre_hierarchy.task
))
product_entity = repre_hierarchy.product
version_entity = repre_hierarchy.version
template_data.update({
"product": {
"name": product_entity["name"],
"type": product_entity["productType"],
},
"version": version_entity["version"],
})
_merge_data(template_data, repre_entity["context"])
# Remove roots from template data to auto-fill them with anatomy data
template_data.pop("root", None)
output[repre_id] = template_data
return output

View file

@ -88,7 +88,7 @@ def trim_media_range(media_range, source_range):
"""
rw_media_start = _ot.RationalTime(
media_range.start_time.value + source_range.start_time.value,
source_range.start_time.value,
media_range.start_time.rate
)
rw_media_duration = _ot.RationalTime(
@ -173,11 +173,145 @@ def _sequence_resize(source, length):
yield (1 - ratio) * source[int(low)] + ratio * source[int(high)]
def is_clip_from_media_sequence(otio_clip):
"""
Args:
otio_clip (otio.schema.Clip): The OTIO clip to check.
Returns:
bool: Whether the provided clip is from an input media sequence.
"""
media_ref = otio_clip.media_reference
metadata = media_ref.metadata
# OpenTimelineIO 0.13 and newer
is_input_sequence = (
hasattr(otio.schema, "ImageSequenceReference") and
isinstance(media_ref, otio.schema.ImageSequenceReference)
)
# OpenTimelineIO 0.12 and older
is_input_sequence_legacy = bool(metadata.get("padding"))
return is_input_sequence or is_input_sequence_legacy
def remap_range_on_file_sequence(otio_clip, in_out_range):
"""
Args:
otio_clip (otio.schema.Clip): The OTIO clip to check.
in_out_range (tuple[float, float]): The in-out range to remap.
Returns:
tuple(int, int): The remapped range as discrete frame numbers.
Raises:
ValueError: When the otio_clip or provided range is invalid.
"""
if not is_clip_from_media_sequence(otio_clip):
raise ValueError(f"Cannot map on non-file sequence clip {otio_clip}.")
try:
media_in_trimmed, media_out_trimmed = in_out_range
except ValueError as error:
raise ValueError("Invalid in_out_range provided.") from error
media_ref = otio_clip.media_reference
available_range = otio_clip.available_range()
source_range = otio_clip.source_range
available_range_rate = available_range.start_time.rate
media_in = available_range.start_time.value
# Temporary.
# Some AYON custom OTIO exporter were implemented with relative
# source range for image sequence. Following code maintain
# backward-compatibility by adjusting media_in
# while we are updating those.
if (
is_clip_from_media_sequence(otio_clip)
and otio_clip.available_range().start_time.to_frames() == media_ref.start_frame
and source_range.start_time.to_frames() < media_ref.start_frame
):
media_in = 0
frame_in = otio.opentime.RationalTime.from_frames(
media_in_trimmed - media_in + media_ref.start_frame,
rate=available_range_rate,
).to_frames()
frame_out = otio.opentime.RationalTime.from_frames(
media_out_trimmed - media_in + media_ref.start_frame,
rate=available_range_rate,
).to_frames()
return frame_in, frame_out
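A hedged numeric example, assuming a hypothetical image-sequence clip whose available range starts at 0 and whose 'media_ref.start_frame' is 1001:
```
# frame = trimmed_value - media_in + media_ref.start_frame
frame_in, frame_out = remap_range_on_file_sequence(otio_clip, (10, 20))
# -> (1011, 1021)
```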
def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
source_range = otio_clip.source_range
available_range = otio_clip.available_range()
media_in = available_range.start_time.value
media_out = available_range.end_time_inclusive().value
available_range_rate = available_range.start_time.rate
# If media source is an image sequence, returned
# mediaIn/mediaOut have to correspond
# to frame numbers from source sequence.
media_ref = otio_clip.media_reference
is_input_sequence = is_clip_from_media_sequence(otio_clip)
# Temporary.
# Some AYON custom OTIO exporter were implemented with relative
# source range for image sequence. Following code maintain
# backward-compatibility by adjusting available range
# while we are updating those.
if (
is_input_sequence
and available_range.start_time.to_frames() == media_ref.start_frame
and source_range.start_time.to_frames() < media_ref.start_frame
):
available_range = _ot.TimeRange(
_ot.RationalTime(0, rate=available_range_rate),
available_range.duration,
)
# Conform source range bounds to available range rate
# e.g. embedded TC of (3600 sec / 1h), duration 100 frames
#
# available |----------------------------------------| 24fps
# 86400 86500
#
#
# 90010 90060
# src |-----|______duration 2s___|----| 25fps
# 90000
#
#
# 86409.6 86466.8
# conformed |-------|_____duration _2.38s____|-------| 24fps
# 86400
#
# Note that 24fps is slower than 25fps hence extended duration
# to preserve media range
# Compute new source range based on available rate.
# Backward-compatibility for Hiero OTIO exporter.
# NTSC compatibility might introduce floating rates; when these are
# not exactly the same (23.976 vs 23.976024627685547)
# this will cause precision issues in computation.
# Currently rounded to 2 decimals for comparison,
# but this should always rescale after that.
rounded_av_rate = round(available_range_rate, 2)
rounded_src_rate = round(source_range.start_time.rate, 2)
if rounded_av_rate != rounded_src_rate:
conformed_src_in = source_range.start_time.rescaled_to(available_range_rate)
conformed_src_duration = source_range.duration.rescaled_to(available_range_rate)
conformed_source_range = otio.opentime.TimeRange(
start_time=conformed_src_in,
duration=conformed_src_duration
)
else:
conformed_source_range = source_range
# modifiers
time_scalar = 1.
@ -224,38 +358,51 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
offset_in *= time_scalar
offset_out *= time_scalar
# filip offset if reversed speed
if time_scalar < 0:
_offset_in = offset_out
_offset_out = offset_in
offset_in = _offset_in
offset_out = _offset_out
# scale handles
handle_start *= abs(time_scalar)
handle_end *= abs(time_scalar)
# filip handles if reversed speed
# flip offset and handles if reversed speed
if time_scalar < 0:
_handle_start = handle_end
_handle_end = handle_start
handle_start = _handle_start
handle_end = _handle_end
offset_in, offset_out = offset_out, offset_in
handle_start, handle_end = handle_end, handle_start
source_in = source_range.start_time.value
# compute retimed range
media_in_trimmed = conformed_source_range.start_time.value + offset_in
media_out_trimmed = media_in_trimmed + (
(
conformed_source_range.duration.value
* abs(time_scalar)
+ offset_out
) - 1
)
media_in_trimmed = (
media_in + source_in + offset_in)
media_out_trimmed = (
media_in + source_in + (
((source_range.duration.value - 1) * abs(
time_scalar)) + offset_out))
media_in = available_range.start_time.value
media_out = available_range.end_time_inclusive().value
# calculate available handles
# If media source is an image sequence, returned
# mediaIn/mediaOut have to correspond
# to frame numbers from source sequence.
if is_input_sequence:
# preserve discrete frame numbers
media_in_trimmed, media_out_trimmed = remap_range_on_file_sequence(
otio_clip,
(media_in_trimmed, media_out_trimmed)
)
media_in = media_ref.start_frame
media_out = media_in + available_range.duration.to_frames() - 1
# adjust available handles if needed
if (media_in_trimmed - media_in) < handle_start:
handle_start = (media_in_trimmed - media_in)
handle_start = max(0, media_in_trimmed - media_in)
if (media_out - media_out_trimmed) < handle_end:
handle_end = (media_out - media_out_trimmed)
handle_end = max(0, media_out - media_out_trimmed)
# FFmpeg extraction ignores embedded timecode
# so subtract to get a (mediaIn-mediaOut) range from 0.
if not is_input_sequence:
media_in_trimmed -= media_in
media_out_trimmed -= media_in
# create version data
version_data = {
@ -263,16 +410,16 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
"retime": True,
"speed": time_scalar,
"timewarps": time_warp_nodes,
"handleStart": int(round(handle_start)),
"handleEnd": int(round(handle_end))
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}
}
returning_dict = {
"mediaIn": media_in_trimmed,
"mediaOut": media_out_trimmed,
"handleStart": int(round(handle_start)),
"handleEnd": int(round(handle_end)),
"handleStart": int(handle_start),
"handleEnd": int(handle_end),
"speed": time_scalar
}

View file

@ -1,5 +1,5 @@
import os
import copy
import os
import re
import warnings
from copy import deepcopy
@ -7,14 +7,11 @@ from copy import deepcopy
import attr
import ayon_api
import clique
from ayon_core.pipeline import (
get_current_project_name,
get_representation_path,
)
from ayon_core.lib import Logger
from ayon_core.pipeline.publish import KnownPublishError
from ayon_core.lib import Logger, collect_frames
from ayon_core.pipeline import get_current_project_name, get_representation_path
from ayon_core.pipeline.create import get_product_name
from ayon_core.pipeline.farm.patterning import match_aov_pattern
from ayon_core.pipeline.publish import KnownPublishError
@attr.s
@ -250,6 +247,9 @@ def create_skeleton_instance(
"colorspace": data.get("colorspace")
}
if data.get("renderlayer"):
instance_skeleton_data["renderlayer"] = data["renderlayer"]
# skip locking version if we are creating v01
instance_version = data.get("version") # take this if exists
if instance_version != 1:
@ -295,11 +295,17 @@ def _add_review_families(families):
return families
def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
skip_integration_repre_list,
do_not_add_review,
context,
color_managed_plugin):
def prepare_representations(
skeleton_data,
exp_files,
anatomy,
aov_filter,
skip_integration_repre_list,
do_not_add_review,
context,
color_managed_plugin,
frames_to_render=None
):
"""Create representations for file sequences.
This will return representations of expected files if they are not
@ -315,6 +321,8 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
skip_integration_repre_list (list): exclude specific extensions,
do_not_add_review (bool): explicitly skip review
color_managed_plugin (publish.ColormanagedPyblishPluginMixin)
frames_to_render (str): implicit or explicit range of frames to
render; this value is sent to Deadline in JobInfo.Frames
Returns:
list of representations
@ -325,6 +333,14 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
log = Logger.get_logger("farm_publishing")
if frames_to_render is not None:
frames_to_render = _get_real_frames_to_render(frames_to_render)
else:
# Backwards compatibility for older logic
frame_start = int(skeleton_data.get("frameStartHandle"))
frame_end = int(skeleton_data.get("frameEndHandle"))
frames_to_render = list(range(frame_start, frame_end + 1))
# create representation for every collected sequence
for collection in collections:
ext = collection.tail.lstrip(".")
@ -361,18 +377,21 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
" This may cause issues on farm."
).format(staging))
frame_start = int(skeleton_data.get("frameStartHandle"))
frame_start = frames_to_render[0]
frame_end = frames_to_render[-1]
if skeleton_data.get("slate"):
frame_start -= 1
files = _get_real_files_to_rendered(collection, frames_to_render)
# explicitly disable review by user
preview = preview and not do_not_add_review
rep = {
"name": ext,
"ext": ext,
"files": [os.path.basename(f) for f in list(collection)],
"files": files,
"frameStart": frame_start,
"frameEnd": int(skeleton_data.get("frameEndHandle")),
"frameEnd": frame_end,
# If expectedFile are absolute, we need only filenames
"stagingDir": staging,
"fps": skeleton_data.get("fps"),
@ -413,10 +432,13 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
" This may cause issues on farm."
).format(staging))
files = _get_real_files_to_rendered(
[os.path.basename(remainder)], frames_to_render)
rep = {
"name": ext,
"ext": ext,
"files": os.path.basename(remainder),
"files": files[0],
"stagingDir": staging,
}
@ -453,6 +475,53 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
return representations
def _get_real_frames_to_render(frames):
"""Returns list of frames that should be rendered.
Artists may want to selectively render only particular frames.
"""
frames_to_render = []
for frame in frames.split(","):
if "-" in frame:
splitted = frame.split("-")
frames_to_render.extend(
range(int(splitted[0]), int(splitted[1])+1))
else:
frames_to_render.append(int(frame))
frames_to_render.sort()
return frames_to_render
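For example, a Deadline-style frame string expands to a sorted list of ints:
```
_get_real_frames_to_render("1001-1003,1010,1005")
# -> [1001, 1002, 1003, 1005, 1010]
```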
def _get_real_files_to_rendered(collection, frames_to_render):
"""Use expected files based on real frames_to_render.
Artists might explicitly set frames they want to render via Publisher UI.
This uses that value to filter the expected files.
Args:
frames_to_render (list): Frames to keep, e.g. ['1001'] or [1001].
"""
files = [os.path.basename(f) for f in list(collection)]
file_name, extracted_frame = list(collect_frames(files).items())[0]
if not extracted_frame:
return files
found_frame_pattern_length = len(extracted_frame)
normalized_frames_to_render = {
str(frame_to_render).zfill(found_frame_pattern_length)
for frame_to_render in frames_to_render
}
return [
file_name
for file_name in files
if any(
frame in file_name
for frame in normalized_frames_to_render
)
]
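An illustrative call, assuming a hypothetical 'collection' holding 'render.1001.exr' through 'render.1005.exr':
```
_get_real_files_to_rendered(collection, [1001, 1003])
# -> ['render.1001.exr', 'render.1003.exr']
```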
def create_instances_for_aov(instance, skeleton, aov_filter,
skip_integration_repre_list,
do_not_add_review):
@ -464,7 +533,9 @@ def create_instances_for_aov(instance, skeleton, aov_filter,
Args:
instance (pyblish.api.Instance): Original instance.
skeleton (dict): Skeleton instance data.
aov_filter (dict): AOV filter.
        skip_integration_repre_list (list): Extensions to skip during
            integration.
do_not_add_review (bool): Explicitly disable reviews
Returns:
list of pyblish.api.Instance: Instances created from
@ -515,6 +586,131 @@ def create_instances_for_aov(instance, skeleton, aov_filter,
)
def _get_legacy_product_name_and_group(
product_type,
source_product_name,
task_name,
dynamic_data):
"""Get product name with legacy logic.
This function holds legacy behaviour of creating product name
that is deprecated. This wasn't using product name templates
at all, only hardcoded values. It shouldn't be used anymore,
    but the transition to templates needs careful checking of the project
    and studio settings.
Deprecated:
since 0.4.4
Args:
product_type (str): Product type.
source_product_name (str): Source product name.
task_name (str): Task name.
dynamic_data (dict): Dynamic data (camera, aov, ...)
Returns:
tuple: product name and group name
"""
warnings.warn("Using legacy product name for renders",
DeprecationWarning)
# create product name `<product type><Task><Product name>`
if not source_product_name.startswith(product_type):
resulting_group_name = '{}{}{}{}{}'.format(
product_type,
task_name[0].upper(), task_name[1:],
source_product_name[0].upper(), source_product_name[1:])
else:
resulting_group_name = source_product_name
resulting_product_name = resulting_group_name
camera = dynamic_data.get("camera")
aov = dynamic_data.get("aov")
if camera:
if not aov:
resulting_product_name = '{}_{}'.format(
resulting_group_name, camera)
elif not aov.startswith(camera):
resulting_product_name = '{}_{}_{}'.format(
resulting_group_name, camera, aov)
else:
resulting_product_name = "{}_{}".format(
resulting_group_name, aov)
else:
if aov:
resulting_product_name = '{}_{}'.format(
resulting_group_name, aov)
return resulting_product_name, resulting_group_name
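# Example (illustrative): product_type "render", task "lighting",
# source product "main", camera "camMain" and aov "beauty" yield
# group "renderLightingMain" and product
# "renderLightingMain_camMain_beauty".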
def get_product_name_and_group_from_template(
project_name,
task_entity,
product_type,
variant,
host_name,
dynamic_data=None):
"""Get product name and group name from template.
This will get product name and group name from template based on
data provided. It is doing similar work as
`func::_get_legacy_product_name_and_group` but using templates.
    To get the group name, the template is called without any dynamic
    data, so (depending on the template itself) it should be the product
    name without the aov.
Todo:
Maybe we should introduce templates for the groups themselves.
Args:
task_entity (dict): Task entity.
project_name (str): Project name.
host_name (str): Host name.
product_type (str): Product type.
variant (str): Variant.
dynamic_data (dict): Dynamic data (aov, renderlayer, camera, ...).
Returns:
tuple: product name and group name.
"""
# remove 'aov' from data used to format group. See todo comment above
# for possible solution.
_dynamic_data = deepcopy(dynamic_data) or {}
_dynamic_data.pop("aov", None)
resulting_group_name = get_product_name(
project_name=project_name,
task_name=task_entity["name"],
task_type=task_entity["taskType"],
host_name=host_name,
product_type=product_type,
dynamic_data=_dynamic_data,
variant=variant,
)
resulting_product_name = get_product_name(
project_name=project_name,
task_name=task_entity["name"],
task_type=task_entity["taskType"],
host_name=host_name,
product_type=product_type,
dynamic_data=dynamic_data,
variant=variant,
)
return resulting_product_name, resulting_group_name
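# Example (illustrative): with a product name template that appends
# "{aov}", the group name is formatted without the aov (e.g.
# "renderLightingMain") while the product name includes it (e.g.
# "renderLightingMain_beauty").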
def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
skip_integration_repre_list, do_not_add_review):
"""Create instance for each AOV found.
@ -526,10 +722,10 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
instance (pyblish.api.Instance): Original instance.
skeleton (dict): Skeleton data for instance (those needed) later
by collector.
additional_data (dict): ..
additional_data (dict): ...
skip_integration_repre_list (list): list of extensions that shouldn't
be published
do_not_addbe _review (bool): explicitly disable review
do_not_add_review (bool): explicitly disable review
Returns:
@ -539,68 +735,70 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
ValueError:
"""
# TODO: this needs to be taking the task from context or instance
task = os.environ["AYON_TASK_NAME"]
anatomy = instance.context.data["anatomy"]
s_product_name = skeleton["productName"]
source_product_name = skeleton["productName"]
cameras = instance.data.get("cameras", [])
exp_files = instance.data["expectedFiles"]
expected_files = instance.data["expectedFiles"]
log = Logger.get_logger("farm_publishing")
instances = []
# go through AOVs in expected files
for aov, files in exp_files[0].items():
cols, rem = clique.assemble(files)
# we shouldn't have any reminders. And if we do, it should
# be just one item for single frame renders.
if not cols and rem:
if len(rem) != 1:
raise ValueError("Found multiple non related files "
"to render, don't know what to do "
"with them.")
col = rem[0]
ext = os.path.splitext(col)[1].lstrip(".")
else:
# but we really expect only one collection.
# Nothing else make sense.
if len(cols) != 1:
raise ValueError("Only one image sequence type is expected.") # noqa: E501
ext = cols[0].tail.lstrip(".")
col = list(cols[0])
for aov, files in expected_files[0].items():
collected_files = _collect_expected_files_for_aov(files)
# create product name `<product type><Task><Product name>`
# TODO refactor/remove me
product_type = skeleton["productType"]
if not s_product_name.startswith(product_type):
group_name = '{}{}{}{}{}'.format(
product_type,
task[0].upper(), task[1:],
s_product_name[0].upper(), s_product_name[1:])
else:
group_name = s_product_name
expected_filepath = collected_files
if isinstance(collected_files, (list, tuple)):
expected_filepath = collected_files[0]
# if there are multiple cameras, we need to add camera name
expected_filepath = col[0] if isinstance(col, (list, tuple)) else col
cams = [cam for cam in cameras if cam in expected_filepath]
if cams:
for cam in cams:
if not aov:
product_name = '{}_{}'.format(group_name, cam)
elif not aov.startswith(cam):
product_name = '{}_{}_{}'.format(group_name, cam, aov)
else:
product_name = "{}_{}".format(group_name, aov)
else:
if aov:
product_name = '{}_{}'.format(group_name, aov)
else:
product_name = '{}'.format(group_name)
dynamic_data = {
"aov": aov,
"renderlayer": instance.data.get("renderlayer"),
}
# find if camera is used in the file path
        # TODO: this must be changed to be more robust. Any occurrence
        #   of the camera name in the file path will be treated as the
        #   camera name. This is not correct.
camera = [cam for cam in cameras if cam in expected_filepath]
# Is there just one camera matching?
# TODO: this is not true, we can have multiple cameras in the scene
# and we should be able to detect them all. Currently, we are
# keeping the old behavior, taking the first one found.
if camera:
dynamic_data["camera"] = camera[0]
project_settings = instance.context.data.get("project_settings")
use_legacy_product_name = True
try:
use_legacy_product_name = project_settings["core"]["tools"]["creator"]["use_legacy_product_names_for_renders"] # noqa: E501
except KeyError:
warnings.warn(
("use_legacy_for_renders not found in project settings. "
"Using legacy product name for renders. Please update "
"your ayon-core version."), DeprecationWarning)
use_legacy_product_name = True
if use_legacy_product_name:
product_name, group_name = _get_legacy_product_name_and_group(
product_type=skeleton["productType"],
source_product_name=source_product_name,
task_name=instance.data["task"],
dynamic_data=dynamic_data)
if isinstance(col, (list, tuple)):
staging = os.path.dirname(col[0])
else:
staging = os.path.dirname(col)
product_name, group_name = get_product_name_and_group_from_template(
task_entity=instance.data["taskEntity"],
project_name=instance.context.data["projectName"],
host_name=instance.context.data["hostName"],
product_type=skeleton["productType"],
variant=instance.data.get("variant", source_product_name),
dynamic_data=dynamic_data
)
staging = os.path.dirname(expected_filepath)
try:
staging = remap_source(staging, anatomy)
@ -611,10 +809,8 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
app = os.environ.get("AYON_HOST_NAME", "")
if isinstance(col, list):
render_file_name = os.path.basename(col[0])
else:
render_file_name = os.path.basename(col)
render_file_name = os.path.basename(expected_filepath)
aov_patterns = aov_filter
preview = match_aov_pattern(app, aov_patterns, render_file_name)
@ -622,9 +818,10 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
new_instance = deepcopy(skeleton)
new_instance["productName"] = product_name
new_instance["productGroup"] = group_name
new_instance["aov"] = aov
# toggle preview on if multipart is on
# Because we cant query the multipartExr data member of each AOV we'll
# Because we can't query the multipartExr data member of each AOV we'll
# need to have hardcoded rule of excluding any renders with
# "cryptomatte" in the file name from being a multipart EXR. This issue
# happens with Redshift that forces Cryptomatte renders to be separate
@ -650,10 +847,7 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
new_instance["review"] = True
# create representation
if isinstance(col, (list, tuple)):
files = [os.path.basename(f) for f in col]
else:
files = os.path.basename(col)
ext = os.path.splitext(render_file_name)[-1].lstrip(".")
# Copy render product "colorspace" data to representation.
colorspace = ""
@ -663,10 +857,15 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
colorspace = product.colorspace
break
if isinstance(collected_files, (list, tuple)):
collected_files = [os.path.basename(f) for f in collected_files]
else:
collected_files = os.path.basename(collected_files)
rep = {
"name": ext,
"ext": ext,
"files": files,
"files": collected_files,
"frameStart": int(skeleton["frameStartHandle"]),
"frameEnd": int(skeleton["frameEndHandle"]),
# If expectedFiles are absolute, we need only filenames
@ -708,6 +907,35 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
return instances
def _collect_expected_files_for_aov(files):
"""Collect expected files.
Args:
files (list): List of files.
Returns:
list or str: Collection of files or single file.
Raises:
ValueError: If there are multiple collections.
"""
cols, rem = clique.assemble(files)
    # we shouldn't have any remainders. And if we do, it should
    # be just one item for single frame renders.
    if not cols and rem:
        if len(rem) != 1:
            raise ValueError("Found multiple unrelated files "
                             "to render, don't know what to do "
                             "with them.")
return rem[0]
    # but we really expect only one collection.
    # Nothing else makes sense.
if len(cols) != 1:
raise ValueError("Only one image sequence type is expected.") # noqa: E501
return list(cols[0])
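# Example (illustrative): ["beauty.1001.exr", "beauty.1002.exr"] is
# returned as a list of file names, while a single remainder file
# ["beauty.exr"] is returned as the bare string "beauty.exr".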
def get_resources(project_name, version_entity, extension=None):
"""Get the files from the specific version.

View file

@ -1,24 +0,0 @@
import pyblish.api
from ayon_core.pipeline import Anatomy
from typing import Tuple, List
class TimeData:
start: int
end: int
fps: float | int
step: int
handle_start: int
handle_end: int
def __init__(self, start: int, end: int, fps: float | int, step: int, handle_start: int, handle_end: int):
...
...
def remap_source(source: str, anatomy: Anatomy): ...
def extend_frames(folder_path: str, product_name: str, start: int, end: int) -> Tuple[int, int]: ...
def get_time_data_from_instance_or_context(instance: pyblish.api.Instance) -> TimeData: ...
def get_transferable_representations(instance: pyblish.api.Instance) -> list: ...
def create_skeleton_instance(instance: pyblish.api.Instance, families_transfer: list = ..., instance_transfer: dict = ...) -> dict: ...
def create_instances_for_aov(instance: pyblish.api.Instance, skeleton: dict, aov_filter: dict) -> List[pyblish.api.Instance]: ...
def attach_instances_to_product(attach_to: list, instances: list) -> list: ...

View file

@ -242,6 +242,26 @@ class LoaderPlugin(list):
if hasattr(self, "_fname"):
return self._fname
@classmethod
def get_representation_name_aliases(cls, representation_name: str):
"""Return representation names to which switching is allowed from
the input representation name, like an alias replacement of the input
`representation_name`.
For example, to allow an automated switch on update from representation
`ma` to `mb` or `abc`, then when `representation_name` is `ma` return:
["mb", "abc"]
The order of the names in the returned representation names is
important, because the first one existing under the new version will
be chosen.
        Args:
            representation_name (str): Name of the source representation.

        Returns:
            List[str]: Representation names to which switching is allowed
                on update if the input representation name is not found
                on the new version.
"""
return []
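    # Example override (illustrative sketch):
    #     @classmethod
    #     def get_representation_name_aliases(cls, representation_name):
    #         if representation_name == "ma":
    #             return ["mb", "abc"]
    #         return []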
class ProductLoaderPlugin(LoaderPlugin):
"""Load product into host application

View file

@ -505,21 +505,6 @@ def update_container(container, version=-1):
project_name, product_entity["folderId"]
)
repre_name = current_representation["name"]
new_representation = ayon_api.get_representation_by_name(
project_name, repre_name, new_version["id"]
)
if new_representation is None:
raise ValueError(
"Representation '{}' wasn't found on requested version".format(
repre_name
)
)
path = get_representation_path(new_representation)
if not path or not os.path.exists(path):
raise ValueError("Path {} doesn't exist".format(path))
# Run update on the Loader for this container
Loader = _get_container_loader(container)
if not Loader:
@ -527,6 +512,39 @@ def update_container(container, version=-1):
"Can't update container because loader '{}' was not found."
.format(container.get("loader"))
)
repre_name = current_representation["name"]
new_representation = ayon_api.get_representation_by_name(
project_name, repre_name, new_version["id"]
)
if new_representation is None:
# The representation name is not found in the new version.
# Allow updating to a 'matching' representation if the loader
# has defined compatible update conversions
repre_name_aliases = Loader.get_representation_name_aliases(repre_name)
if repre_name_aliases:
representations = ayon_api.get_representations(
project_name,
representation_names=repre_name_aliases,
version_ids=[new_version["id"]])
representations_by_name = {
repre["name"]: repre for repre in representations
}
for name in repre_name_aliases:
if name in representations_by_name:
new_representation = representations_by_name[name]
break
if new_representation is None:
raise ValueError(
"Representation '{}' wasn't found on requested version".format(
repre_name
)
)
path = get_representation_path(new_representation)
if not path or not os.path.exists(path):
raise ValueError("Path {} doesn't exist".format(path))
project_entity = ayon_api.get_project(project_name)
context = {
"project": project_entity,

View file

@ -1,20 +1,22 @@
# Publish
AYON is using `pyblish` for publishing process which is a little bit extented and modified mainly for UI purposes. OpenPype's (new) publish UI does not allow to enable/disable instances or plugins that can be done during creation part. Also does support actions only for validators after validation exception.
AYON uses `pyblish` for the publishing process, slightly extended and modified, mainly for UI purposes. AYON's (new) publish UI does not allow enabling/disabling instances or plugins; that can be done during the creation part. It also supports actions only for validators, after a validation exception.
## Exceptions
AYON defines a few specific exceptions that should be used in publish plugins.
### Publish error
Exception `PublishError` can be raised on a known error. The message is shown to the artist.
- **message** Error message.
- **title** Short description of error (2-5 words). Title can be used for grouping of exceptions per plugin.
- **description** Override of 'message' for the UI; you can add markdown and HTML. By default, it is filled with 'message'.
- **detail** Additional detail message that is hidden under collapsed component.
Arguments `title`, `description` and `detail` are optional. Title is filled with the generic message "This is not your fault" if it is not passed.
### Validation exception
Validation plugins should raise `PublishValidationError` to show the artist what is wrong and give them actions to fix it. The exception indicates that the error happened in a plugin and can be fixed by the artist (with or without a plugin action). Any other error stops publishing immediately. Exception `PublishValidationError` raised after the validation order has the same effect as any other exception.
Exception `PublishValidationError` expects 3 arguments:
- **message** Not used in the UI, but used for headless publishing.
- **title** Short description of error (2-5 words). Title is used for grouping of exceptions per plugin.
- **description** Detailed description of the issue; markdown and HTML can be used.
### Known errors
When there is a known error that can't be fixed by the user (e.g. can't connect to the Deadline service), `KnownPublishError` should be raised. The only difference is that its message is shown in the UI to the artist; otherwise a neutral message without context is shown.
The exception expects the same arguments as `PublishError`. The value of `title` is filled with the plugin label if it is not passed.
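Below is a minimal sketch of a validator raising these exceptions; the plugin class, messages and data keys are illustrative, not part of ayon-core:
```python
import pyblish.api

from ayon_core.pipeline.publish import (
    PublishError,
    PublishValidationError,
)


class ValidateExampleNames(pyblish.api.InstancePlugin):
    """Illustrative validator using AYON publish exceptions."""

    order = pyblish.api.ValidatorOrder
    label = "Validate Example Names"

    def process(self, instance):
        if " " in instance.name:
            # The artist can fix this, so raise a validation error
            raise PublishValidationError(
                "Instance name contains spaces.",
                title="Invalid name",
                description=(
                    "## Invalid name\n"
                    "Rename the instance without spaces and try again."
                ),
            )
        if not instance.data.get("expectedFiles"):
            # Known error the artist cannot fix from the UI
            raise PublishError(
                "No files were collected for the instance.",
                title="Nothing to publish",
            )
```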
## Plugin extension
Publish plugins can be extended with additional logic by inheriting from `AYONPyblishPluginMixin`, which can be used as a mixin (additional inheritance of the class).
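For example, a plugin might use the mixin to expose a checkbox in the publisher UI. The sketch below is illustrative; the class name, family and attribute key are made up:
```python
import pyblish.api

from ayon_core.lib import BoolDef
from ayon_core.pipeline.publish import AYONPyblishPluginMixin


class CollectExampleOptions(
    pyblish.api.InstancePlugin, AYONPyblishPluginMixin
):
    """Illustrative plugin extended with publish attribute definitions."""

    order = pyblish.api.CollectorOrder
    label = "Collect Example Options"
    families = ["render"]

    @classmethod
    def get_attribute_defs(cls):
        # Attribute definitions shown to the artist in the publisher UI
        return [
            BoolDef("use_review", label="Review", default=True),
        ]

    def process(self, instance):
        attr_values = self.get_attr_values_from_data(instance.data)
        if attr_values.get("use_review"):
            instance.data.setdefault("families", []).append("review")
```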

View file

@ -9,11 +9,12 @@ from .publish_plugins import (
AbstractMetaInstancePlugin,
AbstractMetaContextPlugin,
KnownPublishError,
PublishError,
PublishValidationError,
PublishXmlValidationError,
KnownPublishError,
AYONPyblishPluginMixin,
OpenPypePyblishPluginMixin,
OptionalPyblishPluginMixin,
RepairAction,
@ -62,11 +63,12 @@ __all__ = (
"AbstractMetaInstancePlugin",
"AbstractMetaContextPlugin",
"KnownPublishError",
"PublishError",
"PublishValidationError",
"PublishXmlValidationError",
"KnownPublishError",
"AYONPyblishPluginMixin",
"OpenPypePyblishPluginMixin",
"OptionalPyblishPluginMixin",
"RepairAction",

View file

@ -379,7 +379,7 @@ def get_plugin_settings(plugin, project_settings, log, category=None):
plugin_kind = split_path[-2]
# TODO: change after all plugins are moved one level up
if category_from_file in ("ayon_core", "openpype"):
if category_from_file == "ayon_core":
category_from_file = "core"
try:

View file

@ -1,9 +1,19 @@
import inspect
from abc import ABCMeta
import typing
from typing import Optional
import pyblish.api
import pyblish.logic
from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin
from ayon_core.lib import BoolDef
from ayon_core.pipeline.colorspace import (
get_colorspace_settings_from_publish_context,
set_colorspace_data_to_representation
)
from .lib import (
load_help_content_from_plugin,
get_errored_instances_from_context,
@ -11,10 +21,8 @@ from .lib import (
get_instance_staging_dir,
)
from ayon_core.pipeline.colorspace import (
get_colorspace_settings_from_publish_context,
set_colorspace_data_to_representation
)
if typing.TYPE_CHECKING:
from ayon_core.pipeline.create import CreateContext, CreatedInstance
class AbstractMetaInstancePlugin(ABCMeta, MetaPlugin):
@ -25,27 +33,52 @@ class AbstractMetaContextPlugin(ABCMeta, ExplicitMetaPlugin):
pass
class PublishValidationError(Exception):
"""Validation error happened during publishing.
class KnownPublishError(Exception):
"""Publishing crashed because of known error.
This exception should be used when validation publishing failed.
Artist can't affect source of the error.
Has additional UI specific attributes that may be handy for artist.
Deprecated:
Please use `PublishError` instead. Marked as deprecated 24/09/02.
"""
pass
class PublishError(Exception):
"""Publishing crashed because of known error.
Message will be shown in UI for artist.
Args:
message(str): Message of error. Short explanation an issue.
title(str): Title showed in UI. All instances are grouped under
single title.
description(str): Detailed description of an error. It is possible
to use Markdown syntax.
"""
message (str): Message of error. Short explanation of an issue.
title (Optional[str]): Title showed in UI.
description (Optional[str]): Detailed description of an error.
It is possible to use Markdown syntax.
"""
def __init__(self, message, title=None, description=None, detail=None):
self.message = message
self.title = title
self.description = description or message
self.detail = detail
super(PublishValidationError, self).__init__(message)
super().__init__(message)
class PublishValidationError(PublishError):
"""Validation error happened during publishing.
This exception should be used when validation publishing failed.
Publishing does not stop during validation order if this
exception is raised.
Has additional UI specific attributes that may be handy for artist.
Argument 'title' is used to group errors.
"""
pass
class PublishXmlValidationError(PublishValidationError):
@ -68,15 +101,6 @@ class PublishXmlValidationError(PublishValidationError):
)
class KnownPublishError(Exception):
"""Publishing crashed because of known error.
Message will be shown in UI for artist.
"""
pass
class AYONPyblishPluginMixin:
# TODO
# executable_in_thread = False
@ -109,32 +133,118 @@ class AYONPyblishPluginMixin:
# for callback in self._state_change_callbacks:
# callback(self)
@classmethod
def register_create_context_callbacks(
cls, create_context: "CreateContext"
):
"""Register callbacks for create context.
        It is possible to register callbacks listening to changes that
        happen in the create context.
Methods available on create context:
- add_instances_added_callback
- add_instances_removed_callback
- add_value_changed_callback
- add_pre_create_attr_defs_change_callback
- add_create_attr_defs_change_callback
- add_publish_attr_defs_change_callback
Args:
create_context (CreateContext): Create context.
"""
pass
@classmethod
def get_attribute_defs(cls):
"""Publish attribute definitions.
Attributes available for all families in plugin's `families` attribute.
Returns:
list<AbstractAttrDef>: Attribute definitions for plugin.
"""
Returns:
list[AbstractAttrDef]: Attribute definitions for plugin.
"""
return []
@classmethod
def convert_attribute_values(cls, attribute_values):
if cls.__name__ not in attribute_values:
return attribute_values
def get_attr_defs_for_context(cls, create_context: "CreateContext"):
"""Publish attribute definitions for context.
plugin_values = attribute_values[cls.__name__]
Attributes available for all families in plugin's `families` attribute.
attr_defs = cls.get_attribute_defs()
for attr_def in attr_defs:
key = attr_def.key
if key in plugin_values:
plugin_values[key] = attr_def.convert_value(
plugin_values[key]
)
return attribute_values
Args:
create_context (CreateContext): Create context.
Returns:
list[AbstractAttrDef]: Attribute definitions for plugin.
"""
if cls.__instanceEnabled__:
return []
return cls.get_attribute_defs()
@classmethod
def instance_matches_plugin_families(
cls, instance: Optional["CreatedInstance"]
):
"""Check if instance matches families.
Args:
instance (Optional[CreatedInstance]): Instance to check. Or None
for context.
Returns:
bool: True if instance matches plugin families.
"""
if instance is None:
return not cls.__instanceEnabled__
if not cls.__instanceEnabled__:
return False
families = [instance.product_type]
families.extend(instance.get("families", []))
for _ in pyblish.logic.plugins_by_families([cls], families):
return True
return False
@classmethod
def get_attr_defs_for_instance(
cls, create_context: "CreateContext", instance: "CreatedInstance"
):
"""Publish attribute definitions for an instance.
Attributes available for all families in plugin's `families` attribute.
Args:
create_context (CreateContext): Create context.
instance (CreatedInstance): Instance for which attributes are
collected.
Returns:
list[AbstractAttrDef]: Attribute definitions for plugin.
"""
if not cls.instance_matches_plugin_families(instance):
return []
return cls.get_attribute_defs()
@classmethod
def convert_attribute_values(
cls, create_context: "CreateContext", instance: "CreatedInstance"
):
"""Convert attribute values for instance.
Args:
create_context (CreateContext): Create context.
instance (CreatedInstance): Instance for which attributes are
converted.
"""
return
@staticmethod
def get_attr_values_from_data_for_plugin(plugin, data):
@ -165,9 +275,6 @@ class AYONPyblishPluginMixin:
return self.get_attr_values_from_data_for_plugin(self.__class__, data)
OpenPypePyblishPluginMixin = AYONPyblishPluginMixin
class OptionalPyblishPluginMixin(AYONPyblishPluginMixin):
"""Prepare mixin for optional plugins.

View file

@ -25,13 +25,7 @@ def create_custom_tempdir(project_name, anatomy=None):
"""
env_tmpdir = os.getenv("AYON_TMPDIR")
if not env_tmpdir:
env_tmpdir = os.getenv("OPENPYPE_TMPDIR")
if not env_tmpdir:
return
print(
"DEPRECATION WARNING: Used 'OPENPYPE_TMPDIR' environment"
" variable. Please use 'AYON_TMPDIR' instead."
)
return
custom_tempdir = None
if "{" in env_tmpdir:

View file

@ -4,7 +4,7 @@ import collections
import ayon_api
from ayon_core.lib.local_settings import get_ayon_appdirs
from ayon_core.lib.local_settings import get_launcher_local_dir
FileInfo = collections.namedtuple(
@ -54,7 +54,7 @@ class ThumbnailsCache:
"""
if self._thumbnails_dir is None:
self._thumbnails_dir = get_ayon_appdirs("thumbnails")
self._thumbnails_dir = get_launcher_local_dir("thumbnails")
return self._thumbnails_dir
thumbnails_dir = property(get_thumbnails_dir)

View file

@ -506,55 +506,61 @@ class AbstractTemplateBuilder(ABC):
keep_placeholders (bool): Add flag to placeholder data for
hosts to decide if they want to remove
placeholder after it is used.
create_first_version (bool): create first version of a workfile
workfile_creation_enabled (bool): If True, it might create
first version but ignore
process if version is created
create_first_version (bool): Create first version of a workfile.
When set to True, this option initiates the saving of the
workfile for an initial version. It will skip saving if
a version already exists.
workfile_creation_enabled (bool): Whether the call is part of
creating a new workfile.
When True, we only build if the current file is not
an existing saved workfile but a "new" file. Basically when
enabled we assume the user tries to load it only into a
"New File" (unsaved empty workfile).
When False, the default value, we assume we explicitly want to
build the template in our current scene regardless of current
scene state.
"""
if any(
value is None
for value in [
template_path,
keep_placeholders,
create_first_version,
]
):
template_preset = self.get_template_preset()
if template_path is None:
template_path = template_preset["path"]
if keep_placeholders is None:
keep_placeholders = template_preset["keep_placeholder"]
if create_first_version is None:
create_first_version = template_preset["create_first_version"]
# More accurate variable name
# - logic related to workfile creation should be moved out in future
explicit_build_requested = not workfile_creation_enabled
# check if first version is created
created_version_workfile = False
if create_first_version:
created_version_workfile = self.create_first_workfile_version()
# if first version is created, import template
# and populate placeholders
# Get default values if not provided
if (
create_first_version
and workfile_creation_enabled
and created_version_workfile
template_path is None
or keep_placeholders is None
or create_first_version is None
):
preset = self.get_template_preset()
template_path: str = template_path or preset["path"]
if keep_placeholders is None:
keep_placeholders: bool = preset["keep_placeholder"]
if create_first_version is None:
create_first_version: bool = preset["create_first_version"]
# Build the template if we are explicitly requesting it or if it's
# an unsaved "new file".
is_new_file = not self.host.get_current_workfile()
if is_new_file or explicit_build_requested:
self.log.info(f"Building the workfile template: {template_path}")
self.import_template(template_path)
self.populate_scene_placeholders(
level_limit, keep_placeholders)
# save workfile after template is populated
self.save_workfile(created_version_workfile)
# ignore process if first workfile is enabled
# but a version is already created
if workfile_creation_enabled:
# Do not consider saving a first workfile version, if this is not set
# to be a "workfile creation" or `create_first_version` is disabled.
if explicit_build_requested or not create_first_version:
return
self.import_template(template_path)
self.populate_scene_placeholders(
level_limit, keep_placeholders)
# If there is no existing workfile, save the first version
workfile_path = self.get_workfile_path()
if not os.path.exists(workfile_path):
self.log.info("Saving first workfile: %s", workfile_path)
self.save_workfile(workfile_path)
else:
self.log.info(
"A workfile already exists. Skipping save of workfile as "
"initial version.")
def rebuild_template(self):
"""Go through existing placeholders in scene and update them.
@ -608,29 +614,16 @@ class AbstractTemplateBuilder(ABC):
pass
def create_first_workfile_version(self):
"""
Create first version of workfile.
def get_workfile_path(self):
"""Return last known workfile path or the first workfile path create.
Should load the content of template into scene so
'populate_scene_placeholders' can be started.
Args:
template_path (str): Fullpath for current task and
host's template file.
        Returns:
            str: Last workfile path, or the first version to create if
                none exists.
"""
# AYON_LAST_WORKFILE will be set to the last existing workfile OR
# if none exist it will be set to the first version.
last_workfile_path = os.environ.get("AYON_LAST_WORKFILE")
self.log.info("__ last_workfile_path: {}".format(last_workfile_path))
if os.path.exists(last_workfile_path):
# ignore in case workfile existence
self.log.info("Workfile already exists, skipping creation.")
return False
# Create first version
self.log.info("Creating first version of workfile.")
self.save_workfile(last_workfile_path)
# Confirm creation of first version
return last_workfile_path
def save_workfile(self, workfile_path):
@ -859,7 +852,7 @@ class AbstractTemplateBuilder(ABC):
"Settings\\Profiles"
).format(host_name.title()))
# Try fill path with environments and anatomy roots
# Try to fill path with environments and anatomy roots
anatomy = Anatomy(project_name)
fill_data = {
key: value
@ -872,9 +865,7 @@ class AbstractTemplateBuilder(ABC):
"code": anatomy.project_code,
}
result = StringTemplate.format_template(path, fill_data)
if result.solved:
path = result.normalized()
path = self.resolve_template_path(path, fill_data)
if path and os.path.exists(path):
self.log.info("Found template at: '{}'".format(path))
@ -914,6 +905,27 @@ class AbstractTemplateBuilder(ABC):
"create_first_version": create_first_version
}
def resolve_template_path(self, path, fill_data) -> str:
"""Resolve the template path.
        By default, this only formats the template path with the fill
        data. It can be overridden in host integrations to perform
        additional resolving over the template, e.g.
        `hou.text.expandString` in Houdini.
Arguments:
path (str): The input path.
fill_data (dict[str, str]): Data to use for template formatting.
Returns:
str: The resolved path.
"""
result = StringTemplate.format_template(path, fill_data)
if result.solved:
path = result.normalized()
return path
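    # Example override in a host integration (illustrative sketch;
    # `hou` is only available inside Houdini):
    #     def resolve_template_path(self, path, fill_data):
    #         path = super().resolve_template_path(path, fill_data)
    #         return hou.text.expandString(path)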
def emit_event(self, topic, data=None, source=None) -> Event:
return self._event_system.emit(topic, data, source)
@ -1519,9 +1531,10 @@ class PlaceholderLoadMixin(object):
if "asset" in placeholder.data:
return []
representation_name = placeholder.data["representation"]
if not representation_name:
return []
representation_names = None
representation_name: str = placeholder.data["representation"]
if representation_name:
representation_names = [representation_name]
project_name = self.builder.project_name
current_folder_entity = self.builder.current_folder_entity
@ -1578,7 +1591,7 @@ class PlaceholderLoadMixin(object):
)
return list(get_representations(
project_name,
representation_names={representation_name},
representation_names=representation_names,
version_ids=version_ids
))

View file

@ -99,7 +99,7 @@ class OpenTaskPath(LauncherAction):
if platform_name == "windows":
args = ["start", path]
elif platform_name == "darwin":
args = ["open", "-na", path]
args = ["open", "-R", path]
elif platform_name == "linux":
args = ["xdg-open", path]
else:

View file

@ -0,0 +1,87 @@
import os
import urllib.parse
import webbrowser
from ayon_core.pipeline import LauncherAction
from ayon_core.resources import get_ayon_icon_filepath
import ayon_api
def get_ayon_entity_uri(
project_name,
entity_id,
entity_type,
) -> str:
"""Resolve AYON Entity URI from representation context.
Note:
The representation context is the `get_representation_context` dict
containing the `project`, `folder, `representation` and so forth.
It is not the representation entity `context` key.
Arguments:
project_name (str): The project name.
entity_id (str): The entity UUID.
        entity_type (str): The entity type, like "folder" or "task".
Raises:
RuntimeError: Unable to resolve to a single valid URI.
Returns:
str: The AYON entity URI.
"""
response = ayon_api.post(
f"projects/{project_name}/uris",
entityType=entity_type,
ids=[entity_id])
if response.status_code != 200:
raise RuntimeError(
f"Unable to resolve AYON entity URI for '{project_name}' "
f"{entity_type} id '{entity_id}': {response.text}"
)
uris = response.data["uris"]
if len(uris) != 1:
raise RuntimeError(
f"Unable to resolve AYON entity URI for '{project_name}' "
f"{entity_type} id '{entity_id}' to single URI. "
f"Received data: {response.data}"
)
return uris[0]["uri"]
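# Example usage (illustrative; the exact URI shape is server-defined):
#     uri = get_ayon_entity_uri("my_project", folder_entity["id"], "folder")
#     # -> e.g. "ayon+entity://my_project/assets/hero"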
class ShowInAYON(LauncherAction):
"""Open AYON browser page to the current context."""
name = "showinayon"
label = "Show in AYON"
icon = get_ayon_icon_filepath()
order = 999
def process(self, selection, **kwargs):
url = os.environ["AYON_SERVER_URL"]
if selection.is_project_selected:
project_name = selection.project_name
url += f"/projects/{project_name}/browser"
# Specify entity URI if task or folder is selected
entity = None
entity_type = None
if selection.is_task_selected:
entity = selection.get_task_entity()
entity_type = "task"
elif selection.is_folder_selected:
entity = selection.get_folder_entity()
entity_type = "folder"
if entity and entity_type:
uri = get_ayon_entity_uri(
project_name,
entity_id=entity["id"],
entity_type=entity_type
)
uri_encoded = urllib.parse.quote_plus(uri)
url += f"?uri={uri_encoded}"
# Open URL in webbrowser
self.log.info(f"Opening URL: {url}")
webbrowser.open_new_tab(url)

View file

@ -1,24 +1,22 @@
import copy
import platform
from collections import defaultdict
import ayon_api
from qtpy import QtWidgets, QtCore, QtGui
from ayon_core.pipeline import load, Anatomy
from ayon_core import resources, style
from ayon_core.lib import (
format_file_size,
collect_frames,
get_datetime_data,
)
from ayon_core.pipeline import load, Anatomy
from ayon_core.pipeline.load import get_representation_path_with_anatomy
from ayon_core.pipeline.delivery import (
get_format_dict,
check_destination_path,
deliver_single_file,
deliver_sequence,
get_representations_delivery_template_data,
)
@ -201,20 +199,31 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
format_dict = get_format_dict(self.anatomy, self.root_line_edit.text())
renumber_frame = self.renumber_frame.isChecked()
frame_offset = self.first_frame_start.value()
filtered_repres = []
repre_ids = set()
for repre in self._representations:
if repre["name"] not in selected_repres:
continue
if repre["name"] in selected_repres:
filtered_repres.append(repre)
repre_ids.add(repre["id"])
template_data_by_repre_id = (
get_representations_delivery_template_data(
self.anatomy.project_name, repre_ids
)
)
for repre in filtered_repres:
repre_path = get_representation_path_with_anatomy(
repre, self.anatomy
)
anatomy_data = copy.deepcopy(repre["context"])
new_report_items = check_destination_path(repre["id"],
self.anatomy,
anatomy_data,
datetime_data,
template_name)
template_data = template_data_by_repre_id[repre["id"]]
new_report_items = check_destination_path(
repre["id"],
self.anatomy,
template_data,
datetime_data,
template_name
)
report_items.update(new_report_items)
if new_report_items:
@ -225,57 +234,61 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
repre,
self.anatomy,
template_name,
anatomy_data,
template_data,
format_dict,
report_items,
self.log
]
if repre.get("files"):
src_paths = []
for repre_file in repre["files"]:
src_path = self.anatomy.fill_root(repre_file["path"])
src_paths.append(src_path)
sources_and_frames = collect_frames(src_paths)
# TODO: This will currently incorrectly detect 'resources'
# that are published along with the instance, because those should
# not adhere to the template directly but are ingested in a
# customized way. For example, maya look textures or any publish
# that directly adds files into `instance.data["transfers"]`
src_paths = []
for repre_file in repre["files"]:
src_path = self.anatomy.fill_root(repre_file["path"])
src_paths.append(src_path)
sources_and_frames = collect_frames(src_paths)
frames = set(sources_and_frames.values())
frames.discard(None)
first_frame = None
if frames:
first_frame = min(frames)
frames = set(sources_and_frames.values())
frames.discard(None)
first_frame = None
if frames:
first_frame = min(frames)
for src_path, frame in sources_and_frames.items():
args[0] = src_path
# Renumber frames
if renumber_frame and frame is not None:
# Calculate offset between
# first frame and current frame
# - '0' for first frame
offset = frame_offset - int(first_frame)
# Add offset to new frame start
dst_frame = int(frame) + offset
if dst_frame < 0:
msg = "Renumber frame has a smaller number than original frame" # noqa
report_items[msg].append(src_path)
self.log.warning("{} <{}>".format(
msg, dst_frame))
continue
frame = dst_frame
for src_path, frame in sources_and_frames.items():
args[0] = src_path
# Renumber frames
if renumber_frame and frame is not None:
# Calculate offset between
# first frame and current frame
# - '0' for first frame
offset = frame_offset - int(first_frame)
# Add offset to new frame start
dst_frame = int(frame) + offset
if dst_frame < 0:
msg = "Renumber frame has a smaller number than original frame" # noqa
report_items[msg].append(src_path)
self.log.warning("{} <{}>".format(
msg, dst_frame))
continue
frame = dst_frame
if frame is not None:
anatomy_data["frame"] = frame
new_report_items, uploaded = deliver_single_file(*args)
report_items.update(new_report_items)
self._update_progress(uploaded)
else: # fallback for Pype2 and representations without files
frame = repre["context"].get("frame")
if frame:
repre["context"]["frame"] = len(str(frame)) * "#"
if not frame:
new_report_items, uploaded = deliver_single_file(*args)
else:
new_report_items, uploaded = deliver_sequence(*args)
if frame is not None:
if repre["context"].get("frame"):
template_data["frame"] = frame
elif repre["context"].get("udim"):
template_data["udim"] = frame
else:
# Fallback
self.log.warning(
"Representation context has no frame or udim"
" data. Supplying sequence frame to '{frame}'"
" formatting data."
)
template_data["frame"] = frame
new_report_items, uploaded = deliver_single_file(*args)
report_items.update(new_report_items)
self._update_progress(uploaded)
@ -339,8 +352,8 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
def _get_selected_repres(self):
"""Returns list of representation names filtered from checkboxes."""
selected_repres = []
for repre_name, chckbox in self._representation_checkboxes.items():
if chckbox.isChecked():
for repre_name, checkbox in self._representation_checkboxes.items():
if checkbox.isChecked():
selected_repres.append(repre_name)
return selected_repres

View file

@ -0,0 +1,591 @@
import logging
import os
from pathlib import Path
from collections import defaultdict
from qtpy import QtWidgets, QtCore, QtGui
from ayon_api import get_representations
from ayon_core.pipeline import load, Anatomy
from ayon_core import resources, style
from ayon_core.lib.transcoding import (
IMAGE_EXTENSIONS,
get_oiio_info_for_input,
)
from ayon_core.lib import (
get_ffprobe_data,
is_oiio_supported,
)
from ayon_core.pipeline.load import get_representation_path_with_anatomy
from ayon_core.tools.utils import show_message_dialog
OTIO = None
FRAME_SPLITTER = "__frame_splitter__"
def _import_otio():
global OTIO
if OTIO is None:
import opentimelineio
OTIO = opentimelineio
class ExportOTIO(load.ProductLoaderPlugin):
"""Export selected versions to OpenTimelineIO."""
is_multiple_contexts_compatible = True
sequence_splitter = "__sequence_splitter__"
representations = {"*"}
product_types = {"*"}
tool_names = ["library_loader"]
label = "Export OTIO"
order = 35
icon = "save"
color = "#d8d8d8"
def load(self, contexts, name=None, namespace=None, options=None):
_import_otio()
try:
dialog = ExportOTIOOptionsDialog(contexts, self.log)
dialog.exec_()
except Exception:
self.log.error("Failed to export OTIO.", exc_info=True)
class ExportOTIOOptionsDialog(QtWidgets.QDialog):
"""Dialog to select template where to deliver selected representations."""
def __init__(self, contexts, log=None, parent=None):
# Not all hosts have OpenTimelineIO available.
self.log = log
super().__init__(parent=parent)
self.setWindowTitle("AYON - Export OTIO")
icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
self.setWindowIcon(icon)
self.setWindowFlags(
QtCore.Qt.WindowStaysOnTopHint
| QtCore.Qt.WindowCloseButtonHint
| QtCore.Qt.WindowMinimizeButtonHint
)
project_name = contexts[0]["project"]["name"]
versions_by_id = {
context["version"]["id"]: context["version"]
for context in contexts
}
repre_entities = list(get_representations(
project_name, version_ids=set(versions_by_id)
))
version_by_representation_id = {
repre_entity["id"]: versions_by_id[repre_entity["versionId"]]
for repre_entity in repre_entities
}
version_path_by_id = {}
representations_by_version_id = {}
for context in contexts:
version_id = context["version"]["id"]
if version_id in version_path_by_id:
continue
representations_by_version_id[version_id] = []
version_path_by_id[version_id] = "/".join([
context["folder"]["path"],
context["product"]["name"],
context["version"]["name"]
])
for repre_entity in repre_entities:
representations_by_version_id[repre_entity["versionId"]].append(
repre_entity
)
all_representation_names = list(sorted({
repo_entity["name"]
for repo_entity in repre_entities
}))
input_widget = QtWidgets.QWidget(self)
input_layout = QtWidgets.QGridLayout(input_widget)
input_layout.setContentsMargins(8, 8, 8, 8)
row = 0
repres_label = QtWidgets.QLabel("Representations:", input_widget)
input_layout.addWidget(repres_label, row, 0)
repre_name_buttons = []
for idx, name in enumerate(all_representation_names):
repre_name_btn = QtWidgets.QPushButton(name, input_widget)
input_layout.addWidget(
repre_name_btn, row, idx + 1,
alignment=QtCore.Qt.AlignCenter
)
repre_name_btn.clicked.connect(self._toggle_all)
repre_name_buttons.append(repre_name_btn)
row += 1
representation_widgets = defaultdict(list)
items = representations_by_version_id.items()
for version_id, representations in items:
version_path = version_path_by_id[version_id]
label_widget = QtWidgets.QLabel(version_path, input_widget)
input_layout.addWidget(label_widget, row, 0)
repres_by_name = {
repre_entity["name"]: repre_entity
for repre_entity in representations
}
radio_group = QtWidgets.QButtonGroup(input_widget)
for idx, name in enumerate(all_representation_names):
if name in repres_by_name:
widget = QtWidgets.QRadioButton(input_widget)
radio_group.addButton(widget)
representation_widgets[name].append(
{
"widget": widget,
"representation": repres_by_name[name]
}
)
else:
widget = QtWidgets.QLabel("x", input_widget)
input_layout.addWidget(
widget, row, idx + 1, 1, 1,
alignment=QtCore.Qt.AlignCenter
)
row += 1
export_widget = QtWidgets.QWidget(self)
options_widget = QtWidgets.QWidget(export_widget)
uri_label = QtWidgets.QLabel("URI paths:", options_widget)
uri_path_format = QtWidgets.QCheckBox(options_widget)
uri_path_format.setToolTip(
"Use URI paths (file:///) instead of absolute paths. "
"This is useful when the OTIO file will be used on Foundry Hiero."
)
button_output_path = QtWidgets.QPushButton(
"Output Path:", options_widget
)
button_output_path.setToolTip(
"Click to select the output path for the OTIO file."
)
line_edit_output_path = QtWidgets.QLineEdit(
(Path.home() / f"{project_name}.otio").as_posix(),
options_widget
)
options_layout = QtWidgets.QHBoxLayout(options_widget)
options_layout.setContentsMargins(0, 0, 0, 0)
options_layout.addWidget(uri_label)
options_layout.addWidget(uri_path_format)
options_layout.addWidget(button_output_path)
options_layout.addWidget(line_edit_output_path)
button_export = QtWidgets.QPushButton("Export", export_widget)
export_layout = QtWidgets.QVBoxLayout(export_widget)
export_layout.setContentsMargins(0, 0, 0, 0)
export_layout.addWidget(options_widget, 0)
export_layout.addWidget(button_export, 0)
main_layout = QtWidgets.QVBoxLayout(self)
main_layout.setContentsMargins(8, 8, 8, 8)
main_layout.addWidget(input_widget, 0)
main_layout.addStretch(1)
# TODO add line spacer?
main_layout.addSpacing(30)
main_layout.addWidget(export_widget, 0)
button_export.clicked.connect(self._on_export_click)
button_output_path.clicked.connect(self._set_output_path)
self._project_name = project_name
self._version_path_by_id = version_path_by_id
self._version_by_representation_id = version_by_representation_id
self._representation_widgets = representation_widgets
self._repre_name_buttons = repre_name_buttons
self._uri_path_format = uri_path_format
self._button_output_path = button_output_path
self._line_edit_output_path = line_edit_output_path
self._button_export = button_export
self._first_show = True
def showEvent(self, event):
super().showEvent(event)
if self._first_show:
self._first_show = False
self.setStyleSheet(style.load_stylesheet())
def _toggle_all(self):
representation_name = self.sender().text()
for item in self._representation_widgets[representation_name]:
item["widget"].setChecked(True)
def _set_output_path(self):
file_path, _ = QtWidgets.QFileDialog.getSaveFileName(
None, "Save OTIO file.", "", "OTIO Files (*.otio)"
)
if file_path:
self._line_edit_output_path.setText(file_path)
def _on_export_click(self):
output_path = self._line_edit_output_path.text()
# Validate output path is not empty.
if not output_path:
show_message_dialog(
"Missing output path",
(
"Output path is empty. Please enter a path to export the "
"OTIO file to."
),
level="critical",
parent=self
)
return
# Validate output path ends with .otio.
if not output_path.endswith(".otio"):
show_message_dialog(
"Wrong extension.",
(
"Output path needs to end with \".otio\"."
),
level="critical",
parent=self
)
return
representations = []
for name, items in self._representation_widgets.items():
for item in items:
if item["widget"].isChecked():
representations.append(item["representation"])
anatomy = Anatomy(self._project_name)
clips_data = {}
for representation in representations:
version = self._version_by_representation_id[
representation["id"]
]
name = (
f'{self._version_path_by_id[version["id"]]}'
f'/{representation["name"]}'
).replace("/", "_")
clips_data[name] = {
"representation": representation,
"anatomy": anatomy,
"frames": (
version["attrib"]["frameEnd"]
- version["attrib"]["frameStart"]
),
"framerate": version["attrib"]["fps"],
}
self.export_otio(clips_data, output_path)
# Feedback about success.
show_message_dialog(
"Success!",
"Export was successful.",
level="info",
parent=self
)
self.close()
def create_clip(self, name, clip_data, timeline_framerate):
representation = clip_data["representation"]
anatomy = clip_data["anatomy"]
frames = clip_data["frames"]
framerate = clip_data["framerate"]
# Get path to representation with correct frame number
repre_path = get_representation_path_with_anatomy(
representation, anatomy)
media_start_frame = clip_start_frame = 0
media_framerate = framerate
if file_metadata := get_image_info_metadata(
repre_path, ["timecode", "duration", "framerate"], self.log
):
            # get media framerate and convert to float with 4 decimal places
media_framerate = file_metadata["framerate"]
media_framerate = float(f"{media_framerate:.4f}")
framerate = float(f"{timeline_framerate:.4f}")
media_start_frame = self.get_timecode_start_frame(
media_framerate, file_metadata
)
clip_start_frame = self.get_timecode_start_frame(
timeline_framerate, file_metadata
)
if "duration" in file_metadata:
frames = int(float(file_metadata["duration"]) * framerate)
repre_path = Path(repre_path)
first_frame = representation["context"].get("frame")
if first_frame is None:
media_range = OTIO.opentime.TimeRange(
start_time=OTIO.opentime.RationalTime(
media_start_frame, media_framerate
),
duration=OTIO.opentime.RationalTime(
frames, media_framerate),
)
clip_range = OTIO.opentime.TimeRange(
start_time=OTIO.opentime.RationalTime(
clip_start_frame, timeline_framerate
),
duration=OTIO.opentime.RationalTime(
frames, timeline_framerate),
)
# Use 'repre_path' as single file
media_reference = OTIO.schema.ExternalReference(
available_range=media_range,
target_url=self.convert_to_uri_or_posix(repre_path),
)
else:
            # This is a sequence
repre_files = [
file["path"].format(root=anatomy.roots)
for file in representation["files"]
]
# Change frame in representation context to get path with frame
# splitter.
representation["context"]["frame"] = FRAME_SPLITTER
frame_repre_path = get_representation_path_with_anatomy(
representation, anatomy
)
frame_repre_path = Path(frame_repre_path)
repre_dir, repre_filename = (
frame_repre_path.parent, frame_repre_path.name)
# Get sequence prefix and suffix
file_prefix, file_suffix = repre_filename.split(FRAME_SPLITTER)
# Get frame number from path as string to get frame padding
frame_str = str(repre_path)[len(file_prefix):][:len(file_suffix)]
frame_padding = len(frame_str)
media_range = OTIO.opentime.TimeRange(
start_time=OTIO.opentime.RationalTime(
media_start_frame, media_framerate
),
duration=OTIO.opentime.RationalTime(
len(repre_files), media_framerate
),
)
clip_range = OTIO.opentime.TimeRange(
start_time=OTIO.opentime.RationalTime(
clip_start_frame, timeline_framerate
),
duration=OTIO.opentime.RationalTime(
len(repre_files), timeline_framerate
),
)
media_reference = OTIO.schema.ImageSequenceReference(
available_range=media_range,
start_frame=int(first_frame),
frame_step=1,
rate=framerate,
target_url_base=f"{self.convert_to_uri_or_posix(repre_dir)}/",
name_prefix=file_prefix,
name_suffix=file_suffix,
frame_zero_padding=frame_padding,
)
return OTIO.schema.Clip(
name=name, media_reference=media_reference, source_range=clip_range
)
def convert_to_uri_or_posix(self, path: Path) -> str:
"""Convert path to URI or Posix path.
Args:
path (Path): Path to convert.
Returns:
str: Path as URI or Posix path.
"""
if self._uri_path_format.isChecked():
return path.as_uri()
return path.as_posix()
def get_timecode_start_frame(self, framerate, file_metadata):
# use otio to convert timecode into frame number
timecode_start_frame = OTIO.opentime.from_timecode(
file_metadata["timecode"], framerate)
return timecode_start_frame.to_frames()
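    # Example (illustrative): timecode "01:00:00:00" at 24 fps resolves
    # to frame 86400 (24 fps * 3600 seconds).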
def export_otio(self, clips_data, output_path):
# first find the highest framerate and set it as default framerate
# for the timeline
timeline_framerate = 0
for clip_data in clips_data.values():
framerate = clip_data["framerate"]
if framerate > timeline_framerate:
timeline_framerate = framerate
        # reduce decimal places to 4 - otio does not like more
timeline_framerate = float(f"{timeline_framerate:.4f}")
# create clips from the representations
clips = [
self.create_clip(name, clip_data, timeline_framerate)
for name, clip_data in clips_data.items()
]
timeline = OTIO.schema.timeline_from_clips(clips)
# set the timeline framerate to the highest framerate
timeline.global_start_time = OTIO.opentime.RationalTime(
0, timeline_framerate)
OTIO.adapters.write_to_file(timeline, output_path)
def get_image_info_metadata(
path_to_file,
keys=None,
logger=None,
):
"""Get flattened metadata from image file
With combined approach via FFMPEG and OIIOTool.
At first it will try to detect if the image input is supported by
OpenImageIO. If it is then it gets the metadata from the image using
OpenImageIO. If it is not supported by OpenImageIO then it will try to
get the metadata using FFprobe.
Args:
path_to_file (str): Path to image file.
keys (list[str]): List of keys that should be returned. If None then
all keys are returned. Keys are expected all lowercase.
Additional keys are:
- "framerate" - will be created from "r_frame_rate" or
"framespersecond" and evaluated to float value.
logger (logging.Logger): Logger used for logging.
"""
if logger is None:
logger = logging.getLogger(__name__)
def _ffprobe_metadata_conversion(metadata):
"""Convert ffprobe metadata unified format."""
output = {}
for key, val in metadata.items():
if key in ("tags", "disposition"):
output.update(val)
else:
output[key] = val
return output
def _get_video_metadata_from_ffprobe(ffprobe_stream):
"""Extract video metadata from ffprobe stream.
Args:
ffprobe_stream (dict): Stream data obtained from ffprobe.
Returns:
dict: Video metadata extracted from the ffprobe stream.
"""
video_stream = None
for stream in ffprobe_stream["streams"]:
if stream["codec_type"] == "video":
video_stream = stream
break
metadata_stream = _ffprobe_metadata_conversion(video_stream)
return metadata_stream
metadata_stream = None
ext = os.path.splitext(path_to_file)[-1].lower()
if ext not in IMAGE_EXTENSIONS:
logger.info(
(
'File extension "{}" is not supported by OpenImageIO.'
" Trying to get metadata using FFprobe."
).format(ext)
)
ffprobe_stream = get_ffprobe_data(path_to_file, logger)
if "streams" in ffprobe_stream and len(ffprobe_stream["streams"]) > 0:
metadata_stream = _get_video_metadata_from_ffprobe(ffprobe_stream)
if not metadata_stream and is_oiio_supported():
oiio_stream = get_oiio_info_for_input(path_to_file, logger=logger)
if "attribs" in (oiio_stream or {}):
metadata_stream = {}
for key, val in oiio_stream["attribs"].items():
if "smpte:" in key.lower():
key = key.replace("smpte:", "")
metadata_stream[key.lower()] = val
for key, val in oiio_stream.items():
if key == "attribs":
continue
metadata_stream[key] = val
else:
logger.info(
(
"OpenImageIO is not supported on this system."
" Trying to get metadata using FFprobe."
)
)
ffprobe_stream = get_ffprobe_data(path_to_file, logger)
if "streams" in ffprobe_stream and len(ffprobe_stream["streams"]) > 0:
metadata_stream = _get_video_metadata_from_ffprobe(ffprobe_stream)
if not metadata_stream:
logger.warning("Failed to get metadata from image file.")
return {}
if keys is None:
return metadata_stream
    # create 'framerate' key from available ffmpeg 'r_frame_rate'
    # or oiiotool 'framespersecond' and evaluate its string expression
    # into a float value
if (
"r_frame_rate" in metadata_stream
or "framespersecond" in metadata_stream
):
rate_info = metadata_stream.get("r_frame_rate")
if rate_info is None:
rate_info = metadata_stream.get("framespersecond")
# calculate framerate from string expression
if "/" in str(rate_info):
            numerator, denominator = str(rate_info).split("/")
            rate_info = float(numerator) / float(denominator)
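            # Example (illustrative): "24000/1001" evaluates to ~23.976 fps.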
try:
metadata_stream["framerate"] = float(str(rate_info))
except Exception as e:
logger.warning(
"Failed to evaluate '{}' value to framerate. Error: {}".format(
rate_info, e
)
)
# aggregate all required metadata from prepared metadata stream
output = {}
for key in keys:
for k, v in metadata_stream.items():
if key == k:
output[key] = v
break
if isinstance(v, dict) and key in v:
output[key] = v[key]
break
return output

View file

@ -15,5 +15,3 @@ class CollectAddons(pyblish.api.ContextPlugin):
manager = AddonsManager()
context.data["ayonAddonsManager"] = manager
context.data["ayonAddons"] = manager.addons_by_name
# Backwards compatibility - remove
context.data["openPypeModules"] = manager.addons_by_name

View file

@ -138,7 +138,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
folder_path_by_id = {}
for instance in context:
folder_entity = instance.data.get("folderEntity")
# Skip if instnace does not have filled folder entity
# Skip if instance does not have filled folder entity
if not folder_entity:
continue
folder_id = folder_entity["id"]
@ -217,9 +217,8 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
joined_paths = ", ".join(
["\"{}\"".format(path) for path in not_found_task_paths]
)
self.log.warning((
"Not found task entities with paths \"{}\"."
).format(joined_paths))
            self.log.warning(
                f"Task entities were not found for paths {joined_paths}.")
def fill_latest_versions(self, context, project_name):
"""Try to find latest version for each instance's product name.
@ -321,7 +320,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
use_context_version = instance.data["followWorkfileVersion"]
if use_context_version:
version_number = context.data("version")
version_number = context.data.get("version")
# Even if 'follow_workfile_version' is enabled, it may not be set
# because workfile version was not collected to 'context.data'
@ -385,8 +384,19 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
json.dumps(anatomy_data, indent=4)
))
# make render layer available in anatomy data
render_layer = instance.data.get("renderlayer")
if render_layer:
anatomy_data["renderlayer"] = render_layer
# make aov name available in anatomy data
aov = instance.data.get("aov")
if aov:
anatomy_data["aov"] = aov
def _fill_folder_data(self, instance, project_entity, anatomy_data):
# QUESTION should we make sure that all folder data are poped if
# QUESTION: should we make sure that all folder data are popped if
# folder data cannot be found?
# - 'folder', 'hierarchy', 'parent', 'folder'
folder_entity = instance.data.get("folderEntity")
@ -429,7 +439,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
})
def _fill_task_data(self, instance, task_types_by_name, anatomy_data):
# QUESTION should we make sure that all task data are poped if task
# QUESTION: should we make sure that all task data are popped if task
# data cannot be resolved?
# - 'task'

View file

@ -53,8 +53,9 @@ class CollectContextEntities(pyblish.api.ContextPlugin):
context.data["folderEntity"] = folder_entity
context.data["taskEntity"] = task_entity
folder_attributes = folder_entity["attrib"]
context_attributes = (
task_entity["attrib"] if task_entity else folder_entity["attrib"]
)
# Task type
task_type = None
@ -63,12 +64,12 @@ class CollectContextEntities(pyblish.api.ContextPlugin):
context.data["taskType"] = task_type
frame_start = folder_attributes.get("frameStart")
frame_start = context_attributes.get("frameStart")
if frame_start is None:
frame_start = 1
self.log.warning("Missing frame start. Defaulting to 1.")
frame_end = folder_attributes.get("frameEnd")
frame_end = context_attributes.get("frameEnd")
if frame_end is None:
frame_end = 2
self.log.warning("Missing frame end. Defaulting to 2.")
@ -76,8 +77,8 @@ class CollectContextEntities(pyblish.api.ContextPlugin):
context.data["frameStart"] = frame_start
context.data["frameEnd"] = frame_end
handle_start = folder_attributes.get("handleStart") or 0
handle_end = folder_attributes.get("handleEnd") or 0
handle_start = context_attributes.get("handleStart") or 0
handle_end = context_attributes.get("handleEnd") or 0
context.data["handleStart"] = int(handle_start)
context.data["handleEnd"] = int(handle_end)
@ -87,7 +88,7 @@ class CollectContextEntities(pyblish.api.ContextPlugin):
context.data["frameStartHandle"] = frame_start_h
context.data["frameEndHandle"] = frame_end_h
context.data["fps"] = folder_attributes["fps"]
context.data["fps"] = context_attributes["fps"]
def _get_folder_entity(self, project_name, folder_path):
if not folder_path:
@ -113,4 +114,4 @@ class CollectContextEntities(pyblish.api.ContextPlugin):
"Task '{}' was not found in project '{}'.".format(
task_path, project_name)
)
return task_entity
return task_entity
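A small illustration of the fallback introduced above: task attributes take precedence over folder attributes whenever a task entity is present (the values here are made up):

```python
folder_entity = {"attrib": {"frameStart": 1001, "fps": 24.0}}
task_entity = {"attrib": {"frameStart": 1005, "fps": 24.0}}

context_attributes = (
    task_entity["attrib"] if task_entity else folder_entity["attrib"]
)

# Frame range now comes from the task, not the folder.
assert context_attributes["frameStart"] == 1005
```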

View file

@ -13,8 +13,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
label = "Collect Hierarchy"
order = pyblish.api.CollectorOrder - 0.076
families = ["shot"]
hosts = ["resolve", "hiero", "flame"]
hosts = ["resolve", "hiero", "flame", "traypublisher"]
def process(self, context):
project_name = context.data["projectName"]
@ -32,36 +31,49 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
product_type = instance.data["productType"]
families = instance.data["families"]
# exclude other families then self.families with intersection
if not set(self.families).intersection(
set(families + [product_type])
):
# exclude other families then "shot" with intersection
if "shot" not in (families + [product_type]):
self.log.debug("Skipping not a shot: {}".format(families))
continue
# exclude if not masterLayer True
# Skip if is not a hero track
if not instance.data.get("heroTrack"):
self.log.debug("Skipping not a shot from hero track")
continue
shot_data = {
"entity_type": "folder",
# WARNING Default folder type is hardcoded
# suppose that all instances are Shots
"folder_type": "Shot",
# WARNING unless overwritten, default folder type is hardcoded to shot
"folder_type": instance.data.get("folder_type") or "Shot",
"tasks": instance.data.get("tasks") or {},
"comments": instance.data.get("comments", []),
"attributes": {
"handleStart": instance.data["handleStart"],
"handleEnd": instance.data["handleEnd"],
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"clipIn": instance.data["clipIn"],
"clipOut": instance.data["clipOut"],
"fps": instance.data["fps"],
"resolutionWidth": instance.data["resolutionWidth"],
"resolutionHeight": instance.data["resolutionHeight"],
"pixelAspect": instance.data["pixelAspect"],
},
}
shot_data["attributes"] = {}
SHOT_ATTRS = (
"handleStart",
"handleEnd",
"frameStart",
"frameEnd",
"clipIn",
"clipOut",
"fps",
"resolutionWidth",
"resolutionHeight",
"pixelAspect",
)
for shot_attr in SHOT_ATTRS:
attr_value = instance.data.get(shot_attr)
if attr_value is None:
# Shot attribute might not be defined (e.g. CSV ingest)
self.log.debug(
"%s shot attribute is not defined for instance.",
shot_attr
)
continue
shot_data["attributes"][shot_attr] = attr_value
# Split by '/' for AYON where asset is a path
name = instance.data["folderPath"].split("/")[-1]
actual = {name: shot_data}
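For reference, a sketch of the shot payload the collector builds after this change, assuming a hero-track shot instance with all shot attributes collected (all values are illustrative):

```python
actual = {
    "sh010": {
        "entity_type": "folder",
        "folder_type": "Shot",  # or instance-provided folder_type
        "tasks": {},
        "comments": [],
        "attributes": {
            "handleStart": 10,
            "handleEnd": 10,
            "frameStart": 1001,
            "frameEnd": 1100,
            "clipIn": 86400,
            "clipOut": 86499,
            "fps": 24.0,
            "resolutionWidth": 1920,
            "resolutionHeight": 1080,
            "pixelAspect": 1.0,
        },
    }
}
```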

View file

@ -7,7 +7,7 @@ class CollectInputRepresentationsToVersions(pyblish.api.ContextPlugin):
"""Converts collected input representations to input versions.
Any data in `instance.data["inputRepresentations"]` gets converted into
`instance.data["inputVersions"]` as supported in OpenPype v3.
`instance.data["inputVersions"]` as supported in OpenPype.
"""
# This is a ContextPlugin because then we can query the database only once

View file

@ -29,6 +29,10 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin):
otio_range_with_handles
)
if not instance.data.get("otioClip"):
self.log.debug("Skipping collect OTIO frame range.")
return
# get basic variables
otio_clip = instance.data["otioClip"]
workfile_start = instance.data["workfileFrameStart"]

View file

@ -95,9 +95,42 @@ class CollectOtioReview(pyblish.api.InstancePlugin):
instance.data["label"] = label + " (review)"
instance.data["families"] += ["review", "ftrack"]
instance.data["otioReviewClips"] = otio_review_clips
self.log.info(
"Creating review track: {}".format(otio_review_clips))
# get colorspace from metadata if available
# get metadata from first clip with media reference
r_otio_cl = next(
(
clip
for clip in otio_review_clips
if (
isinstance(clip, otio.schema.Clip)
and clip.media_reference
)
),
None
)
if r_otio_cl is not None:
media_ref = r_otio_cl.media_reference
media_metadata = media_ref.metadata
# TODO: we might need some alternative method since
# native OTIO exports do not support ayon metadata
review_colorspace = media_metadata.get(
"ayon.source.colorspace"
)
if review_colorspace is None:
# Backwards compatibility for older scenes
review_colorspace = media_metadata.get(
"openpype.source.colourtransform"
)
if review_colorspace:
instance.data["reviewColorspace"] = review_colorspace
self.log.info(
"Review colorspace: {}".format(review_colorspace))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
self.log.debug(
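The metadata lookup above can be sketched in isolation. A minimal sketch assuming an OTIO clip whose media reference carries the AYON metadata keys shown above:

```python
import opentimelineio as otio

ref = otio.schema.ExternalReference(target_url="/path/review.mov")
ref.metadata["ayon.source.colorspace"] = "ACES - ACEScg"
clip = otio.schema.Clip(name="review", media_reference=ref)

media_metadata = clip.media_reference.metadata
review_colorspace = (
    media_metadata.get("ayon.source.colorspace")
    # Backwards compatibility for older scenes
    or media_metadata.get("openpype.source.colourtransform")
)
```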

View file

@ -10,12 +10,16 @@ import os
import clique
import pyblish.api
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import (
get_publish_template_name
)
class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
class CollectOtioSubsetResources(
pyblish.api.InstancePlugin,
publish.ColormanagedPyblishPluginMixin
):
"""Get Resources for a product version"""
label = "Collect OTIO Subset Resources"
@ -190,9 +194,13 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
instance.data["originalDirname"] = self.staging_dir
if repre:
colorspace = instance.data.get("colorspace")
# add colorspace data to representation
self.set_representation_colorspace(
repre, instance.context, colorspace)
# add representation to instance data
instance.data["representations"].append(repre)
self.log.debug(">>>>>>>> {}".format(repre))
self.log.debug(instance.data)

View file

@ -138,10 +138,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
def process(self, context):
self._context = context
publish_data_paths = (
os.environ.get("AYON_PUBLISH_DATA")
or os.environ.get("OPENPYPE_PUBLISH_DATA")
)
publish_data_paths = os.environ.get("AYON_PUBLISH_DATA")
if not publish_data_paths:
raise KnownPublishError("Missing `AYON_PUBLISH_DATA`")

View file

@ -47,8 +47,9 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
return
if not context.data.get('currentFile'):
raise KnownPublishError("Cannot get current workfile path. "
"Make sure your scene is saved.")
self.log.error("Cannot get current workfile path. "
"Make sure your scene is saved.")
return
filename = os.path.basename(context.data.get('currentFile'))

View file

@ -199,7 +199,7 @@ class ExtractBurnin(publish.Extractor):
if not burnins_per_repres:
self.log.debug(
"Skipped instance. No representations found matching a burnin"
"definition in: %s", burnin_defs
" definition in: %s", burnin_defs
)
return
@ -399,7 +399,7 @@ class ExtractBurnin(publish.Extractor):
add_repre_files_for_cleanup(instance, new_repre)
# Cleanup temp staging dir after procesisng of output definitions
# Cleanup temp staging dir after processing of output definitions
if do_convert:
temp_dir = repre["stagingDir"]
shutil.rmtree(temp_dir)
@ -420,6 +420,12 @@ class ExtractBurnin(publish.Extractor):
self.log.debug("Removed: \"{}\"".format(filepath))
def _get_burnin_options(self):
"""Get the burnin options from `ExtractBurnin` settings.
Returns:
dict[str, Any]: Burnin options.
"""
# Prepare burnin options
burnin_options = copy.deepcopy(self.default_options)
if self.options:
@ -696,7 +702,7 @@ class ExtractBurnin(publish.Extractor):
"""Prepare data for representation.
Args:
instance (Instance): Currently processed Instance.
instance (pyblish.api.Instance): Currently processed Instance.
repre (dict): Currently processed representation.
burnin_data (dict): Copy of basic burnin data based on instance
data.
@ -752,9 +758,11 @@ class ExtractBurnin(publish.Extractor):
Args:
profile (dict): Profile from presets matching current context.
instance (pyblish.api.Instance): Publish instance.
Returns:
list: Contain all valid output definitions.
list[dict[str, Any]]: Contain all valid output definitions.
"""
filtered_burnin_defs = []
@ -773,12 +781,11 @@ class ExtractBurnin(publish.Extractor):
if not self.families_filter_validation(
families, families_filters
):
self.log.debug((
"Skipped burnin definition \"{}\". Family"
" filters ({}) does not match current instance families: {}"
).format(
filename_suffix, str(families_filters), str(families)
))
self.log.debug(
f"Skipped burnin definition \"{filename_suffix}\"."
f" Family filters ({families_filters}) does not match"
f" current instance families: {families}"
)
continue
# Burnin values

View file

@ -5,7 +5,6 @@ import pyblish.api
from ayon_core.pipeline import publish
from ayon_core.lib import (
is_oiio_supported,
)
@ -122,13 +121,22 @@ class ExtractOIIOTranscode(publish.Extractor):
transcoding_type = output_def["transcoding_type"]
target_colorspace = view = display = None
# NOTE: we use colorspace_data as the fallback values for
# the target colorspace.
if transcoding_type == "colorspace":
# TODO: Should we fallback to the colorspace
# (which used as source above) ?
# or should we compute the target colorspace from
# current view and display ?
target_colorspace = (output_def["colorspace"] or
colorspace_data.get("colorspace"))
else:
view = output_def["view"] or colorspace_data.get("view")
display = (output_def["display"] or
colorspace_data.get("display"))
elif transcoding_type == "display_view":
display_view = output_def["display_view"]
view = display_view["view"] or colorspace_data.get("view")
display = (
display_view["display"]
or colorspace_data.get("display")
)
# both could be already collected by DCC,
# but could be overwritten when transcoding
@ -145,12 +153,15 @@ class ExtractOIIOTranscode(publish.Extractor):
files_to_convert = self._translate_to_sequence(
files_to_convert)
self.log.debug("Files to convert: {}".format(files_to_convert))
for file_name in files_to_convert:
self.log.debug("Transcoding file: `{}`".format(file_name))
input_path = os.path.join(original_staging_dir,
file_name)
output_path = self._get_output_file_path(input_path,
new_staging_dir,
output_extension)
convert_colorspace(
input_path,
output_path,
@ -192,7 +203,7 @@ class ExtractOIIOTranscode(publish.Extractor):
new_repre["files"] = new_repre["files"][0]
# If the source representation has "review" tag, but its not
# part of the output defintion tags, then both the
# part of the output definition tags, then both the
# representations will be transcoded in ExtractReview and
# their outputs will clash in integration.
if "review" in repre.get("tags", []):

View file

@ -37,6 +37,9 @@ class ExtractColorspaceData(publish.Extractor,
# get colorspace settings
context = instance.context
# colorspace name could be kept in instance.data
colorspace = instance.data.get("colorspace")
# loop representations
for representation in representations:
# skip if colorspaceData is already at representation
@ -44,5 +47,4 @@ class ExtractColorspaceData(publish.Extractor,
continue
self.set_representation_colorspace(
representation, context
)
representation, context, colorspace)

View file

@ -22,7 +22,6 @@ class ExtractHierarchyToAYON(pyblish.api.ContextPlugin):
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract Hierarchy To AYON"
families = ["clip", "shot"]
def process(self, context):
if not context.data.get("hierarchyContext"):
@ -154,7 +153,9 @@ class ExtractHierarchyToAYON(pyblish.api.ContextPlugin):
# TODO check if existing entity have 'task' type
if task_entity is None:
task_entity = entity_hub.add_new_task(
task_info["type"],
task_type=task_info["type"],
# TODO change 'parent_id' to 'folder_id' when ayon api
# is updated
parent_id=entity.id,
name=task_name
)
@ -182,7 +183,7 @@ class ExtractHierarchyToAYON(pyblish.api.ContextPlugin):
folder_type = "Folder"
child_entity = entity_hub.add_new_folder(
folder_type,
folder_type=folder_type,
parent_id=entity.id,
name=child_name
)

View file

@ -26,7 +26,10 @@ from ayon_core.lib import (
from ayon_core.pipeline import publish
class ExtractOTIOReview(publish.Extractor):
class ExtractOTIOReview(
publish.Extractor,
publish.ColormanagedPyblishPluginMixin
):
"""
Extract OTIO timeline into one concatenated image sequence file.
@ -49,7 +52,6 @@ class ExtractOTIOReview(publish.Extractor):
hosts = ["resolve", "hiero", "flame"]
# plugin default attributes
temp_file_head = "tempFile."
to_width = 1280
to_height = 720
output_ext = ".jpg"
@ -58,24 +60,33 @@ class ExtractOTIOReview(publish.Extractor):
# Not all hosts can import these modules.
import opentimelineio as otio
from ayon_core.pipeline.editorial import (
otio_range_to_frame_range,
make_sequence_collection
make_sequence_collection,
remap_range_on_file_sequence,
is_clip_from_media_sequence
)
# TODO refactor from using instance variable
self.temp_file_head = self._get_folder_name_based_prefix(instance)
# TODO: convert resulting image sequence to mp4
# get otio clip and other time info from instance clip
# TODO: what if handles are different in `versionData`?
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
otio_review_clips = instance.data["otioReviewClips"]
otio_review_clips = instance.data.get("otioReviewClips")
if otio_review_clips is None:
self.log.info(f"Instance `{instance}` has no otioReviewClips")
# add plugin wide attributes
self.representation_files = list()
self.used_frames = list()
self.representation_files = []
self.used_frames = []
self.workfile_start = int(instance.data.get(
"workfileFrameStart", 1001)) - handle_start
self.padding = len(str(self.workfile_start))
# NOTE: padding has to be converted from
# end frame since start could be lower than 1000
self.padding = len(str(instance.data.get("frameEnd", 1001)))
self.used_frames.append(self.workfile_start)
self.to_width = instance.data.get(
"resolutionWidth") or self.to_width
@ -83,8 +94,10 @@ class ExtractOTIOReview(publish.Extractor):
"resolutionHeight") or self.to_height
# skip instance if no reviewable data available
if (not isinstance(otio_review_clips[0], otio.schema.Clip)) \
and (len(otio_review_clips) == 1):
if (
not isinstance(otio_review_clips[0], otio.schema.Clip)
and len(otio_review_clips) == 1
):
self.log.warning(
"Instance `{}` has nothing to process".format(instance))
return
@ -97,84 +110,110 @@ class ExtractOTIOReview(publish.Extractor):
for index, r_otio_cl in enumerate(otio_review_clips):
# QUESTION: what if transition on clip?
# check if resolution is the same
width = self.to_width
height = self.to_height
otio_media = r_otio_cl.media_reference
media_metadata = otio_media.metadata
# get from media reference metadata source
if media_metadata.get("openpype.source.width"):
width = int(media_metadata.get("openpype.source.width"))
if media_metadata.get("openpype.source.height"):
height = int(media_metadata.get("openpype.source.height"))
# compare and reset
if width != self.to_width:
self.to_width = width
if height != self.to_height:
self.to_height = height
self.log.debug("> self.to_width x self.to_height: {} x {}".format(
self.to_width, self.to_height
))
# get frame range values
# Clip: compute process range from available media range.
src_range = r_otio_cl.source_range
start = src_range.start_time.value
duration = src_range.duration.value
available_range = None
self.actual_fps = src_range.duration.rate
# add available range only if not gap
if isinstance(r_otio_cl, otio.schema.Clip):
# check if resolution is the same as source
media_ref = r_otio_cl.media_reference
media_metadata = media_ref.metadata
# get from media reference metadata source
# TODO 'openpype' prefix should be removed (added 24/09/03)
# NOTE it looks like it is set only in hiero integration
res_data = {"width": self.to_width, "height": self.to_height}
for key in res_data:
for meta_prefix in ("ayon.source.", "openpype.source."):
meta_key = f"{meta_prefix}{key}"
value = media_metadata.get(meta_key)
if value is not None:
res_data[key] = value
break
self.to_width, self.to_height = res_data["width"], res_data["height"]
self.log.debug("> self.to_width x self.to_height: {} x {}".format(
self.to_width, self.to_height
))
available_range = r_otio_cl.available_range()
processing_range = None
self.actual_fps = available_range.duration.rate
start = src_range.start_time.rescaled_to(self.actual_fps)
duration = src_range.duration.rescaled_to(self.actual_fps)
# Temporary.
# Some AYON custom OTIO exporters were implemented with relative
# source range for image sequence. The following code maintains
# backward-compatibility by adjusting the available range
# while we are updating those.
if (
is_clip_from_media_sequence(r_otio_cl)
and available_range.start_time.to_frames() == media_ref.start_frame
and src_range.start_time.to_frames() < media_ref.start_frame
):
available_range = otio.opentime.TimeRange(
otio.opentime.RationalTime(0, rate=self.actual_fps),
available_range.duration,
)
# Gap: no media, generate range based on source range
else:
available_range = processing_range = None
self.actual_fps = src_range.duration.rate
start = src_range.start_time
duration = src_range.duration
# Create handle offsets.
clip_handle_start = otio.opentime.RationalTime(
handle_start,
rate=self.actual_fps,
)
clip_handle_end = otio.opentime.RationalTime(
handle_end,
rate=self.actual_fps,
)
# reframing handles conditions
if (len(otio_review_clips) > 1) and (index == 0):
# more clips | first clip reframing with handle
start -= handle_start
duration += handle_start
start -= clip_handle_start
duration += clip_handle_start
elif len(otio_review_clips) > 1 \
and (index == len(otio_review_clips) - 1):
and (index == len(otio_review_clips) - 1):
# more clips | last clip reframing with handle
duration += handle_end
duration += clip_handle_end
elif len(otio_review_clips) == 1:
# one clip | add both handles
start -= handle_start
duration += (handle_start + handle_end)
start -= clip_handle_start
duration += (clip_handle_start + clip_handle_end)
if available_range:
available_range = self._trim_available_range(
available_range, start, duration, self.actual_fps)
processing_range = self._trim_available_range(
available_range, start, duration)
# process all track items of the track
if isinstance(r_otio_cl, otio.schema.Clip):
# process Clip
media_ref = r_otio_cl.media_reference
metadata = media_ref.metadata
is_sequence = None
# check in two way if it is sequence
if hasattr(otio.schema, "ImageSequenceReference"):
# for OpenTimelineIO 0.13 and newer
if isinstance(media_ref,
otio.schema.ImageSequenceReference):
is_sequence = True
else:
# for OpenTimelineIO 0.12 and older
if metadata.get("padding"):
is_sequence = True
is_sequence = is_clip_from_media_sequence(r_otio_cl)
# File sequence way
if is_sequence:
# file sequence way
# Remap processing range to input file sequence.
processing_range_as_frames = (
processing_range.start_time.to_frames(),
processing_range.end_time_inclusive().to_frames()
)
first, last = remap_range_on_file_sequence(
r_otio_cl,
processing_range_as_frames,
)
input_fps = processing_range.start_time.rate
if hasattr(media_ref, "target_url_base"):
dirname = media_ref.target_url_base
head = media_ref.name_prefix
tail = media_ref.name_suffix
first, last = otio_range_to_frame_range(
available_range)
collection = clique.Collection(
head=head,
tail=tail,
@ -183,8 +222,8 @@ class ExtractOTIOReview(publish.Extractor):
collection.indexes.update(
[i for i in range(first, (last + 1))])
# render segment
self._render_seqment(
sequence=[dirname, collection])
self._render_segment(
sequence=[dirname, collection, input_fps])
# generate used frames
self._generate_used_frames(
len(collection.indexes))
@ -193,33 +232,54 @@ class ExtractOTIOReview(publish.Extractor):
# `ImageSequenceReference`
path = media_ref.target_url
collection_data = make_sequence_collection(
path, available_range, metadata)
path, processing_range, metadata)
dir_path, collection = collection_data
# render segment
self._render_seqment(
sequence=[dir_path, collection])
self._render_segment(
sequence=[dir_path, collection, input_fps])
# generate used frames
self._generate_used_frames(
len(collection.indexes))
# Single video way.
# Extraction via FFmpeg.
else:
# single video file way
path = media_ref.target_url
# Set extract range from 0 (FFmpeg ignores embedded timecode).
extract_range = otio.opentime.TimeRange(
otio.opentime.RationalTime(
(
processing_range.start_time.value
- available_range.start_time.value
),
rate=available_range.start_time.rate,
),
duration=processing_range.duration,
)
# render video file to sequence
self._render_seqment(
video=[path, available_range])
self._render_segment(
video=[path, extract_range])
# generate used frames
self._generate_used_frames(
available_range.duration.value)
processing_range.duration.value)
# QUESTION: what if nested track composition is in place?
else:
# at last process a Gap
self._render_seqment(gap=duration)
self._render_segment(gap=duration.to_frames())
# generate used frames
self._generate_used_frames(duration)
self._generate_used_frames(duration.to_frames())
# creating and registering representation
representation = self._create_representation(start, duration)
# add colorspace data to representation
if colorspace := instance.data.get("reviewColorspace"):
self.set_representation_colorspace(
representation, instance.context, colorspace
)
instance.data["representations"].append(representation)
self.log.info("Adding representation: {}".format(representation))
@ -265,7 +325,7 @@ class ExtractOTIOReview(publish.Extractor):
})
return representation_data
def _trim_available_range(self, avl_range, start, duration, fps):
def _trim_available_range(self, avl_range, start, duration):
"""
Trim available media range to source range.
@ -274,69 +334,87 @@ class ExtractOTIOReview(publish.Extractor):
Args:
avl_range (otio.time.TimeRange): media available time range
start (int): start frame
duration (int): duration frames
fps (float): frame rate
start (otio.time.RationalTime): start
duration (otio.time.RationalTime): duration
Returns:
otio.time.TimeRange: trimmed available range
"""
# Not all hosts can import these modules.
import opentimelineio as otio
from ayon_core.pipeline.editorial import (
trim_media_range,
range_from_frames
)
avl_start = int(avl_range.start_time.value)
src_start = int(avl_start + start)
avl_durtation = int(avl_range.duration.value)
def _round_to_frame(rational_time):
""" Handle rounding duration to frame.
"""
# OpentimelineIO >= 0.16.0
try:
return rational_time.round().to_frames()
self.need_offset = bool(avl_start != 0 and src_start != 0)
# OpentimelineIO < 0.16.0
except AttributeError:
return otio.opentime.RationalTime(
round(rational_time.value),
rate=rational_time.rate,
).to_frames()
# if media start is les then clip requires
if src_start < avl_start:
# calculate gap
gap_duration = avl_start - src_start
avl_start = avl_range.start_time
# An additional gap is required before the available
# range to conform source start point and head handles.
if start < avl_start:
gap_duration = avl_start - start
start = avl_start
duration -= gap_duration
gap_duration = _round_to_frame(gap_duration)
# create gap data to disk
self._render_seqment(gap=gap_duration)
self._render_segment(gap=gap_duration)
# generate used frames
self._generate_used_frames(gap_duration)
# fix start and end to correct values
start = 0
# An additional gap is required after the available
# range to conform to source end point + tail handles
# (media duration is shorter than clip requirement).
end_point = start + duration
avl_end_point = avl_range.end_time_exclusive()
if end_point > avl_end_point:
gap_duration = end_point - avl_end_point
duration -= gap_duration
# if media duration is shorter then clip requirement
if duration > avl_durtation:
# calculate gap
gap_start = int(src_start + avl_durtation)
gap_end = int(src_start + duration)
gap_duration = gap_end - gap_start
gap_duration = _round_to_frame(gap_duration)
# create gap data to disk
self._render_seqment(gap=gap_duration, end_offset=avl_durtation)
self._render_segment(
gap=gap_duration,
end_offset=duration.to_frames()
)
# generate used frames
self._generate_used_frames(gap_duration, end_offset=avl_durtation)
# fix duration lenght
duration = avl_durtation
self._generate_used_frames(
gap_duration,
end_offset=duration.to_frames()
)
# return correct trimmed range
return trim_media_range(
avl_range, range_from_frames(start, duration, fps)
avl_range,
otio.opentime.TimeRange(
start,
duration
)
)
def _render_seqment(self, sequence=None,
def _render_segment(self, sequence=None,
video=None, gap=None, end_offset=None):
"""
Render seqment into image sequence frames.
Render segment into image sequence frames.
Using ffmpeg to convert compatible video and image source
to defined image sequence format.
Args:
sequence (list): input dir path string, collection object in list
sequence (list): input dir path string, collection object, fps in list
video (list)[optional]: video_path string, otio_range in list
gap (int)[optional]: gap duration
end_offset (int)[optional]: offset gap frame start in frames
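The head/tail gap handling in `_trim_available_range` above can be illustrated with concrete numbers. A sketch assuming 24 fps media; every value below is hypothetical:

```python
import opentimelineio as otio

fps = 24.0
# Media exists for frames 10..99.
avl_range = otio.opentime.TimeRange(
    otio.opentime.RationalTime(10, fps),
    otio.opentime.RationalTime(90, fps),
)
# The clip plus handles asks for frames 5..104.
start = otio.opentime.RationalTime(5, fps)
duration = otio.opentime.RationalTime(100, fps)

# Head: 10 - 5 = 5 gap frames are rendered, then start snaps to 10.
# Tail: (5 + 100) - (10 + 90) = 5 gap frames follow the media.
head_gap = avl_range.start_time - start
tail_gap = (start + duration) - avl_range.end_time_exclusive()
assert head_gap.to_frames() == 5
assert tail_gap.to_frames() == 5
```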
@ -358,7 +436,7 @@ class ExtractOTIOReview(publish.Extractor):
input_extension = None
if sequence:
input_dir, collection = sequence
input_dir, collection, sequence_fps = sequence
in_frame_start = min(collection.indexes)
# converting image sequence to image sequence
@ -366,9 +444,28 @@ class ExtractOTIOReview(publish.Extractor):
input_path = os.path.join(input_dir, input_file)
input_extension = os.path.splitext(input_path)[-1]
# form command for rendering gap files
"""
Form Command for Rendering Sequence Files
To explicitly set the input frame range and preserve the frame
range, avoid silent dropped frames caused by input mismatch
with FFmpeg's default rate of 25.0 fps. For more info,
refer to the FFmpeg image2 demuxer.
Implicit:
- Input: 100 frames (24fps from metadata)
- Demuxer: video 25fps
- Output: 98 frames, dropped 2
Explicit with "-framerate":
- Input: 100 frames (24fps from metadata)
- Demuxer: video 24fps
- Output: 100 frames, no dropped frames
"""
command.extend([
"-start_number", str(in_frame_start),
"-framerate", str(sequence_fps),
"-i", input_path
])
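To make the comment above concrete, here is a sketch of the resulting input arguments for a hypothetical 24 fps EXR sequence starting at frame 1001 (the path and values are made up):

```python
command = ["ffmpeg"]
command.extend([
    "-start_number", "1001",  # first frame of the input sequence
    "-framerate", "24.0",     # explicit rate; prevents the 25 fps default
    "-i", "/tmp/plates/plate.%04d.exr",  # hypothetical sequence pattern
])
# ... output arguments follow, as in the plugin above.
```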
@ -443,16 +540,11 @@ class ExtractOTIOReview(publish.Extractor):
padding = "{{:0{}d}}".format(self.padding)
# create frame offset
offset = 0
if self.need_offset:
offset = 1
if end_offset:
new_frames = list()
start_frame = self.used_frames[-1]
for index in range((end_offset + offset),
(int(end_offset + duration) + offset)):
for index in range(end_offset,
(int(end_offset + duration))):
seq_number = padding.format(start_frame + index)
self.log.debug(
"index: `{}` | seq_number: `{}`".format(index, seq_number))
@ -491,3 +583,20 @@ class ExtractOTIOReview(publish.Extractor):
out_frame_start = self.used_frames[-1]
return output_path, out_frame_start
def _get_folder_name_based_prefix(self, instance):
"""Creates 'unique' human readable file prefix to differentiate.
Multiple instances might share same temp folder, but each instance
would be differentiated by asset, eg. folder name.
It ix expected that there won't be multiple instances for same asset.
"""
folder_path = instance.data["folderPath"]
folder_name = folder_path.split("/")[-1]
folder_path = folder_path.replace("/", "_").lstrip("_")
file_prefix = f"{folder_path}_{folder_name}."
self.log.debug(f"file_prefix::{file_prefix}")
return file_prefix

View file

@ -74,9 +74,6 @@ class ExtractOTIOTrimmingVideo(publish.Extractor):
otio_range (opentime.TimeRange): range to trim to
"""
# Not all hosts can import this module.
from ayon_core.pipeline.editorial import frames_to_seconds
# create path to destination
output_path = self._get_ffmpeg_output(input_file_path)
@ -84,11 +81,8 @@ class ExtractOTIOTrimmingVideo(publish.Extractor):
command = get_ffmpeg_tool_args("ffmpeg")
video_path = input_file_path
frame_start = otio_range.start_time.value
input_fps = otio_range.start_time.rate
frame_duration = otio_range.duration.value - 1
sec_start = frames_to_seconds(frame_start, input_fps)
sec_duration = frames_to_seconds(frame_duration, input_fps)
sec_start = otio_range.start_time.to_seconds()
sec_duration = otio_range.duration.to_seconds()
# form command for rendering gap files
command.extend([

View file

@ -95,7 +95,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
]
# Supported extensions
image_exts = ["exr", "jpg", "jpeg", "png", "dpx", "tga"]
image_exts = ["exr", "jpg", "jpeg", "png", "dpx", "tga", "tiff", "tif"]
video_exts = ["mov", "mp4"]
supported_exts = image_exts + video_exts
@ -1900,7 +1900,7 @@ class OverscanCrop:
string_value = re.sub(r"([ ]+)?px", " ", string_value)
string_value = re.sub(r"([ ]+)%", "%", string_value)
# Make sure +/- sign at the beginning of string is next to number
string_value = re.sub(r"^([\+\-])[ ]+", "\g<1>", string_value)
string_value = re.sub(r"^([\+\-])[ ]+", r"\g<1>", string_value)
# Make sure +/- sign in the middle has zero spaces before number under
# which belongs
string_value = re.sub(

View file

@ -36,7 +36,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
"traypublisher",
"substancepainter",
"nuke",
"aftereffects"
"aftereffects",
"unreal"
]
enabled = False
@ -455,6 +456,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# output file
jpeg_items.append(path_to_subprocess_arg(dst_path))
subprocess_command = " ".join(jpeg_items)
try:
run_subprocess(
subprocess_command, shell=True, logger=self.log

View file

@ -4,7 +4,10 @@ import os
from typing import Dict
import pyblish.api
from pxr import Sdf
try:
from pxr import Sdf
except ImportError:
Sdf = None
from ayon_core.lib import (
TextDef,
@ -13,21 +16,24 @@ from ayon_core.lib import (
UILabelDef,
EnumDef
)
from ayon_core.pipeline.usdlib import (
get_or_define_prim_spec,
add_ordered_reference,
variant_nested_prim_path,
setup_asset_layer,
add_ordered_sublayer,
set_layer_defaults
)
try:
from ayon_core.pipeline.usdlib import (
get_or_define_prim_spec,
add_ordered_reference,
variant_nested_prim_path,
setup_asset_layer,
add_ordered_sublayer,
set_layer_defaults
)
except ImportError:
pass
from ayon_core.pipeline.entity_uri import (
construct_ayon_entity_uri,
parse_ayon_entity_uri
)
from ayon_core.pipeline.load.utils import get_representation_path_by_names
from ayon_core.pipeline.publish.lib import get_instance_expected_output_path
from ayon_core.pipeline import publish
from ayon_core.pipeline import publish, KnownPublishError
# This global toggle is here mostly for debugging purposes and should usually
@ -77,7 +83,7 @@ def get_representation_path_in_publish_context(
Allow resolving 'latest' paths from a publishing context's instances
as if they will exist after publishing without them being integrated yet.
Use first instance that has same folder path and product name,
and contains representation with passed name.
@ -138,13 +144,14 @@ def get_instance_uri_path(
folder_path = instance.data["folderPath"]
product_name = instance.data["productName"]
project_name = context.data["projectName"]
version_name = instance.data["version"]
# Get the layer's published path
path = construct_ayon_entity_uri(
project_name=project_name,
folder_path=folder_path,
product=product_name,
version="latest",
version=version_name,
representation_name="usd"
)
@ -231,7 +238,7 @@ def add_representation(instance, name,
class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
publish.OpenPypePyblishPluginMixin):
publish.AYONPyblishPluginMixin):
"""Collect the USD Layer Contributions and create dependent instances.
Our contributions go to the layer
@ -451,7 +458,18 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
return new_instance
@classmethod
def get_attribute_defs(cls):
def get_attr_defs_for_instance(cls, create_context, instance):
# Filtering of instance, if needed, can be customized
if not cls.instance_matches_plugin_families(instance):
return []
# Attributes logic
publish_attributes = instance["publish_attributes"].get(
cls.__name__, {})
visible = publish_attributes.get("contribution_enabled", True)
variant_visible = visible and publish_attributes.get(
"contribution_apply_as_variant", True)
return [
UISeparatorDef("usd_container_settings1"),
@ -477,7 +495,8 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
"the contribution itself will be added to the "
"department layer."
),
default="usdAsset"),
default="usdAsset",
visible=visible),
EnumDef("contribution_target_product_init",
label="Initialize as",
tooltip=(
@ -488,7 +507,8 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
"setting will do nothing."
),
items=["asset", "shot"],
default="asset"),
default="asset",
visible=visible),
# Asset layer, e.g. model.usd, look.usd, rig.usd
EnumDef("contribution_layer",
@ -500,7 +520,8 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
"the list) will contribute as a stronger opinion."
),
items=list(cls.contribution_layers.keys()),
default="model"),
default="model",
visible=visible),
BoolDef("contribution_apply_as_variant",
label="Add as variant",
tooltip=(
@ -511,13 +532,16 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
"appended to as a sublayer to the department layer "
"instead."
),
default=True),
default=True,
visible=visible),
TextDef("contribution_variant_set_name",
label="Variant Set Name",
default="{layer}"),
default="{layer}",
visible=variant_visible),
TextDef("contribution_variant",
label="Variant Name",
default="{variant}"),
default="{variant}",
visible=variant_visible),
BoolDef("contribution_variant_is_default",
label="Set as default variant selection",
tooltip=(
@ -528,10 +552,41 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
"The behavior is unpredictable if multiple instances "
"for the same variant set have this enabled."
),
default=False),
default=False,
visible=variant_visible),
UISeparatorDef("usd_container_settings3"),
]
@classmethod
def register_create_context_callbacks(cls, create_context):
create_context.add_value_changed_callback(cls.on_values_changed)
@classmethod
def on_values_changed(cls, event):
"""Update instance attribute definitions on attribute changes."""
# Update attributes if any of the following plug-in attributes
# change:
keys = ["contribution_enabled", "contribution_apply_as_variant"]
for instance_change in event["changes"]:
instance = instance_change["instance"]
if not cls.instance_matches_plugin_families(instance):
continue
value_changes = instance_change["changes"]
plugin_attribute_changes = (
value_changes.get("publish_attributes", {})
.get(cls.__name__, {}))
if not any(key in plugin_attribute_changes for key in keys):
continue
# Update the attribute definitions
new_attrs = cls.get_attr_defs_for_instance(
event["create_context"], instance
)
instance.set_publish_plugin_attr_defs(cls.__name__, new_attrs)
class CollectUSDLayerContributionsHoudiniLook(CollectUSDLayerContributions):
"""
@ -544,9 +599,12 @@ class CollectUSDLayerContributionsHoudiniLook(CollectUSDLayerContributions):
label = CollectUSDLayerContributions.label + " (Look)"
@classmethod
def get_attribute_defs(cls):
defs = super(CollectUSDLayerContributionsHoudiniLook,
cls).get_attribute_defs()
def get_attr_defs_for_instance(cls, create_context, instance):
# Filtering of instance, if needed, can be customized
if not cls.instance_matches_plugin_families(instance):
return []
defs = super().get_attr_defs_for_instance(create_context, instance)
# Update default for department layer to look
layer_def = next(d for d in defs if d.key == "contribution_layer")
@ -555,12 +613,24 @@ class CollectUSDLayerContributionsHoudiniLook(CollectUSDLayerContributions):
return defs
class ValidateUSDDependencies(pyblish.api.InstancePlugin):
families = ["usdLayer"]
order = pyblish.api.ValidatorOrder
def process(self, instance):
if Sdf is None:
raise KnownPublishError("USD library 'Sdf' is not available.")
class ExtractUSDLayerContribution(publish.Extractor):
families = ["usdLayer"]
label = "Extract USD Layer Contributions (Asset/Shot)"
order = pyblish.api.ExtractorOrder + 0.45
use_ayon_entity_uri = False
def process(self, instance):
folder_path = instance.data["folderPath"]
@ -578,7 +648,8 @@ class ExtractUSDLayerContribution(publish.Extractor):
contributions = instance.data.get("usd_contributions", [])
for contribution in sorted(contributions, key=attrgetter("order")):
path = get_instance_uri_path(contribution.instance)
path = get_instance_uri_path(contribution.instance,
resolve=not self.use_ayon_entity_uri)
if isinstance(contribution, VariantContribution):
# Add contribution as a reference inside a variant
self.log.debug(f"Adding variant: {contribution}")
@ -652,14 +723,14 @@ class ExtractUSDLayerContribution(publish.Extractor):
)
def remove_previous_reference_contribution(self,
prim_spec: Sdf.PrimSpec,
prim_spec: "Sdf.PrimSpec",
instance: pyblish.api.Instance):
# Remove existing contributions of the same product - ignoring
# the picked version and representation. We assume there's only ever
# one version of a product you want to have referenced into a Prim.
remove_indices = set()
for index, ref in enumerate(prim_spec.referenceList.prependedItems):
ref: Sdf.Reference # type hint
ref: "Sdf.Reference"
uri = ref.customData.get("ayon_uri")
if uri and self.instance_match_ayon_uri(instance, uri):
@ -674,8 +745,8 @@ class ExtractUSDLayerContribution(publish.Extractor):
]
def add_reference_contribution(self,
layer: Sdf.Layer,
prim_path: Sdf.Path,
layer: "Sdf.Layer",
prim_path: "Sdf.Path",
filepath: str,
contribution: VariantContribution):
instance = contribution.instance
@ -720,6 +791,8 @@ class ExtractUSDAssetContribution(publish.Extractor):
label = "Extract USD Asset/Shot Contributions"
order = ExtractUSDLayerContribution.order + 0.01
use_ayon_entity_uri = False
def process(self, instance):
folder_path = instance.data["folderPath"]
@ -795,15 +868,15 @@ class ExtractUSDAssetContribution(publish.Extractor):
layer_id = layer_instance.data["usd_layer_id"]
order = layer_instance.data["usd_layer_order"]
path = get_instance_uri_path(instance=layer_instance)
path = get_instance_uri_path(instance=layer_instance,
resolve=not self.use_ayon_entity_uri)
add_ordered_sublayer(target_layer,
contribution_path=path,
layer_id=layer_id,
order=order,
# Add the sdf argument metadata which allows
# us to later detect whether another path
# has the same layer id, so we can replace it
# it.
# has the same layer id, so we can replace it.
add_sdf_arguments_metadata=True)
# Save the file

View file

@ -11,7 +11,11 @@ Multiples instances from your scene are set to publish into the same folder > pr
### How to repair?
Remove the offending instances or rename to have a unique name.
Remove the offending instances or rename them to have unique names. Also, please
check your product name templates to ensure that resolved names are
sufficiently unique. You can find the relevant settings at:
ayon+settings://core/tools/creator/product_name_profiles
</description>
</error>
</root>
</root>

View file

@ -509,8 +509,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
if not is_sequence_representation:
files = [files]
if any(os.path.isabs(fname) for fname in files):
raise KnownPublishError("Given file names contain full paths")
for fname in files:
if os.path.isabs(fname):
raise KnownPublishError(
f"Representation file names contains full paths: {fname}"
)
if not is_sequence_representation:
return
@ -744,6 +747,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
if not is_udim:
repre_context["frame"] = first_index_padded
# store renderlayer in context if it exists
# to be later used for example by delivery templates
if instance.data.get("renderlayer"):
repre_context["renderlayer"] = instance.data["renderlayer"]
# Update the destination indexes and padding
dst_collection = clique.assemble(dst_filepaths)[0][0]
dst_collection.padding = destination_padding

View file

@ -9,7 +9,14 @@ from ayon_api import (
class IntegrateInputLinksAYON(pyblish.api.ContextPlugin):
"""Connecting version level dependency links"""
"""Connecting version level dependency links
Handles links:
- generative - what gets produced from workfile
- reference - what was loaded into workfile
It expects workfile instance is being published.
"""
order = pyblish.api.IntegratorOrder + 0.2
label = "Connect Dependency InputLinks AYON"
@ -47,6 +54,11 @@ class IntegrateInputLinksAYON(pyblish.api.ContextPlugin):
self.create_links_on_server(context, new_links_by_type)
def split_instances(self, context):
"""Separates published instances into workfile and other
Returns:
(tuple(pyblish.plugin.Instance), list(pyblish.plugin.Instance))
"""
workfile_instance = None
other_instances = []
@ -83,6 +95,15 @@ class IntegrateInputLinksAYON(pyblish.api.ContextPlugin):
def create_workfile_links(
self, workfile_instance, other_instances, new_links_by_type
):
"""Adds links (generative and reference) for workfile.
Args:
workfile_instance (pyblish.plugin.Instance): published workfile
other_instances (list[pyblish.plugin.Instance]): other published
instances
new_links_by_type (dict[str, list[str]]): dictionary collecting new
created links by its type
"""
if workfile_instance is None:
self.log.warn("No workfile in this publish session.")
return
@ -97,7 +118,7 @@ class IntegrateInputLinksAYON(pyblish.api.ContextPlugin):
instance.data["versionEntity"]["id"],
)
loaded_versions = workfile_instance.context.get("loadedVersions")
loaded_versions = workfile_instance.context.data.get("loadedVersions")
if not loaded_versions:
return
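The docstrings above imply the following shape for `new_links_by_type` (link types per the plugin description, `dict[str, list[str]]`; the ids below are placeholders):

```python
new_links_by_type = {
    # what gets produced from the workfile
    "generative": ["<published_version_id_1>", "<published_version_id_2>"],
    # what was loaded into the workfile
    "reference": ["<loaded_version_id>"],
}
```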

View file

@ -1,17 +1,60 @@
import inspect
import pyblish.api
from ayon_core.pipeline.publish import PublishValidationError
from ayon_core.tools.utils.host_tools import show_workfiles
from ayon_core.pipeline.context_tools import version_up_current_workfile
class SaveByVersionUpAction(pyblish.api.Action):
"""Save Workfile."""
label = "Save Workfile"
on = "failed"
icon = "save"
def process(self, context, plugin):
version_up_current_workfile()
class ShowWorkfilesAction(pyblish.api.Action):
"""Save Workfile."""
label = "Show Workfiles Tool..."
on = "failed"
icon = "files-o"
def process(self, context, plugin):
show_workfiles()
class ValidateCurrentSaveFile(pyblish.api.ContextPlugin):
"""File must be saved before publishing"""
"""File must be saved before publishing
This does not validate for unsaved changes. It only validates whether
the current context was able to identify any 'currentFile'.
"""
label = "Validate File Saved"
order = pyblish.api.ValidatorOrder - 0.1
hosts = ["maya", "houdini", "nuke"]
hosts = ["fusion", "houdini", "max", "maya", "nuke", "substancepainter",
"cinema4d"]
actions = [SaveByVersionUpAction, ShowWorkfilesAction]
def process(self, context):
current_file = context.data["currentFile"]
if not current_file:
raise PublishValidationError("File not saved")
raise PublishValidationError(
"Workfile is not saved. Please save your scene to continue.",
title="File not saved",
description=self.get_description())
def get_description(self):
return inspect.cleandoc("""
### File not saved
Your workfile must be saved to continue publishing.
The **Save Workfile** action will save it for you with the first
available workfile version number in your current context.
""")

View file

@ -70,19 +70,3 @@ def get_ayon_splash_filepath(staging=None):
else:
splash_file_name = "AYON_splash.png"
return get_resource("icons", splash_file_name)
def get_openpype_production_icon_filepath():
return get_ayon_production_icon_filepath()
def get_openpype_staging_icon_filepath():
return get_ayon_staging_icon_filepath()
def get_openpype_icon_filepath(staging=None):
return get_ayon_icon_filepath(staging)
def get_openpype_splash_filepath(staging=None):
return get_ayon_splash_filepath(staging)

Binary file not shown.


View file

@ -486,11 +486,11 @@ class TableField(BaseItem):
line = self.ellide_text
break
for idx, char in enumerate(_word):
for char_index, char in enumerate(_word):
_line = line + char + self.ellide_text
_line_width = font.getsize(_line)[0]
if _line_width > max_width:
if idx == 0:
if char_index == 0:
line = _line
break
line = line + char

View file

@ -1,79 +0,0 @@
# Structure of local settings
- local settings do not have any validation schemas right now; this should help to see what is stored in local settings and how it works
- they are stored by identifier site_id, which should be a unified identifier of the workstation
- all keys may or may not be available on load
- contain main categories: `general`, `applications`, `projects`
## Categories
### General
- ATM contain only label of site
```json
{
"general": {
"site_label": "MySite"
}
}
```
### Applications
- modifications of application executables
- output should match application groups and variants
```json
{
"applications": {
"<app group>": {
"<app name>": {
"executable": "/my/path/to/nuke_12_2"
}
}
}
}
```
### Projects
- project specific modifications
- default project is stored under constant key defined in `pype.settings.constants`
```json
{
"projects": {
"<project name>": {
"active_site": "<name of active site>",
"remote_site": "<name of remote site>",
"roots": {
"<site name>": {
"<root name>": "<root dir path>"
}
}
}
}
}
```
## Final document
```json
{
"_id": "<ObjectId(...)>",
"site_id": "<site id>",
"general": {
"site_label": "MySite"
},
"applications": {
"<app group>": {
"<app name>": {
"executable": "<path to app executable>"
}
}
},
"projects": {
"<project name>": {
"active_site": "<name of active site>",
"remote_site": "<name of remote site>",
"roots": {
"<site name>": {
"<root name>": "<root dir path>"
}
}
}
}
}
```

View file

@ -60,7 +60,11 @@
"icon-alert-tools": "#AA5050",
"icon-entity-default": "#bfccd6",
"icon-entity-disabled": "#808080",
"font-entity-deprecated": "#666666",
"font-overridden": "#91CDFC",
"overlay-messages": {
"close-btn": "#D3D8DE",
"bg-success": "#458056",

View file

@ -739,6 +739,31 @@ OverlayMessageWidget QWidget {
background: transparent;
}
/* Hinted Line Edit */
HintedLineEditInput {
border-radius: 0.2em;
border-top-right-radius: 0px;
border-bottom-right-radius: 0px;
border: 1px solid {color:border};
}
HintedLineEditInput:hover {
border-color: {color:border-hover};
}
HintedLineEditInput:focus{
border-color: {color:border-focus};
}
HintedLineEditInput:disabled {
background: {color:bg-inputs-disabled};
}
HintedLineEditButton {
border: none;
border-radius: 0.2em;
border-bottom-left-radius: 0px;
border-top-left-radius: 0px;
padding: 0px;
qproperty-iconSize: 11px 11px;
}
/* Password dialog*/
#PasswordBtn {
border: none;
@ -969,17 +994,6 @@ PixmapButton:disabled {
#PublishLogConsole {
font-family: "Noto Sans Mono";
}
#VariantInputsWidget QLineEdit {
border-bottom-right-radius: 0px;
border-top-right-radius: 0px;
}
#VariantInputsWidget QToolButton {
border-bottom-left-radius: 0px;
border-top-left-radius: 0px;
padding-top: 0.5em;
padding-bottom: 0.5em;
width: 0.5em;
}
#VariantInput[state="new"], #VariantInput[state="new"]:focus, #VariantInput[state="new"]:hover {
border-color: {color:publisher:success};
}
@ -1104,39 +1118,39 @@ ValidationArtistMessage QLabel {
font-weight: bold;
}
#ValidationActionButton {
#PublishActionButton {
border-radius: 0.2em;
padding: 4px 6px 4px 6px;
background: {color:bg-buttons};
}
#ValidationActionButton:hover {
#PublishActionButton:hover {
background: {color:bg-buttons-hover};
color: {color:font-hover};
}
#ValidationActionButton:disabled {
#PublishActionButton:disabled {
background: {color:bg-buttons-disabled};
}
#ValidationErrorTitleFrame {
#PublishErrorTitleFrame {
border-radius: 0.2em;
background: {color:bg-buttons};
}
#ValidationErrorTitleFrame:hover {
#PublishErrorTitleFrame:hover {
background: {color:bg-buttons-hover};
}
#ValidationErrorTitleFrame[selected="1"] {
#PublishErrorTitleFrame[selected="1"] {
background: {color:bg-view-selection};
}
#ValidationErrorInstanceList {
#PublishErrorInstanceList {
border-radius: 0;
}
#ValidationErrorInstanceList::item {
#PublishErrorInstanceList::item {
border-bottom: 1px solid {color:border};
border-left: 1px solid {color:border};
}
@ -1231,6 +1245,15 @@ ValidationArtistMessage QLabel {
background: transparent;
}
#PluginDetailsContent {
background: {color:bg-inputs};
border-radius: 0.2em;
}
#PluginDetailsContent #PluginLabel {
font-size: 14pt;
font-weight: bold;
}
CreateNextPageOverlay {
font-size: 32pt;
}
@ -1449,14 +1472,6 @@ CreateNextPageOverlay {
border-radius: 5px;
}
#OpenPypeVersionLabel[state="success"] {
color: {color:settings:version-exists};
}
#OpenPypeVersionLabel[state="warning"] {
color: {color:settings:version-not-found};
}
#ShadowWidget {
font-size: 36pt;
}
@ -1570,6 +1585,10 @@ CreateNextPageOverlay {
}
/* Attribute Definition widgets */
AttributeDefinitionsLabel[overridden="1"] {
color: {color:font-overridden};
}
AttributeDefinitionsWidget QAbstractSpinBox, QLineEdit, QPlainTextEdit, QTextEdit {
padding: 1px;
}

View file

@ -0,0 +1,16 @@
import pytest
from pathlib import Path
collect_ignore = ["vendor", "resources"]
RESOURCES_PATH = 'resources'
@pytest.fixture
def resources_path_factory():
def factory(*args):
dirpath = Path(__file__).parent / RESOURCES_PATH
for arg in args:
dirpath = dirpath / arg
return dirpath
return factory
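A usage sketch for the fixture above: a test can build a path under `resources` by passing path segments (the segment names here are hypothetical):

```python
def test_resource_exists(resources_path_factory):
    # Resolves to <conftest dir>/resources/lib/transcoding/plate.1013.exr
    filepath = resources_path_factory("lib", "transcoding", "plate.1013.exr")
    assert filepath.name == "plate.1013.exr"
```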

View file

@ -0,0 +1,52 @@
import pytest
import logging
from pathlib import Path
from ayon_core.plugins.load.export_otio import get_image_info_metadata
logger = logging.getLogger('test_transcoding')
@pytest.mark.parametrize(
"resources_path_factory, metadata, expected, test_id",
[
(
Path(__file__).parent.parent
/ "resources"
/ "lib"
/ "transcoding"
/ "a01vfxd_sh010_plateP01_v002.1013.exr",
["timecode", "framerate"],
{"timecode": "01:00:06:03", "framerate": 23.976023976023978},
"test_01",
),
(
Path(__file__).parent.parent
/ "resources"
/ "lib"
/ "transcoding"
/ "a01vfxd_sh010_plateP01_v002.1013.exr",
["timecode", "width", "height", "duration"],
{"timecode": "01:00:06:03", "width": 1920, "height": 1080},
"test_02",
),
(
Path(__file__).parent.parent
/ "resources"
/ "lib"
/ "transcoding"
/ "a01vfxd_sh010_plateP01_v002.mov",
["width", "height", "duration"],
{"width": 1920, "height": 1080, "duration": "0.041708"},
"test_03",
),
],
)
def test_get_image_info_metadata_happy_path(
resources_path_factory, metadata, expected, test_id
):
path_to_file = resources_path_factory.as_posix()
returned_data = get_image_info_metadata(path_to_file, metadata, logger)
logger.info(f"Returned data: {returned_data}")
assert returned_data == expected

View file

@ -1,6 +1,7 @@
from .widgets import (
create_widget_for_attr_def,
AttributeDefinitionsWidget,
AttributeDefinitionsLabel,
)
from .dialog import (
@ -11,6 +12,7 @@ from .dialog import (
__all__ = (
"create_widget_for_attr_def",
"AttributeDefinitionsWidget",
"AttributeDefinitionsLabel",
"AttributeDefinitionsDialog",
)

View file

@ -0,0 +1 @@
REVERT_TO_DEFAULT_LABEL = "Revert to default"

View file

@ -17,6 +17,8 @@ from ayon_core.tools.utils import (
PixmapLabel
)
from ._constants import REVERT_TO_DEFAULT_LABEL
ITEM_ID_ROLE = QtCore.Qt.UserRole + 1
ITEM_LABEL_ROLE = QtCore.Qt.UserRole + 2
ITEM_ICON_ROLE = QtCore.Qt.UserRole + 3
@ -598,7 +600,7 @@ class FilesView(QtWidgets.QListView):
"""View showing instances and their groups."""
remove_requested = QtCore.Signal()
context_menu_requested = QtCore.Signal(QtCore.QPoint)
context_menu_requested = QtCore.Signal(QtCore.QPoint, bool)
def __init__(self, *args, **kwargs):
super(FilesView, self).__init__(*args, **kwargs)
@ -690,9 +692,8 @@ class FilesView(QtWidgets.QListView):
def _on_context_menu_request(self, pos):
index = self.indexAt(pos)
if index.isValid():
point = self.viewport().mapToGlobal(pos)
self.context_menu_requested.emit(point)
point = self.viewport().mapToGlobal(pos)
self.context_menu_requested.emit(point, index.isValid())
def _on_selection_change(self):
self._remove_btn.setEnabled(self.has_selected_item_ids())
@ -721,27 +722,34 @@ class FilesView(QtWidgets.QListView):
class FilesWidget(QtWidgets.QFrame):
value_changed = QtCore.Signal()
revert_requested = QtCore.Signal()
def __init__(self, single_item, allow_sequences, extensions_label, parent):
super(FilesWidget, self).__init__(parent)
super().__init__(parent)
self.setAcceptDrops(True)
wrapper_widget = QtWidgets.QWidget(self)
empty_widget = DropEmpty(
single_item, allow_sequences, extensions_label, self
single_item, allow_sequences, extensions_label, wrapper_widget
)
files_model = FilesModel(single_item, allow_sequences)
files_proxy_model = FilesProxyModel()
files_proxy_model.setSourceModel(files_model)
files_view = FilesView(self)
files_view = FilesView(wrapper_widget)
files_view.setModel(files_proxy_model)
layout = QtWidgets.QStackedLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setStackingMode(QtWidgets.QStackedLayout.StackAll)
layout.addWidget(empty_widget)
layout.addWidget(files_view)
layout.setCurrentWidget(empty_widget)
wrapper_layout = QtWidgets.QStackedLayout(wrapper_widget)
wrapper_layout.setContentsMargins(0, 0, 0, 0)
wrapper_layout.setStackingMode(QtWidgets.QStackedLayout.StackAll)
wrapper_layout.addWidget(empty_widget)
wrapper_layout.addWidget(files_view)
wrapper_layout.setCurrentWidget(empty_widget)
main_layout = QtWidgets.QHBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.addWidget(wrapper_widget, 1)
files_proxy_model.rowsInserted.connect(self._on_rows_inserted)
files_proxy_model.rowsRemoved.connect(self._on_rows_removed)
@ -761,7 +769,11 @@ class FilesWidget(QtWidgets.QFrame):
self._widgets_by_id = {}
self._layout = layout
self._wrapper_widget = wrapper_widget
self._wrapper_layout = wrapper_layout
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self._on_context_menu)
def _set_multivalue(self, multivalue):
if self._multivalue is multivalue:
@ -770,7 +782,7 @@ class FilesWidget(QtWidgets.QFrame):
self._files_view.set_multivalue(multivalue)
self._files_model.set_multivalue(multivalue)
self._files_proxy_model.set_multivalue(multivalue)
self.setEnabled(not multivalue)
self._wrapper_widget.setEnabled(not multivalue)
def set_value(self, value, multivalue):
self._in_set_value = True
@ -888,22 +900,28 @@ class FilesWidget(QtWidgets.QFrame):
if items_to_delete:
self._remove_item_by_ids(items_to_delete)
def _on_context_menu_requested(self, pos):
if self._multivalue:
return
def _on_context_menu(self, pos):
self._on_context_menu_requested(pos, False)
def _on_context_menu_requested(self, pos, valid_index):
menu = QtWidgets.QMenu(self._files_view)
if valid_index and not self._multivalue:
if self._files_view.has_selected_sequence():
split_action = QtWidgets.QAction("Split sequence", menu)
split_action.triggered.connect(self._on_split_request)
menu.addAction(split_action)
if self._files_view.has_selected_sequence():
split_action = QtWidgets.QAction("Split sequence", menu)
split_action.triggered.connect(self._on_split_request)
menu.addAction(split_action)
remove_action = QtWidgets.QAction("Remove", menu)
remove_action.triggered.connect(self._on_remove_requested)
menu.addAction(remove_action)
remove_action = QtWidgets.QAction("Remove", menu)
remove_action.triggered.connect(self._on_remove_requested)
menu.addAction(remove_action)
if not valid_index:
revert_action = QtWidgets.QAction(REVERT_TO_DEFAULT_LABEL, menu)
revert_action.triggered.connect(self.revert_requested)
menu.addAction(revert_action)
menu.popup(pos)
if menu.actions():
menu.popup(pos)
def dragEnterEvent(self, event):
if self._multivalue:
@ -1011,5 +1029,5 @@ class FilesWidget(QtWidgets.QFrame):
current_widget = self._files_view
else:
current_widget = self._empty_widget
self._layout.setCurrentWidget(current_widget)
self._wrapper_layout.setCurrentWidget(current_widget)
self._files_view.update_remove_btn_visibility()
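A minimal sketch of consuming the widened signal (hedged: the handler and the "files_view" name are illustrative, not part of this diff). The view now always emits on right-click and reports whether the click landed on a valid item:

from qtpy import QtWidgets

def _on_context_menu_requested(point, valid_index):
    # 'point' arrives already mapped to global coordinates by the view;
    # 'valid_index' is True only when the click landed on an item.
    menu = QtWidgets.QMenu()
    if valid_index:
        menu.addAction("Remove")
    else:
        menu.addAction("Revert to default")
    menu.popup(point)

files_view.context_menu_requested.connect(_on_context_menu_requested)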

View file

@ -1,4 +1,6 @@
import copy
import typing
from typing import Optional
from qtpy import QtWidgets, QtCore
@ -20,58 +22,123 @@ from ayon_core.tools.utils import (
FocusSpinBox,
FocusDoubleSpinBox,
MultiSelectionComboBox,
set_style_property,
)
from ayon_core.tools.utils import NiceCheckbox
from ._constants import REVERT_TO_DEFAULT_LABEL
from .files_widget import FilesWidget
if typing.TYPE_CHECKING:
from typing import Union
def create_widget_for_attr_def(attr_def, parent=None):
widget = _create_widget_for_attr_def(attr_def, parent)
if attr_def.hidden:
def create_widget_for_attr_def(
attr_def: AbstractAttrDef,
parent: Optional[QtWidgets.QWidget] = None,
handle_revert_to_default: Optional[bool] = True,
):
widget = _create_widget_for_attr_def(
attr_def, parent, handle_revert_to_default
)
if not attr_def.visible:
widget.setVisible(False)
if attr_def.disabled:
if not attr_def.enabled:
widget.setEnabled(False)
return widget
def _create_widget_for_attr_def(attr_def, parent=None):
def _create_widget_for_attr_def(
attr_def: AbstractAttrDef,
parent: "Union[QtWidgets.QWidget, None]",
handle_revert_to_default: bool,
):
if not isinstance(attr_def, AbstractAttrDef):
raise TypeError("Unexpected type \"{}\" expected \"{}\"".format(
str(type(attr_def)), AbstractAttrDef
))
cls = None
if isinstance(attr_def, NumberDef):
return NumberAttrWidget(attr_def, parent)
cls = NumberAttrWidget
if isinstance(attr_def, TextDef):
return TextAttrWidget(attr_def, parent)
elif isinstance(attr_def, TextDef):
cls = TextAttrWidget
if isinstance(attr_def, EnumDef):
return EnumAttrWidget(attr_def, parent)
elif isinstance(attr_def, EnumDef):
cls = EnumAttrWidget
if isinstance(attr_def, BoolDef):
return BoolAttrWidget(attr_def, parent)
elif isinstance(attr_def, BoolDef):
cls = BoolAttrWidget
if isinstance(attr_def, UnknownDef):
return UnknownAttrWidget(attr_def, parent)
elif isinstance(attr_def, UnknownDef):
cls = UnknownAttrWidget
if isinstance(attr_def, HiddenDef):
return HiddenAttrWidget(attr_def, parent)
elif isinstance(attr_def, HiddenDef):
cls = HiddenAttrWidget
if isinstance(attr_def, FileDef):
return FileAttrWidget(attr_def, parent)
elif isinstance(attr_def, FileDef):
cls = FileAttrWidget
if isinstance(attr_def, UISeparatorDef):
return SeparatorAttrWidget(attr_def, parent)
elif isinstance(attr_def, UISeparatorDef):
cls = SeparatorAttrWidget
if isinstance(attr_def, UILabelDef):
return LabelAttrWidget(attr_def, parent)
elif isinstance(attr_def, UILabelDef):
cls = LabelAttrWidget
raise ValueError("Unknown attribute definition \"{}\"".format(
str(type(attr_def))
))
if cls is None:
raise ValueError("Unknown attribute definition \"{}\"".format(
str(type(attr_def))
))
return cls(attr_def, parent, handle_revert_to_default)
class AttributeDefinitionsLabel(QtWidgets.QLabel):
"""Label related to value attribute definition.
Shows the attribute definition's label and indicates whether the value
is overridden.
Right-click the label to revert the value to its default.
"""
revert_to_default_requested = QtCore.Signal(str)
def __init__(
self,
attr_id: str,
label: str,
parent: QtWidgets.QWidget,
):
super().__init__(label, parent)
self._attr_id = attr_id
self._overridden = False
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self._on_context_menu)
def set_overridden(self, overridden: bool):
if self._overridden == overridden:
return
self._overridden = overridden
set_style_property(
self,
"overridden",
"1" if overridden else ""
)
def _on_context_menu(self, point: QtCore.QPoint):
menu = QtWidgets.QMenu(self)
action = QtWidgets.QAction(menu)
action.setText(REVERT_TO_DEFAULT_LABEL)
action.triggered.connect(self._request_revert_to_default)
menu.addAction(action)
menu.exec_(self.mapToGlobal(point))
def _request_revert_to_default(self):
self.revert_to_default_requested.emit(self._attr_id)
class AttributeDefinitionsWidget(QtWidgets.QWidget):
@ -83,16 +150,18 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
"""
def __init__(self, attr_defs=None, parent=None):
super(AttributeDefinitionsWidget, self).__init__(parent)
super().__init__(parent)
self._widgets = []
self._widgets_by_id = {}
self._labels_by_id = {}
self._current_keys = set()
self.set_attr_defs(attr_defs)
def clear_attr_defs(self):
"""Remove all existing widgets and reset layout if needed."""
self._widgets = []
self._widgets_by_id = {}
self._labels_by_id = {}
self._current_keys = set()
layout = self.layout()
@ -133,9 +202,9 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
self._current_keys.add(attr_def.key)
widget = create_widget_for_attr_def(attr_def, self)
self._widgets.append(widget)
self._widgets_by_id[attr_def.id] = widget
if attr_def.hidden:
if not attr_def.visible:
continue
expand_cols = 2
@ -145,7 +214,13 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
col_num = 2 - expand_cols
if attr_def.is_value_def and attr_def.label:
label_widget = QtWidgets.QLabel(attr_def.label, self)
label_widget = AttributeDefinitionsLabel(
attr_def.id, attr_def.label, self
)
label_widget.revert_to_default_requested.connect(
self._on_revert_request
)
self._labels_by_id[attr_def.id] = label_widget
tooltip = attr_def.tooltip
if tooltip:
label_widget.setToolTip(tooltip)
@ -160,6 +235,9 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
if not attr_def.is_label_horizontal:
row += 1
if attr_def.is_value_def:
widget.value_changed.connect(self._on_value_change)
layout.addWidget(
widget, row, col_num, 1, expand_cols
)
@ -168,7 +246,7 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
def set_value(self, value):
new_value = copy.deepcopy(value)
unused_keys = set(new_value.keys())
for widget in self._widgets:
for widget in self._widgets_by_id.values():
attr_def = widget.attr_def
if attr_def.key not in new_value:
continue
@ -181,22 +259,42 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
def current_value(self):
output = {}
for widget in self._widgets:
for widget in self._widgets_by_id.values():
attr_def = widget.attr_def
if not isinstance(attr_def, UIDef):
output[attr_def.key] = widget.current_value()
return output
def _on_revert_request(self, attr_id):
widget = self._widgets_by_id.get(attr_id)
if widget is not None:
widget.set_value(widget.attr_def.default)
def _on_value_change(self, value, attr_id):
widget = self._widgets_by_id.get(attr_id)
if widget is None:
return
label = self._labels_by_id.get(attr_id)
if label is not None:
label.set_overridden(value != widget.attr_def.default)
class _BaseAttrDefWidget(QtWidgets.QWidget):
# Type 'object' may not work with older PySide versions
value_changed = QtCore.Signal(object, str)
revert_to_default_requested = QtCore.Signal(str)
def __init__(self, attr_def, parent):
super(_BaseAttrDefWidget, self).__init__(parent)
def __init__(
self,
attr_def: AbstractAttrDef,
parent: "Union[QtWidgets.QWidget, None]",
handle_revert_to_default: Optional[bool] = True,
):
super().__init__(parent)
self.attr_def = attr_def
self.attr_def: AbstractAttrDef = attr_def
self._handle_revert_to_default: bool = handle_revert_to_default
main_layout = QtWidgets.QHBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
@ -205,6 +303,15 @@ class _BaseAttrDefWidget(QtWidgets.QWidget):
self._ui_init()
def revert_to_default_value(self):
if not self.attr_def.is_value_def:
return
if self._handle_revert_to_default:
self.set_value(self.attr_def.default)
else:
self.revert_to_default_requested.emit(self.attr_def.id)
def _ui_init(self):
raise NotImplementedError(
"Method '_ui_init' is not implemented. {}".format(
@ -255,7 +362,7 @@ class ClickableLineEdit(QtWidgets.QLineEdit):
clicked = QtCore.Signal()
def __init__(self, text, parent):
super(ClickableLineEdit, self).__init__(parent)
super().__init__(parent)
self.setText(text)
self.setReadOnly(True)
@ -264,7 +371,7 @@ class ClickableLineEdit(QtWidgets.QLineEdit):
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self._mouse_pressed = True
super(ClickableLineEdit, self).mousePressEvent(event)
super().mousePressEvent(event)
def mouseReleaseEvent(self, event):
if self._mouse_pressed:
@ -272,7 +379,7 @@ class ClickableLineEdit(QtWidgets.QLineEdit):
if self.rect().contains(event.pos()):
self.clicked.emit()
super(ClickableLineEdit, self).mouseReleaseEvent(event)
super().mouseReleaseEvent(event)
class NumberAttrWidget(_BaseAttrDefWidget):
@ -284,6 +391,9 @@ class NumberAttrWidget(_BaseAttrDefWidget):
else:
input_widget = FocusSpinBox(self)
# Override context menu event to add revert to default action
input_widget.contextMenuEvent = self._input_widget_context_event
if self.attr_def.tooltip:
input_widget.setToolTip(self.attr_def.tooltip)
@ -321,6 +431,16 @@ class NumberAttrWidget(_BaseAttrDefWidget):
self._set_multiselection_visible(True)
return False
def _input_widget_context_event(self, event):
line_edit = self._input_widget.lineEdit()
menu = line_edit.createStandardContextMenu()
menu.setAttribute(QtCore.Qt.WA_DeleteOnClose)
action = QtWidgets.QAction(menu)
action.setText(REVERT_TO_DEFAULT_LABEL)
action.triggered.connect(self.revert_to_default_value)
menu.addAction(action)
menu.popup(event.globalPos())
def current_value(self):
return self._input_widget.value()
@ -386,6 +506,9 @@ class TextAttrWidget(_BaseAttrDefWidget):
else:
input_widget = QtWidgets.QLineEdit(self)
# Override context menu event to add revert to default action
input_widget.contextMenuEvent = self._input_widget_context_event
if (
self.attr_def.placeholder
and hasattr(input_widget, "setPlaceholderText")
@ -407,6 +530,15 @@ class TextAttrWidget(_BaseAttrDefWidget):
self.main_layout.addWidget(input_widget, 0)
def _input_widget_context_event(self, event):
menu = self._input_widget.createStandardContextMenu()
menu.setAttribute(QtCore.Qt.WA_DeleteOnClose)
action = QtWidgets.QAction(menu)
action.setText(REVERT_TO_DEFAULT_LABEL)
action.triggered.connect(self.revert_to_default_value)
menu.addAction(action)
menu.popup(event.globalPos())
def _on_value_change(self):
if self.multiline:
new_value = self._input_widget.toPlainText()
@ -459,6 +591,20 @@ class BoolAttrWidget(_BaseAttrDefWidget):
self.main_layout.addWidget(input_widget, 0)
self.main_layout.addStretch(1)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self._on_context_menu)
def _on_context_menu(self, pos):
self._menu = QtWidgets.QMenu(self)
action = QtWidgets.QAction(self._menu)
action.setText(REVERT_TO_DEFAULT_LABEL)
action.triggered.connect(self.revert_to_default_value)
self._menu.addAction(action)
global_pos = self.mapToGlobal(pos)
self._menu.exec_(global_pos)
def _on_value_change(self):
new_value = self._input_widget.isChecked()
self.value_changed.emit(new_value, self.attr_def.id)
@ -487,7 +633,7 @@ class BoolAttrWidget(_BaseAttrDefWidget):
class EnumAttrWidget(_BaseAttrDefWidget):
def __init__(self, *args, **kwargs):
self._multivalue = False
super(EnumAttrWidget, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
@property
def multiselection(self):
@ -522,6 +668,20 @@ class EnumAttrWidget(_BaseAttrDefWidget):
self.main_layout.addWidget(input_widget, 0)
input_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
input_widget.customContextMenuRequested.connect(self._on_context_menu)
def _on_context_menu(self, pos):
menu = QtWidgets.QMenu(self)
action = QtWidgets.QAction(menu)
action.setText(REVERT_TO_DEFAULT_LABEL)
action.triggered.connect(self.revert_to_default_value)
menu.addAction(action)
global_pos = self.mapToGlobal(pos)
menu.exec_(global_pos)
def _on_value_change(self):
new_value = self.current_value()
if self._multivalue:
@ -614,7 +774,7 @@ class HiddenAttrWidget(_BaseAttrDefWidget):
def setVisible(self, visible):
if visible:
visible = False
super(HiddenAttrWidget, self).setVisible(visible)
super().setVisible(visible)
def current_value(self):
if self._multivalue:
@ -650,10 +810,25 @@ class FileAttrWidget(_BaseAttrDefWidget):
self.main_layout.addWidget(input_widget, 0)
input_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
input_widget.customContextMenuRequested.connect(self._on_context_menu)
input_widget.revert_requested.connect(self.revert_to_default_value)
def _on_value_change(self):
new_value = self.current_value()
self.value_changed.emit(new_value, self.attr_def.id)
def _on_context_menu(self, pos):
menu = QtWidgets.QMenu(self)
action = QtWidgets.QAction(menu)
action.setText(REVERT_TO_DEFAULT_LABEL)
action.triggered.connect(self.revert_to_default_value)
menu.addAction(action)
global_pos = self.mapToGlobal(pos)
menu.exec_(global_pos)
def current_value(self):
return self._input_widget.current_value()
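A minimal usage sketch of the extended factory (hedged: the 'NumberDef' import location and its arguments are assumptions, not part of this diff). With 'handle_revert_to_default=False' the widget emits 'revert_to_default_requested' instead of applying the default itself:

from qtpy import QtWidgets
from ayon_core.lib import NumberDef  # assumed export location

app = QtWidgets.QApplication([])
attr_def = NumberDef("samples", default=16, label="Samples")
widget = create_widget_for_attr_def(
    attr_def,
    parent=None,
    handle_revert_to_default=False,
)
# The owning widget applies the default when the revert is requested.
widget.revert_to_default_requested.connect(
    lambda _attr_id: widget.set_value(attr_def.default)
)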

View file

@ -104,7 +104,7 @@ class ProductNameValidator(RegularExpressionValidatorClass):
def validate(self, text, pos):
results = super(ProductNameValidator, self).validate(text, pos)
if results[0] == self.Invalid:
if results[0] == RegularExpressionValidatorClass.Invalid:
self.invalid.emit(self.invalid_chars(text))
return results

View file

@ -0,0 +1,273 @@
"""
Brought from https://gist.github.com/BigRoy/1972822065e38f8fae7521078e44eca2
Code Credits: [BigRoy](https://github.com/BigRoy)
Requirement:
It requires pyblish version >= 1.8.12
How it works:
This tool makes use of pyblish event `pluginProcessed` to:
1. Pause the publishing.
2. Collect some info about the plugin.
3. Show that info in the tool's window.
4. Continue publishing on clicking `step` button.
How to use it:
1. Launch the tool from AYON experimental tools window.
2. Launch the publisher tool and click validate.
3. Click Step to run plugins one by one.
Note:
Pyblish debugger also works when triggering the validation or
publishing from code.
Here's an example of validating from code:
https://github.com/MustafaJafar/ayon-recipes/blob/main/validate_from_code.py
"""
import copy
import json
from qtpy import QtWidgets, QtCore, QtGui
import pyblish.api
from ayon_core import style
TAB = 4 * "&nbsp;"
HEADER_SIZE = "15px"
KEY_COLOR = QtGui.QColor("#ffffff")
NEW_KEY_COLOR = QtGui.QColor("#00ff00")
VALUE_TYPE_COLOR = QtGui.QColor("#ffbbbb")
NEW_VALUE_TYPE_COLOR = QtGui.QColor("#ff4444")
VALUE_COLOR = QtGui.QColor("#777799")
NEW_VALUE_COLOR = QtGui.QColor("#DDDDCC")
CHANGED_VALUE_COLOR = QtGui.QColor("#CCFFCC")
MAX_VALUE_STR_LEN = 100
def failsafe_deepcopy(data):
"""Allow skipping the deepcopy for unsupported types"""
try:
return copy.deepcopy(data)
except TypeError:
if isinstance(data, dict):
return {
key: failsafe_deepcopy(value)
for key, value in data.items()
}
elif isinstance(data, list):
return data.copy()
return data
class DictChangesModel(QtGui.QStandardItemModel):
# TODO: Replace this with a QAbstractItemModel
def __init__(self, *args, **kwargs):
super(DictChangesModel, self).__init__(*args, **kwargs)
self._data = {}
columns = ["Key", "Type", "Value"]
self.setColumnCount(len(columns))
for i, label in enumerate(columns):
self.setHeaderData(i, QtCore.Qt.Horizontal, label)
def _update_recursive(self, data, parent, previous_data):
for key, value in data.items():
# Find existing item or add new row
parent_index = parent.index()
for row in range(self.rowCount(parent_index)):
# Update existing item if it exists
index = self.index(row, 0, parent_index)
if index.data() == key:
item = self.itemFromIndex(index)
type_item = self.itemFromIndex(self.index(row, 1, parent_index)) # noqa
value_item = self.itemFromIndex(self.index(row, 2, parent_index)) # noqa
break
else:
item = QtGui.QStandardItem(key)
type_item = QtGui.QStandardItem()
value_item = QtGui.QStandardItem()
parent.appendRow([item, type_item, value_item])
# Key
key_color = NEW_KEY_COLOR if key not in previous_data else KEY_COLOR # noqa
item.setData(key_color, QtCore.Qt.ForegroundRole)
# Type
type_str = type(value).__name__
type_color = VALUE_TYPE_COLOR
if (
key in previous_data
and type(previous_data[key]).__name__ != type_str
):
type_color = NEW_VALUE_TYPE_COLOR
type_item.setText(type_str)
type_item.setData(type_color, QtCore.Qt.ForegroundRole)
# Value
value_changed = False
if key not in previous_data or previous_data[key] != value:
value_changed = True
value_color = NEW_VALUE_COLOR if value_changed else VALUE_COLOR
value_item.setData(value_color, QtCore.Qt.ForegroundRole)
if value_changed:
value_str = str(value)
if len(value_str) > MAX_VALUE_STR_LEN:
value_str = value_str[:MAX_VALUE_STR_LEN] + "..."
value_item.setText(value_str)
# Preferably this is deferred to only when the data gets
# requested since this formatting can be slow for very large
# data sets like project settings and system settings
# This will also be MUCH faster if we don't clear the
# items on each update but only updated/add/remove changed
# items so that this also runs much less often
value_item.setData(
json.dumps(value, default=str, indent=4),
QtCore.Qt.ToolTipRole
)
if isinstance(value, dict):
previous_value = previous_data.get(key, {})
if previous_data.get(key) != value:
# Update children if the value is not the same as before
self._update_recursive(value,
parent=item,
previous_data=previous_value)
else:
# TODO: Ensure all children are updated to be not marked
# as 'changed' in the most optimal way possible
self._update_recursive(value,
parent=item,
previous_data=previous_value)
self._data = data
def update(self, data):
parent = self.invisibleRootItem()
data = failsafe_deepcopy(data)
previous_data = self._data
self._update_recursive(data, parent, previous_data)
self._data = data # store previous data for next update
class DebugUI(QtWidgets.QDialog):
def __init__(self, parent=None):
super(DebugUI, self).__init__(parent=parent)
self.setStyleSheet(style.load_stylesheet())
self._set_window_title()
self.setWindowFlags(
QtCore.Qt.Window
| QtCore.Qt.CustomizeWindowHint
| QtCore.Qt.WindowTitleHint
| QtCore.Qt.WindowMinimizeButtonHint
| QtCore.Qt.WindowCloseButtonHint
| QtCore.Qt.WindowStaysOnTopHint
)
layout = QtWidgets.QVBoxLayout(self)
text_edit = QtWidgets.QTextEdit()
text_edit.setFixedHeight(65)
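# Intentionally request a nonexistent font family so the
# TypeWriter style hint below resolves to a monospace fallback.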
font = QtGui.QFont("NONEXISTENTFONT")
font.setStyleHint(QtGui.QFont.TypeWriter)
text_edit.setFont(font)
text_edit.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
step = QtWidgets.QPushButton("Step")
step.setEnabled(False)
model = DictChangesModel()
proxy = QtCore.QSortFilterProxyModel()
proxy.setRecursiveFilteringEnabled(True)
proxy.setSourceModel(model)
view = QtWidgets.QTreeView()
view.setModel(proxy)
view.setSortingEnabled(True)
filter_field = QtWidgets.QLineEdit()
filter_field.setPlaceholderText("Filter keys...")
filter_field.textChanged.connect(proxy.setFilterFixedString)
layout.addWidget(text_edit)
layout.addWidget(filter_field)
layout.addWidget(view)
layout.addWidget(step)
step.clicked.connect(self.on_step)
self._pause = False
self.model = model
self.filter = filter_field
self.proxy = proxy
self.view = view
self.text = text_edit
self.step = step
self.resize(700, 500)
self._previous_data = {}
def _set_window_title(self, plugin=None):
title = "Pyblish Debug Stepper"
if plugin is not None:
plugin_label = plugin.label or plugin.__name__
title += f" | {plugin_label}"
self.setWindowTitle(title)
def pause(self, state):
self._pause = state
self.step.setEnabled(state)
def on_step(self):
self.pause(False)
def showEvent(self, event):
print("Registering callback..")
pyblish.api.register_callback("pluginProcessed",
self.on_plugin_processed)
def hideEvent(self, event):
self.pause(False)
print("Deregistering callback..")
pyblish.api.deregister_callback("pluginProcessed",
self.on_plugin_processed)
def on_plugin_processed(self, result):
self.pause(True)
self._set_window_title(plugin=result["plugin"])
print(10*"<", result["plugin"].__name__, 10*">")
plugin_order = result["plugin"].order
plugin_name = result["plugin"].__name__
duration = result['duration']
plugin_instance = result["instance"]
context = result["context"]
msg = ""
msg += f"Order: {plugin_order}<br>"
msg += f"Plugin: {plugin_name}"
if plugin_instance is not None:
msg += f" -> instance: {plugin_instance}"
msg += "<br>"
msg += f"Duration: {duration} ms<br>"
self.text.setHtml(msg)
data = {
"context": context.data
}
for instance in context:
data[instance.name] = instance.data
self.model.update(data)
app = QtWidgets.QApplication.instance()
while self._pause:
# Allow user interaction with the UI
app.processEvents()
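For reference, a minimal sketch of the 'pluginProcessed' hook this tool relies on, runnable outside the UI (hedged: which plugins run depends on what is registered in your session):

import pyblish.api
import pyblish.util

def on_plugin_processed(result):
    # 'result' carries the same keys DebugUI reads above:
    # "plugin", "instance", "context" and "duration".
    print(result["plugin"].__name__, result["duration"], "ms")

pyblish.api.register_callback("pluginProcessed", on_plugin_processed)
pyblish.util.publish()  # each processed plugin fires the callback
pyblish.api.deregister_callback("pluginProcessed", on_plugin_processed)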

View file

@ -1,4 +1,5 @@
import os
from .pyblish_debug_stepper import DebugUI
# Constant key under which local settings are stored
LOCAL_EXPERIMENTAL_KEY = "experimental_tools"
@ -95,6 +96,12 @@ class ExperimentalTools:
"hiero",
"resolve",
]
),
ExperimentalHostTool(
"pyblish_debug_stepper",
"Pyblish Debug Stepper",
"Debug Pyblish plugins step by step.",
self._show_pyblish_debugger,
)
]
@ -162,9 +169,16 @@ class ExperimentalTools:
local_settings.get(LOCAL_EXPERIMENTAL_KEY)
) or {}
for identifier, eperimental_tool in self.tools_by_identifier.items():
# Enable the following tools by default, because they would
# otherwise always be disabled: their settings do not exist yet.
experimental_settings.update({
"pyblish_debug_stepper": True,
})
for identifier, experimental_tool in self.tools_by_identifier.items():
enabled = experimental_settings.get(identifier, False)
eperimental_tool.set_enabled(enabled)
experimental_tool.set_enabled(enabled)
def _show_publisher(self):
if self._publisher_tool is None:
@ -175,3 +189,7 @@ class ExperimentalTools:
)
self._publisher_tool.show()
def _show_pyblish_debugger(self):
window = DebugUI(parent=self._parent_widget)
window.show()

View file

@ -1,6 +1,9 @@
import collections
from ayon_api import get_representations, get_versions_links
from ayon_api import (
get_representations,
get_versions_links,
)
from ayon_core.lib import Logger, NestedCacheItem
from ayon_core.addon import AddonsManager
@ -509,18 +512,19 @@ class SiteSyncModel:
"reference"
)
for link_repre_id in links:
try:
if not self._sitesync_addon.is_representation_on_site(
project_name,
link_repre_id,
site_name
):
print("Adding {} to linked representation: {}".format(
site_name, link_repre_id))
self._sitesync_addon.add_site(
project_name,
link_repre_id,
site_name,
force=False
force=True
)
except Exception:
# do not add/reset working site for references
log.debug("Site present", exc_info=True)
def _get_linked_representation_id(
self,
@ -575,7 +579,7 @@ class SiteSyncModel:
project_name,
versions_to_check,
link_types=link_types,
link_direction="out")
link_direction="in") # looking for 'in'puts for version
versions_to_check = set()
for links in versions_links.values():
@ -584,9 +588,6 @@ class SiteSyncModel:
if link["entityType"] != "version":
continue
entity_id = link["entityId"]
# Skip already found linked version ids
if entity_id in linked_version_ids:
continue
linked_version_ids.add(entity_id)
versions_to_check.add(entity_id)
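For context, a sketch of the link traversal this hunk performs (hedged: the project name and ids are placeholders; the return shape matches the loop above):

from ayon_api import get_versions_links

versions_links = get_versions_links(
    "my_project",              # placeholder project name
    ["version-id-1"],          # version ids whose inputs we want
    link_types=["reference"],
    link_direction="in",       # inputs of the version, as in the diff
)
for links in versions_links.values():
    for link in links:
        if link["entityType"] != "version":
            continue
        print("linked version:", link["entityId"])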

View file

@ -222,6 +222,7 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate):
editor = VersionComboBox(product_id, parent)
editor.setProperty("itemId", item_id)
editor.setFocusPolicy(QtCore.Qt.NoFocus)
editor.value_changed.connect(self._on_editor_change)
editor.destroyed.connect(self._on_destroy)

Some files were not shown because too many files have changed in this diff.