Mirror of https://github.com/ynput/ayon-core.git
Synced 2025-12-24 12:54:40 +01:00

Merge branch 'develop' of https://github.com/ynput/ayon-core into bugfix/transcode_ignore_conversion_on_unknown_channel

# Conflicts:
#   client/ayon_core/plugins/publish/extract_color_transcode.py

Commit 5f82473a26
215 changed files with 12575 additions and 12122 deletions
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 14 changed lines)

@@ -35,6 +35,20 @@ body:
      label: Version
      description: What version are you running? Look to AYON Tray
      options:
        - 1.0.14
        - 1.0.13
        - 1.0.12
        - 1.0.11
        - 1.0.10
        - 1.0.9
        - 1.0.8
        - 1.0.7
        - 1.0.6
        - 1.0.5
        - 1.0.4
        - 1.0.3
        - 1.0.2
        - 1.0.1
        - 1.0.0
        - 0.4.4
        - 0.4.3
.github/workflows/assign_pr_to_project.yml (vendored, new file, 48 lines)

@@ -0,0 +1,48 @@
name: 🔸Auto assign pr
on:
  workflow_dispatch:
    inputs:
      pr_number:
        type: string
        description: "Run workflow for this PR number"
        required: true
      project_id:
        type: string
        description: "Github Project Number"
        required: true
        default: "16"
  pull_request:
    types:
      - opened

env:
  GH_TOKEN: ${{ github.token }}

jobs:
  get-pr-repo:
    runs-on: ubuntu-latest
    outputs:
      pr_repo_name: ${{ steps.get-repo-name.outputs.repo_name || github.event.pull_request.head.repo.full_name }}

    # INFO `github.event.pull_request.head.repo.full_name` is not available on manual triggered (dispatched) runs
    steps:
      - name: Get PR repo name
        if: ${{ github.event_name == 'workflow_dispatch' }}
        id: get-repo-name
        run: |
          repo_name=$(gh pr view ${{ inputs.pr_number }} --json headRepository,headRepositoryOwner --repo ${{ github.repository }} | jq -r '.headRepositoryOwner.login + "/" + .headRepository.name')
          echo "repo_name=$repo_name" >> $GITHUB_OUTPUT

  auto-assign-pr:
    needs:
      - get-pr-repo
    if: ${{ needs.get-pr-repo.outputs.pr_repo_name == github.repository }}
    uses: ynput/ops-repo-automation/.github/workflows/pr_to_project.yml@main
    with:
      repo: "${{ github.repository }}"
      project_id: ${{ inputs.project_id != '' && fromJSON(inputs.project_id) || 16 }}
      pull_request_number: ${{ github.event.pull_request.number || fromJSON(inputs.pr_number) }}
    secrets:
      # INFO fallback to default `github.token` is required for PRs from forks
      # INFO organization secrets won't be available to forks
      token: ${{ secrets.YNPUT_BOT_TOKEN || github.token}}
.github/workflows/pr_linting.yml (vendored, 4 changed lines)

@@ -21,4 +21,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - uses: chartboost/ruff-action@v1
+      - uses: astral-sh/ruff-action@v1
+        with:
+          changed-files: "true"
.github/workflows/pr_unittests.yaml (vendored, new file, 31 lines)

@@ -0,0 +1,31 @@
name: 🧐 Run Unit Tests

on:
  push:
    branches: [ develop ]
  pull_request:
    branches: [ develop ]

  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number}}
  cancel-in-progress: true

permissions:
  contents: read
  pull-requests: write

jobs:
  tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.9'
      - name: Install requirements
        run: ./tools/manage.sh create-env
      - name: Run tests
        run: ./tools/manage.sh run-tests
.github/workflows/validate_pr_labels.yml (vendored, new file, 18 lines)

@@ -0,0 +1,18 @@
name: 🔎 Validate PR Labels
on:
  pull_request:
    types:
      - opened
      - edited
      - labeled
      - unlabeled

jobs:
  validate-type-label:
    uses: ynput/ops-repo-automation/.github/workflows/validate_pr_labels.yml@main
    with:
      repo: "${{ github.repository }}"
      pull_request_number: ${{ github.event.pull_request.number }}
      query_prefix: "type: "
    secrets:
      token: ${{ secrets.YNPUT_BOT_TOKEN }}
|
|
@ -370,67 +370,11 @@ def _load_ayon_addons(log):
|
|||
return all_addon_modules
|
||||
|
||||
|
||||
def _load_addons_in_core(log):
|
||||
# Add current directory at first place
|
||||
# - has small differences in import logic
|
||||
addon_modules = []
|
||||
modules_dir = os.path.join(AYON_CORE_ROOT, "modules")
|
||||
if not os.path.exists(modules_dir):
|
||||
log.warning(
|
||||
f"Could not find path when loading AYON addons \"{modules_dir}\""
|
||||
)
|
||||
return addon_modules
|
||||
|
||||
ignored_filenames = IGNORED_FILENAMES | IGNORED_DEFAULT_FILENAMES
|
||||
for filename in os.listdir(modules_dir):
|
||||
# Ignore filenames
|
||||
if filename in ignored_filenames:
|
||||
continue
|
||||
|
||||
fullpath = os.path.join(modules_dir, filename)
|
||||
basename, ext = os.path.splitext(filename)
|
||||
|
||||
# Validations
|
||||
if os.path.isdir(fullpath):
|
||||
# Check existence of init file
|
||||
init_path = os.path.join(fullpath, "__init__.py")
|
||||
if not os.path.exists(init_path):
|
||||
log.debug((
|
||||
"Addon directory does not contain __init__.py"
|
||||
f" file {fullpath}"
|
||||
))
|
||||
continue
|
||||
|
||||
elif ext != ".py":
|
||||
continue
|
||||
|
||||
# TODO add more logic how to define if folder is addon or not
|
||||
# - check manifest and content of manifest
|
||||
try:
|
||||
# Don't import dynamically current directory modules
|
||||
import_str = f"ayon_core.modules.{basename}"
|
||||
default_module = __import__(import_str, fromlist=("", ))
|
||||
addon_modules.append(default_module)
|
||||
|
||||
except Exception:
|
||||
log.error(
|
||||
f"Failed to import in-core addon '{basename}'.",
|
||||
exc_info=True
|
||||
)
|
||||
return addon_modules
|
||||
|
||||
|
||||
def _load_addons():
|
||||
log = Logger.get_logger("AddonsLoader")
|
||||
|
||||
addon_modules = _load_ayon_addons(log)
|
||||
# All addon in 'modules' folder are tray actions and should be moved
|
||||
# to tray tool.
|
||||
# TODO remove
|
||||
addon_modules.extend(_load_addons_in_core(log))
|
||||
|
||||
# Store modules to local cache
|
||||
_LoadCache.addon_modules = addon_modules
|
||||
_LoadCache.addon_modules = _load_ayon_addons(log)
|
||||
|
||||
|
||||
class AYONAddon(ABC):
|
||||
@@ -535,8 +479,8 @@ class AYONAddon(ABC):
         Implementation of this method is optional.

         Note:
-            The logic can be similar to logic in tray, but tray does not require
-            to be logged in.
+            The logic can be similar to logic in tray, but tray does not
+            require to be logged in.

         Args:
             process_context (ProcessContext): Context of child
@@ -950,6 +894,21 @@ class AddonsManager:
             output.extend(paths)
         return output

+    def collect_launcher_action_paths(self):
+        """Helper to collect launcher action paths from addons.
+
+        Returns:
+            list: List of paths to launcher actions.
+
+        """
+        output = self._collect_plugin_paths(
+            "get_launcher_action_paths"
+        )
+        # Add default core actions
+        actions_dir = os.path.join(AYON_CORE_ROOT, "plugins", "actions")
+        output.insert(0, actions_dir)
+        return output
+
     def collect_create_plugin_paths(self, host_name):
         """Helper to collect creator plugin paths from addons.
@@ -54,6 +54,13 @@ class IPluginPaths(AYONInterface):
             paths = [paths]
         return paths

+    def get_launcher_action_paths(self):
+        """Receive launcher actions paths.
+
+        Give addons ability to add launcher actions paths.
+        """
+        return self._get_plugin_paths_by_type("actions")
+
     def get_create_plugin_paths(self, host_name):
         """Receive create plugin paths.
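For orientation, here is a minimal sketch (not part of this diff) of an addon that would feed the new collect_launcher_action_paths() helper. The addon name, directory layout and the assumption that AYONAddon and IPluginPaths are importable from ayon_core.addon are illustrative only.

    import os

    from ayon_core.addon import AYONAddon, IPluginPaths

    MY_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))


    class ExampleActionsAddon(AYONAddon, IPluginPaths):
        """Hypothetical addon exposing launcher actions."""
        name = "example_actions"
        version = "0.1.0"

        def get_plugin_paths(self):
            # The default 'get_launcher_action_paths' implementation looks up
            # the "actions" key, so this directory ends up in the list
            # returned by AddonsManager.collect_launcher_action_paths().
            return {
                "actions": [os.path.join(MY_ADDON_ROOT, "launcher_actions")],
            }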
@ -125,6 +132,7 @@ class ITrayAddon(AYONInterface):
|
|||
|
||||
tray_initialized = False
|
||||
_tray_manager = None
|
||||
_admin_submenu = None
|
||||
|
||||
@abstractmethod
|
||||
def tray_init(self):
|
||||
|
|
@ -198,6 +206,27 @@ class ITrayAddon(AYONInterface):
|
|||
if hasattr(self.manager, "add_doubleclick_callback"):
|
||||
self.manager.add_doubleclick_callback(self, callback)
|
||||
|
||||
@staticmethod
|
||||
def admin_submenu(tray_menu):
|
||||
if ITrayAddon._admin_submenu is None:
|
||||
from qtpy import QtWidgets
|
||||
|
||||
admin_submenu = QtWidgets.QMenu("Admin", tray_menu)
|
||||
admin_submenu.menuAction().setVisible(False)
|
||||
ITrayAddon._admin_submenu = admin_submenu
|
||||
return ITrayAddon._admin_submenu
|
||||
|
||||
@staticmethod
|
||||
def add_action_to_admin_submenu(label, tray_menu):
|
||||
from qtpy import QtWidgets
|
||||
|
||||
menu = ITrayAddon.admin_submenu(tray_menu)
|
||||
action = QtWidgets.QAction(label, menu)
|
||||
menu.addAction(action)
|
||||
if not menu.menuAction().isVisible():
|
||||
menu.menuAction().setVisible(True)
|
||||
return action
|
||||
|
||||
|
||||
class ITrayAction(ITrayAddon):
|
||||
"""Implementation of Tray action.
|
||||
|
|
@ -211,7 +240,6 @@ class ITrayAction(ITrayAddon):
|
|||
"""
|
||||
|
||||
admin_action = False
|
||||
_admin_submenu = None
|
||||
_action_item = None
|
||||
|
||||
@property
|
||||
|
|
@ -229,12 +257,7 @@ class ITrayAction(ITrayAddon):
|
|||
from qtpy import QtWidgets
|
||||
|
||||
if self.admin_action:
|
||||
menu = self.admin_submenu(tray_menu)
|
||||
action = QtWidgets.QAction(self.label, menu)
|
||||
menu.addAction(action)
|
||||
if not menu.menuAction().isVisible():
|
||||
menu.menuAction().setVisible(True)
|
||||
|
||||
action = self.add_action_to_admin_submenu(self.label, tray_menu)
|
||||
else:
|
||||
action = QtWidgets.QAction(self.label, tray_menu)
|
||||
tray_menu.addAction(action)
|
||||
|
|
@ -248,16 +271,6 @@ class ITrayAction(ITrayAddon):
|
|||
def tray_exit(self):
|
||||
return
|
||||
|
||||
@staticmethod
|
||||
def admin_submenu(tray_menu):
|
||||
if ITrayAction._admin_submenu is None:
|
||||
from qtpy import QtWidgets
|
||||
|
||||
admin_submenu = QtWidgets.QMenu("Admin", tray_menu)
|
||||
admin_submenu.menuAction().setVisible(False)
|
||||
ITrayAction._admin_submenu = admin_submenu
|
||||
return ITrayAction._admin_submenu
|
||||
|
||||
|
||||
class ITrayService(ITrayAddon):
|
||||
# Module's property
|
||||
|
|
|
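A hedged sketch of how a tray action could use the admin submenu helpers that moved from ITrayAction up to ITrayAddon. The class, its label and the assumption that ITrayAction is importable from ayon_core.addon are made up for illustration.

    from ayon_core.addon import AYONAddon, ITrayAction


    class CleanupCacheAction(AYONAddon, ITrayAction):
        """Hypothetical admin-only tray action."""
        name = "cleanup_cache_action"
        label = "Cleanup Cache"
        # With 'admin_action' enabled, 'tray_menu' routes the action through
        # 'ITrayAddon.add_action_to_admin_submenu' instead of the root menu.
        admin_action = True

        def tray_init(self):
            pass

        def on_action_trigger(self):
            print("Running hypothetical cache cleanup...")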
|||
|
|
@ -8,7 +8,6 @@ from pathlib import Path
|
|||
import warnings
|
||||
|
||||
import click
|
||||
import acre
|
||||
|
||||
from ayon_core import AYON_CORE_ROOT
|
||||
from ayon_core.addon import AddonsManager
|
||||
|
|
@ -18,6 +17,11 @@ from ayon_core.lib import (
|
|||
is_running_from_build,
|
||||
Logger,
|
||||
)
|
||||
from ayon_core.lib.env_tools import (
|
||||
parse_env_variables_structure,
|
||||
compute_env_variables_structure,
|
||||
merge_env_variables,
|
||||
)
|
||||
|
||||
|
||||
|
||||
|
|
@ -146,7 +150,8 @@ def publish_report_viewer():
|
|||
@main_cli.command()
|
||||
@click.argument("output_path")
|
||||
@click.option("--project", help="Define project context")
|
||||
@click.option("--folder", help="Define folder in project (project must be set)")
|
||||
@click.option(
|
||||
"--folder", help="Define folder in project (project must be set)")
|
||||
@click.option(
|
||||
"--strict",
|
||||
is_flag=True,
|
||||
|
|
@ -234,24 +239,19 @@ def version(build):
|
|||
|
||||
def _set_global_environments() -> None:
|
||||
"""Set global AYON environments."""
|
||||
general_env = get_general_environments()
|
||||
# First resolve general environment
|
||||
general_env = parse_env_variables_structure(get_general_environments())
|
||||
|
||||
# first resolve general environment because merge doesn't expect
|
||||
# values to be list.
|
||||
# TODO: switch to AYON environment functions
|
||||
merged_env = acre.merge(
|
||||
acre.compute(acre.parse(general_env), cleanup=False),
|
||||
# Merge environments with current environments and update values
|
||||
merged_env = merge_env_variables(
|
||||
compute_env_variables_structure(general_env),
|
||||
dict(os.environ)
|
||||
)
|
||||
env = acre.compute(
|
||||
merged_env,
|
||||
cleanup=False
|
||||
)
|
||||
env = compute_env_variables_structure(merged_env)
|
||||
os.environ.clear()
|
||||
os.environ.update(env)
|
||||
|
||||
# Hardcoded default values
|
||||
os.environ["PYBLISH_GUI"] = "pyblish_pype"
|
||||
# Change scale factor only if is not set
|
||||
if "QT_AUTO_SCREEN_SCALE_FACTOR" not in os.environ:
|
||||
os.environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
|
||||
|
|
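Read as plain code, the updated _set_global_environments() boils down to the following sequence. This is a simplified sketch using the env_tools functions introduced later in this diff; the sample settings dict is invented.

    import os

    from ayon_core.lib.env_tools import (
        parse_env_variables_structure,
        compute_env_variables_structure,
        merge_env_variables,
    )

    # Hypothetical "general environments" value as it could come from settings.
    general_env_settings = {
        "AYON_LOG_NO_COLORS": "1",
        "STUDIO_TOOLS": {
            "windows": "C:/tools", "linux": "/opt/tools", "darwin": "/opt/tools"
        },
        "PYTHONPATH": ["{STUDIO_TOOLS}/python", "{PYTHONPATH}"],
    }

    # 1. Flatten platform values and join list values with the path separator.
    general_env = parse_env_variables_structure(general_env_settings)
    # 2. Resolve references between the parsed keys ('{STUDIO_TOOLS}' above),
    #    then merge onto the current environment so leftovers such as
    #    '{PYTHONPATH}' are filled from os.environ.
    merged_env = merge_env_variables(
        compute_env_variables_structure(general_env),
        dict(os.environ),
    )
    # 3. A final compute pass resolves references introduced by the merge.
    env = compute_env_variables_structure(merged_env)

    os.environ.clear()
    os.environ.update(env)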
@ -262,8 +262,8 @@ def _set_addons_environments(addons_manager):
|
|||
|
||||
# Merge environments with current environments and update values
|
||||
if module_envs := addons_manager.collect_global_environments():
|
||||
parsed_envs = acre.parse(module_envs)
|
||||
env = acre.merge(parsed_envs, dict(os.environ))
|
||||
parsed_envs = parse_env_variables_structure(module_envs)
|
||||
env = merge_env_variables(parsed_envs, dict(os.environ))
|
||||
os.environ.clear()
|
||||
os.environ.update(env)
|
||||
|
||||
|
|
@ -289,8 +289,6 @@ def main(*args, **kwargs):
|
|||
split_paths = python_path.split(os.pathsep)
|
||||
|
||||
additional_paths = [
|
||||
# add AYON tools for 'pyblish_pype'
|
||||
os.path.join(AYON_CORE_ROOT, "tools"),
|
||||
# add common AYON vendor
|
||||
# (common for multiple Python interpreter versions)
|
||||
os.path.join(AYON_CORE_ROOT, "vendor", "python")
|
||||
|
|
|
|||
@@ -26,10 +26,12 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
         "photoshop",
         "tvpaint",
         "substancepainter",
+        "substancedesigner",
         "aftereffects",
         "wrap",
         "openrv",
-        "cinema4d"
+        "cinema4d",
+        "silhouette",
     }
     launch_types = {LaunchTypes.local}
@@ -10,6 +10,7 @@ class OCIOEnvHook(PreLaunchHook):
     order = 0
     hosts = {
         "substancepainter",
+        "substancedesigner",
         "fusion",
         "blender",
         "aftereffects",
@@ -20,7 +21,8 @@ class OCIOEnvHook(PreLaunchHook):
         "hiero",
         "resolve",
         "openrv",
-        "cinema4d"
+        "cinema4d",
+        "silhouette",
     }
     launch_types = set()
@@ -117,10 +117,7 @@ class HostDirmap(ABC):
         It checks if Site Sync is enabled and user chose to use local
         site, in that case configuration in Local Settings takes precedence
         """
-
-        dirmap_label = "{}-dirmap".format(self.host_name)
-        mapping_sett = self.project_settings[self.host_name].get(dirmap_label,
-                                                                 {})
+        mapping_sett = self.project_settings[self.host_name].get("dirmap", {})
         local_mapping = self._get_local_sync_dirmap()
         mapping_enabled = mapping_sett.get("enabled") or bool(local_mapping)
         if not mapping_enabled:
|
|
@ -6,82 +6,58 @@ import json
|
|||
import copy
|
||||
import warnings
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from typing import Any, Optional
|
||||
import typing
|
||||
from typing import (
|
||||
Any,
|
||||
Optional,
|
||||
List,
|
||||
Set,
|
||||
Dict,
|
||||
Iterable,
|
||||
TypeVar,
|
||||
)
|
||||
|
||||
import clique
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from typing import Self, Tuple, Union, TypedDict, Pattern
|
||||
|
||||
|
||||
class EnumItemDict(TypedDict):
|
||||
label: str
|
||||
value: Any
|
||||
|
||||
|
||||
EnumItemsInputType = Union[
|
||||
Dict[Any, str],
|
||||
List[Tuple[Any, str]],
|
||||
List[Any],
|
||||
List[EnumItemDict]
|
||||
]
|
||||
|
||||
|
||||
class FileDefItemDict(TypedDict):
|
||||
directory: str
|
||||
filenames: List[str]
|
||||
frames: Optional[List[int]]
|
||||
template: Optional[str]
|
||||
is_sequence: Optional[bool]
|
||||
|
||||
|
||||
# Global variable which store attribute definitions by type
|
||||
# - default types are registered on import
|
||||
_attr_defs_by_type = {}
|
||||
|
||||
|
||||
def register_attr_def_class(cls):
|
||||
"""Register attribute definition.
|
||||
|
||||
Currently registered definitions are used to deserialize data to objects.
|
||||
|
||||
Attrs:
|
||||
cls (AbstractAttrDef): Non-abstract class to be registered with unique
|
||||
'type' attribute.
|
||||
|
||||
Raises:
|
||||
KeyError: When type was already registered.
|
||||
"""
|
||||
|
||||
if cls.type in _attr_defs_by_type:
|
||||
raise KeyError("Type \"{}\" was already registered".format(cls.type))
|
||||
_attr_defs_by_type[cls.type] = cls
|
||||
|
||||
|
||||
def get_attributes_keys(attribute_definitions):
|
||||
"""Collect keys from list of attribute definitions.
|
||||
|
||||
Args:
|
||||
attribute_definitions (List[AbstractAttrDef]): Objects of attribute
|
||||
definitions.
|
||||
|
||||
Returns:
|
||||
Set[str]: Keys that will be created using passed attribute definitions.
|
||||
"""
|
||||
|
||||
keys = set()
|
||||
if not attribute_definitions:
|
||||
return keys
|
||||
|
||||
for attribute_def in attribute_definitions:
|
||||
if not isinstance(attribute_def, UIDef):
|
||||
keys.add(attribute_def.key)
|
||||
return keys
|
||||
|
||||
|
||||
def get_default_values(attribute_definitions):
|
||||
"""Receive default values for attribute definitions.
|
||||
|
||||
Args:
|
||||
attribute_definitions (List[AbstractAttrDef]): Attribute definitions
|
||||
for which default values should be collected.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Default values for passed attribute definitions.
|
||||
"""
|
||||
|
||||
output = {}
|
||||
if not attribute_definitions:
|
||||
return output
|
||||
|
||||
for attr_def in attribute_definitions:
|
||||
# Skip UI definitions
|
||||
if not isinstance(attr_def, UIDef):
|
||||
output[attr_def.key] = attr_def.default
|
||||
return output
|
||||
# Type hint helpers
|
||||
IntFloatType = "Union[int, float]"
|
||||
|
||||
|
||||
class AbstractAttrDefMeta(ABCMeta):
|
||||
"""Metaclass to validate the existence of 'key' attribute.
|
||||
|
||||
Each object of `AbstractAttrDef` must have defined 'key' attribute.
|
||||
"""
|
||||
|
||||
"""
|
||||
def __call__(cls, *args, **kwargs):
|
||||
obj = super(AbstractAttrDefMeta, cls).__call__(*args, **kwargs)
|
||||
init_class = getattr(obj, "__init__class__", None)
|
||||
|
|
@ -93,8 +69,12 @@ class AbstractAttrDefMeta(ABCMeta):
|
|||
|
||||
|
||||
def _convert_reversed_attr(
|
||||
main_value, depr_value, main_label, depr_label, default
|
||||
):
|
||||
main_value: Any,
|
||||
depr_value: Any,
|
||||
main_label: str,
|
||||
depr_label: str,
|
||||
default: Any,
|
||||
) -> Any:
|
||||
if main_value is not None and depr_value is not None:
|
||||
if main_value == depr_value:
|
||||
print(
|
||||
|
|
@ -140,8 +120,8 @@ class AbstractAttrDef(metaclass=AbstractAttrDefMeta):
|
|||
enabled (Optional[bool]): Item is enabled (for UI purposes).
|
||||
hidden (Optional[bool]): DEPRECATED: Use 'visible' instead.
|
||||
disabled (Optional[bool]): DEPRECATED: Use 'enabled' instead.
|
||||
"""
|
||||
|
||||
"""
|
||||
type_attributes = []
|
||||
|
||||
is_value_def = True
|
||||
|
|
@ -183,7 +163,7 @@ class AbstractAttrDef(metaclass=AbstractAttrDefMeta):
|
|||
def id(self) -> str:
|
||||
return self._id
|
||||
|
||||
def clone(self):
|
||||
def clone(self) -> "Self":
|
||||
data = self.serialize()
|
||||
data.pop("type")
|
||||
return self.deserialize(data)
|
||||
|
|
@ -251,28 +231,28 @@ class AbstractAttrDef(metaclass=AbstractAttrDefMeta):
|
|||
|
||||
Returns:
|
||||
str: Type of attribute definition.
|
||||
"""
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def convert_value(self, value):
|
||||
def convert_value(self, value: Any) -> Any:
|
||||
"""Convert value to a valid one.
|
||||
|
||||
Convert passed value to a valid type. Use default if value can't be
|
||||
converted.
|
||||
"""
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def serialize(self):
|
||||
def serialize(self) -> Dict[str, Any]:
|
||||
"""Serialize object to data so it's possible to recreate it.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Serialized object that can be passed to
|
||||
'deserialize' method.
|
||||
"""
|
||||
|
||||
"""
|
||||
data = {
|
||||
"type": self.type,
|
||||
"key": self.key,
|
||||
|
|
@ -288,7 +268,7 @@ class AbstractAttrDef(metaclass=AbstractAttrDefMeta):
|
|||
return data
|
||||
|
||||
@classmethod
|
||||
def deserialize(cls, data):
|
||||
def deserialize(cls, data: Dict[str, Any]) -> "Self":
|
||||
"""Recreate object from data.
|
||||
|
||||
Data can be received using 'serialize' method.
|
||||
|
|
@ -299,10 +279,12 @@ class AbstractAttrDef(metaclass=AbstractAttrDefMeta):
|
|||
|
||||
return cls(**data)
|
||||
|
||||
def _def_type_compare(self, other: "AbstractAttrDef") -> bool:
|
||||
def _def_type_compare(self, other: "Self") -> bool:
|
||||
return True
|
||||
|
||||
|
||||
AttrDefType = TypeVar("AttrDefType", bound=AbstractAttrDef)
|
||||
|
||||
# -----------------------------------------
|
||||
# UI attribute definitions won't hold value
|
||||
# -----------------------------------------
|
||||
|
|
@ -310,13 +292,19 @@ class AbstractAttrDef(metaclass=AbstractAttrDefMeta):
|
|||
class UIDef(AbstractAttrDef):
|
||||
is_value_def = False
|
||||
|
||||
def __init__(self, key=None, default=None, *args, **kwargs):
|
||||
def __init__(
|
||||
self,
|
||||
key: Optional[str] = None,
|
||||
default: Optional[Any] = None,
|
||||
*args,
|
||||
**kwargs
|
||||
):
|
||||
super().__init__(key, default, *args, **kwargs)
|
||||
|
||||
def is_value_valid(self, value: Any) -> bool:
|
||||
return True
|
||||
|
||||
def convert_value(self, value):
|
||||
def convert_value(self, value: Any) -> Any:
|
||||
return value
|
||||
|
||||
|
||||
|
|
@ -343,18 +331,18 @@ class UnknownDef(AbstractAttrDef):
|
|||
|
||||
This attribute can be used to keep existing data unchanged but does not
|
||||
have known definition of type.
|
||||
"""
|
||||
|
||||
"""
|
||||
type = "unknown"
|
||||
|
||||
def __init__(self, key, default=None, **kwargs):
|
||||
def __init__(self, key: str, default: Optional[Any] = None, **kwargs):
|
||||
kwargs["default"] = default
|
||||
super().__init__(key, **kwargs)
|
||||
|
||||
def is_value_valid(self, value: Any) -> bool:
|
||||
return True
|
||||
|
||||
def convert_value(self, value):
|
||||
def convert_value(self, value: Any) -> Any:
|
||||
return value
|
||||
|
||||
|
||||
|
|
@ -365,11 +353,11 @@ class HiddenDef(AbstractAttrDef):
|
|||
to other attributes (e.g. in multi-page UIs).
|
||||
|
||||
Keep in mind the value should be possible to parse by json parser.
|
||||
"""
|
||||
|
||||
"""
|
||||
type = "hidden"
|
||||
|
||||
def __init__(self, key, default=None, **kwargs):
|
||||
def __init__(self, key: str, default: Optional[Any] = None, **kwargs):
|
||||
kwargs["default"] = default
|
||||
kwargs["visible"] = False
|
||||
super().__init__(key, **kwargs)
|
||||
|
|
@ -377,7 +365,7 @@ class HiddenDef(AbstractAttrDef):
|
|||
def is_value_valid(self, value: Any) -> bool:
|
||||
return True
|
||||
|
||||
def convert_value(self, value):
|
||||
def convert_value(self, value: Any) -> Any:
|
||||
return value
|
||||
|
||||
|
||||
|
|
@ -392,8 +380,8 @@ class NumberDef(AbstractAttrDef):
|
|||
maximum(int, float): Maximum possible value.
|
||||
decimals(int): Maximum decimal points of value.
|
||||
default(int, float): Default value for conversion.
|
||||
"""
|
||||
|
||||
"""
|
||||
type = "number"
|
||||
type_attributes = [
|
||||
"minimum",
|
||||
|
|
@ -402,7 +390,12 @@ class NumberDef(AbstractAttrDef):
|
|||
]
|
||||
|
||||
def __init__(
|
||||
self, key, minimum=None, maximum=None, decimals=None, default=None,
|
||||
self,
|
||||
key: str,
|
||||
minimum: Optional[IntFloatType] = None,
|
||||
maximum: Optional[IntFloatType] = None,
|
||||
decimals: Optional[int] = None,
|
||||
default: Optional[IntFloatType] = None,
|
||||
**kwargs
|
||||
):
|
||||
minimum = 0 if minimum is None else minimum
|
||||
|
|
@ -428,9 +421,9 @@ class NumberDef(AbstractAttrDef):
|
|||
|
||||
super().__init__(key, default=default, **kwargs)
|
||||
|
||||
self.minimum = minimum
|
||||
self.maximum = maximum
|
||||
self.decimals = 0 if decimals is None else decimals
|
||||
self.minimum: IntFloatType = minimum
|
||||
self.maximum: IntFloatType = maximum
|
||||
self.decimals: int = 0 if decimals is None else decimals
|
||||
|
||||
def is_value_valid(self, value: Any) -> bool:
|
||||
if self.decimals == 0:
|
||||
|
|
@ -442,7 +435,7 @@ class NumberDef(AbstractAttrDef):
|
|||
return False
|
||||
return True
|
||||
|
||||
def convert_value(self, value):
|
||||
def convert_value(self, value: Any) -> IntFloatType:
|
||||
if isinstance(value, str):
|
||||
try:
|
||||
value = float(value)
|
||||
|
|
@ -477,8 +470,8 @@ class TextDef(AbstractAttrDef):
|
|||
regex(str, re.Pattern): Regex validation.
|
||||
placeholder(str): UI placeholder for attribute.
|
||||
default(str, None): Default value. Empty string used when not defined.
|
||||
"""
|
||||
|
||||
"""
|
||||
type = "text"
|
||||
type_attributes = [
|
||||
"multiline",
|
||||
|
|
@ -486,7 +479,12 @@ class TextDef(AbstractAttrDef):
|
|||
]
|
||||
|
||||
def __init__(
|
||||
self, key, multiline=None, regex=None, placeholder=None, default=None,
|
||||
self,
|
||||
key: str,
|
||||
multiline: Optional[bool] = None,
|
||||
regex: Optional[str] = None,
|
||||
placeholder: Optional[str] = None,
|
||||
default: Optional[str] = None,
|
||||
**kwargs
|
||||
):
|
||||
if default is None:
|
||||
|
|
@ -505,9 +503,9 @@ class TextDef(AbstractAttrDef):
|
|||
if isinstance(regex, str):
|
||||
regex = re.compile(regex)
|
||||
|
||||
self.multiline = multiline
|
||||
self.placeholder = placeholder
|
||||
self.regex = regex
|
||||
self.multiline: bool = multiline
|
||||
self.placeholder: Optional[str] = placeholder
|
||||
self.regex: Optional["Pattern"] = regex
|
||||
|
||||
def is_value_valid(self, value: Any) -> bool:
|
||||
if not isinstance(value, str):
|
||||
|
|
@ -516,12 +514,12 @@ class TextDef(AbstractAttrDef):
|
|||
return False
|
||||
return True
|
||||
|
||||
def convert_value(self, value):
|
||||
def convert_value(self, value: Any) -> str:
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
return self.default
|
||||
|
||||
def serialize(self):
|
||||
def serialize(self) -> Dict[str, Any]:
|
||||
data = super().serialize()
|
||||
regex = None
|
||||
if self.regex is not None:
|
||||
|
|
@ -545,28 +543,46 @@ class EnumDef(AbstractAttrDef):
|
|||
is enabled.
|
||||
|
||||
Args:
|
||||
items (Union[list[str], list[dict[str, Any]]): Items definition that
|
||||
can be converted using 'prepare_enum_items'.
|
||||
key (str): Key under which value is stored.
|
||||
items (EnumItemsInputType): Items definition that can be converted
|
||||
using 'prepare_enum_items'.
|
||||
default (Optional[Any]): Default value. Must be one key(value) from
|
||||
passed items or list of values for multiselection.
|
||||
multiselection (Optional[bool]): If True, multiselection is allowed.
|
||||
Output is list of selected items.
|
||||
"""
|
||||
placeholder (Optional[str]): Placeholder for UI purposes, only for
|
||||
multiselection enumeration.
|
||||
|
||||
"""
|
||||
type = "enum"
|
||||
|
||||
type_attributes = [
|
||||
"multiselection",
|
||||
"placeholder",
|
||||
]
|
||||
|
||||
def __init__(
|
||||
self, key, items, default=None, multiselection=False, **kwargs
|
||||
self,
|
||||
key: str,
|
||||
items: "EnumItemsInputType",
|
||||
default: "Union[str, List[Any]]" = None,
|
||||
multiselection: Optional[bool] = False,
|
||||
placeholder: Optional[str] = None,
|
||||
**kwargs
|
||||
):
|
||||
if not items:
|
||||
raise ValueError((
|
||||
"Empty 'items' value. {} must have"
|
||||
if multiselection is None:
|
||||
multiselection = False
|
||||
|
||||
if not items and not multiselection:
|
||||
raise ValueError(
|
||||
f"Empty 'items' value. {self.__class__.__name__} must have"
|
||||
" defined values on initialization."
|
||||
).format(self.__class__.__name__))
|
||||
)
|
||||
|
||||
items = self.prepare_enum_items(items)
|
||||
item_values = [item["value"] for item in items]
|
||||
item_values_set = set(item_values)
|
||||
|
||||
if multiselection:
|
||||
if default is None:
|
||||
default = []
|
||||
|
|
@ -577,9 +593,10 @@ class EnumDef(AbstractAttrDef):
|
|||
|
||||
super().__init__(key, default=default, **kwargs)
|
||||
|
||||
self.items = items
|
||||
self._item_values = item_values_set
|
||||
self.multiselection = multiselection
|
||||
self.items: List["EnumItemDict"] = items
|
||||
self._item_values: Set[Any] = item_values_set
|
||||
self.multiselection: bool = multiselection
|
||||
self.placeholder: Optional[str] = placeholder
|
||||
|
||||
def convert_value(self, value):
|
||||
if not self.multiselection:
|
||||
|
|
@ -605,11 +622,12 @@ class EnumDef(AbstractAttrDef):
|
|||
def serialize(self):
|
||||
data = super().serialize()
|
||||
data["items"] = copy.deepcopy(self.items)
|
||||
data["multiselection"] = self.multiselection
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def prepare_enum_items(items):
|
||||
def prepare_enum_items(
|
||||
items: "EnumItemsInputType"
|
||||
) -> List["EnumItemDict"]:
|
||||
"""Convert items to unified structure.
|
||||
|
||||
Output is a list where each item is dictionary with 'value'
|
||||
|
|
@ -625,13 +643,12 @@ class EnumDef(AbstractAttrDef):
|
|||
```
|
||||
|
||||
Args:
|
||||
items (Union[Dict[str, Any], List[Any], List[Dict[str, Any]]): The
|
||||
items to convert.
|
||||
items (EnumItemsInputType): The items to convert.
|
||||
|
||||
Returns:
|
||||
List[Dict[str, Any]]: Unified structure of items.
|
||||
"""
|
||||
List[EnumItemDict]: Unified structure of items.
|
||||
|
||||
"""
|
||||
output = []
|
||||
if isinstance(items, dict):
|
||||
for value, label in items.items():
|
||||
|
|
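To make the accepted item shapes and the new multiselection and placeholder options concrete, a short sketch; the import path and the values are assumptions, not taken from this diff.

    from ayon_core.lib.attribute_definitions import EnumDef, get_default_values

    # Items can be a dict, a list of plain values, a list of (value, label)
    # tuples or a list of {"value": ..., "label": ...} dicts.
    review_def = EnumDef(
        "review_preset",
        items={"h264": "H.264", "prores": "ProRes"},
        default="h264",
    )

    # Multiselection variant: the default becomes a list and 'placeholder'
    # is shown in the UI while nothing is selected.
    tags_def = EnumDef(
        "tags",
        items=["animation", "layout", "lighting"],
        multiselection=True,
        placeholder="Select tags...",
    )

    print(get_default_values([review_def, tags_def]))
    # {'review_preset': 'h264', 'tags': []}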
@ -682,11 +699,11 @@ class BoolDef(AbstractAttrDef):
|
|||
|
||||
Args:
|
||||
default(bool): Default value. Set to `False` if not defined.
|
||||
"""
|
||||
|
||||
"""
|
||||
type = "bool"
|
||||
|
||||
def __init__(self, key, default=None, **kwargs):
|
||||
def __init__(self, key: str, default: Optional[bool] = None, **kwargs):
|
||||
if default is None:
|
||||
default = False
|
||||
super().__init__(key, default=default, **kwargs)
|
||||
|
|
@ -694,7 +711,7 @@ class BoolDef(AbstractAttrDef):
|
|||
def is_value_valid(self, value: Any) -> bool:
|
||||
return isinstance(value, bool)
|
||||
|
||||
def convert_value(self, value):
|
||||
def convert_value(self, value: Any) -> bool:
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
return self.default
|
||||
|
|
@ -702,7 +719,11 @@ class BoolDef(AbstractAttrDef):
|
|||
|
||||
class FileDefItem:
|
||||
def __init__(
|
||||
self, directory, filenames, frames=None, template=None
|
||||
self,
|
||||
directory: str,
|
||||
filenames: List[str],
|
||||
frames: Optional[List[int]] = None,
|
||||
template: Optional[str] = None,
|
||||
):
|
||||
self.directory = directory
|
||||
|
||||
|
|
@ -731,7 +752,7 @@ class FileDefItem:
|
|||
)
|
||||
|
||||
@property
|
||||
def label(self):
|
||||
def label(self) -> Optional[str]:
|
||||
if self.is_empty:
|
||||
return None
|
||||
|
||||
|
|
@ -774,7 +795,7 @@ class FileDefItem:
|
|||
filename_template, ",".join(ranges)
|
||||
)
|
||||
|
||||
def split_sequence(self):
|
||||
def split_sequence(self) -> List["Self"]:
|
||||
if not self.is_sequence:
|
||||
raise ValueError("Cannot split single file item")
|
||||
|
||||
|
|
@ -785,7 +806,7 @@ class FileDefItem:
|
|||
return self.from_paths(paths, False)
|
||||
|
||||
@property
|
||||
def ext(self):
|
||||
def ext(self) -> Optional[str]:
|
||||
if self.is_empty:
|
||||
return None
|
||||
_, ext = os.path.splitext(self.filenames[0])
|
||||
|
|
@ -794,14 +815,14 @@ class FileDefItem:
|
|||
return None
|
||||
|
||||
@property
|
||||
def lower_ext(self):
|
||||
def lower_ext(self) -> Optional[str]:
|
||||
ext = self.ext
|
||||
if ext is not None:
|
||||
return ext.lower()
|
||||
return ext
|
||||
|
||||
@property
|
||||
def is_dir(self):
|
||||
def is_dir(self) -> bool:
|
||||
if self.is_empty:
|
||||
return False
|
||||
|
||||
|
|
@ -810,10 +831,15 @@ class FileDefItem:
|
|||
return False
|
||||
return True
|
||||
|
||||
def set_directory(self, directory):
|
||||
def set_directory(self, directory: str):
|
||||
self.directory = directory
|
||||
|
||||
def set_filenames(self, filenames, frames=None, template=None):
|
||||
def set_filenames(
|
||||
self,
|
||||
filenames: List[str],
|
||||
frames: Optional[List[int]] = None,
|
||||
template: Optional[str] = None,
|
||||
):
|
||||
if frames is None:
|
||||
frames = []
|
||||
is_sequence = False
|
||||
|
|
@ -830,17 +856,21 @@ class FileDefItem:
|
|||
self.is_sequence = is_sequence
|
||||
|
||||
@classmethod
|
||||
def create_empty_item(cls):
|
||||
def create_empty_item(cls) -> "Self":
|
||||
return cls("", "")
|
||||
|
||||
@classmethod
|
||||
def from_value(cls, value, allow_sequences):
|
||||
def from_value(
|
||||
cls,
|
||||
value: "Union[List[FileDefItemDict], FileDefItemDict]",
|
||||
allow_sequences: bool,
|
||||
) -> List["Self"]:
|
||||
"""Convert passed value to FileDefItem objects.
|
||||
|
||||
Returns:
|
||||
list: Created FileDefItem objects.
|
||||
"""
|
||||
|
||||
"""
|
||||
# Convert single item to iterable
|
||||
if not isinstance(value, (list, tuple, set)):
|
||||
value = [value]
|
||||
|
|
@ -872,7 +902,7 @@ class FileDefItem:
|
|||
return output
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data):
|
||||
def from_dict(cls, data: "FileDefItemDict") -> "Self":
|
||||
return cls(
|
||||
data["directory"],
|
||||
data["filenames"],
|
||||
|
|
@ -881,7 +911,11 @@ class FileDefItem:
|
|||
)
|
||||
|
||||
@classmethod
|
||||
def from_paths(cls, paths, allow_sequences):
|
||||
def from_paths(
|
||||
cls,
|
||||
paths: List[str],
|
||||
allow_sequences: bool,
|
||||
) -> List["Self"]:
|
||||
filenames_by_dir = collections.defaultdict(list)
|
||||
for path in paths:
|
||||
normalized = os.path.normpath(path)
|
||||
|
|
@ -910,7 +944,7 @@ class FileDefItem:
|
|||
|
||||
return output
|
||||
|
||||
def to_dict(self):
|
||||
def to_dict(self) -> "FileDefItemDict":
|
||||
output = {
|
||||
"is_sequence": self.is_sequence,
|
||||
"directory": self.directory,
|
||||
|
|
@ -948,8 +982,15 @@ class FileDef(AbstractAttrDef):
|
|||
]
|
||||
|
||||
def __init__(
|
||||
self, key, single_item=True, folders=None, extensions=None,
|
||||
allow_sequences=True, extensions_label=None, default=None, **kwargs
|
||||
self,
|
||||
key: str,
|
||||
single_item: Optional[bool] = True,
|
||||
folders: Optional[bool] = None,
|
||||
extensions: Optional[Iterable[str]] = None,
|
||||
allow_sequences: Optional[bool] = True,
|
||||
extensions_label: Optional[str] = None,
|
||||
default: Optional["Union[FileDefItemDict, List[str]]"] = None,
|
||||
**kwargs
|
||||
):
|
||||
if folders is None and extensions is None:
|
||||
folders = True
|
||||
|
|
@ -966,7 +1007,9 @@ class FileDef(AbstractAttrDef):
|
|||
FileDefItem.from_dict(default)
|
||||
|
||||
elif isinstance(default, str):
|
||||
default = FileDefItem.from_paths([default.strip()])[0]
|
||||
default = FileDefItem.from_paths(
|
||||
[default.strip()], allow_sequences
|
||||
)[0]
|
||||
|
||||
else:
|
||||
raise TypeError((
|
||||
|
|
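For context, a sketch of the FileDefItem.from_paths call whose missing 'allow_sequences' argument is being fixed here; the paths are invented and the import path is assumed.

    from ayon_core.lib.attribute_definitions import FileDefItem

    paths = [
        "/renders/sh010/beauty.1001.exr",
        "/renders/sh010/beauty.1002.exr",
        "/renders/sh010/beauty.1003.exr",
    ]
    # With sequences allowed the frames collapse into one sequence item.
    items = FileDefItem.from_paths(paths, True)
    print(len(items), items[0].is_sequence)   # 1 True
    # With sequences disallowed every path stays a separate single-file item.
    items = FileDefItem.from_paths(paths, False)
    print(len(items), items[0].is_sequence)   # 3 False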
@ -985,14 +1028,14 @@ class FileDef(AbstractAttrDef):
|
|||
if is_label_horizontal is None:
|
||||
kwargs["is_label_horizontal"] = False
|
||||
|
||||
self.single_item = single_item
|
||||
self.folders = folders
|
||||
self.extensions = set(extensions)
|
||||
self.allow_sequences = allow_sequences
|
||||
self.extensions_label = extensions_label
|
||||
self.single_item: bool = single_item
|
||||
self.folders: bool = folders
|
||||
self.extensions: Set[str] = set(extensions)
|
||||
self.allow_sequences: bool = allow_sequences
|
||||
self.extensions_label: Optional[str] = extensions_label
|
||||
super().__init__(key, default=default, **kwargs)
|
||||
|
||||
def __eq__(self, other):
|
||||
def __eq__(self, other: Any) -> bool:
|
||||
if not super().__eq__(other):
|
||||
return False
|
||||
|
||||
|
|
@ -1026,7 +1069,9 @@ class FileDef(AbstractAttrDef):
|
|||
return False
|
||||
return True
|
||||
|
||||
def convert_value(self, value):
|
||||
def convert_value(
|
||||
self, value: Any
|
||||
) -> "Union[FileDefItemDict, List[FileDefItemDict]]":
|
||||
if isinstance(value, (str, dict)):
|
||||
value = [value]
|
||||
|
||||
|
|
@ -1044,7 +1089,9 @@ class FileDef(AbstractAttrDef):
|
|||
pass
|
||||
|
||||
if string_paths:
|
||||
file_items = FileDefItem.from_paths(string_paths)
|
||||
file_items = FileDefItem.from_paths(
|
||||
string_paths, self.allow_sequences
|
||||
)
|
||||
dict_items.extend([
|
||||
file_item.to_dict()
|
||||
for file_item in file_items
|
||||
|
|
@ -1062,55 +1109,124 @@ class FileDef(AbstractAttrDef):
|
|||
return []
|
||||
|
||||
|
||||
def serialize_attr_def(attr_def):
|
||||
def register_attr_def_class(cls: AttrDefType):
|
||||
"""Register attribute definition.
|
||||
|
||||
Currently registered definitions are used to deserialize data to objects.
|
||||
|
||||
Attrs:
|
||||
cls (AttrDefType): Non-abstract class to be registered with unique
|
||||
'type' attribute.
|
||||
|
||||
Raises:
|
||||
KeyError: When type was already registered.
|
||||
|
||||
"""
|
||||
if cls.type in _attr_defs_by_type:
|
||||
raise KeyError("Type \"{}\" was already registered".format(cls.type))
|
||||
_attr_defs_by_type[cls.type] = cls
|
||||
|
||||
|
||||
def get_attributes_keys(
|
||||
attribute_definitions: List[AttrDefType]
|
||||
) -> Set[str]:
|
||||
"""Collect keys from list of attribute definitions.
|
||||
|
||||
Args:
|
||||
attribute_definitions (List[AttrDefType]): Objects of attribute
|
||||
definitions.
|
||||
|
||||
Returns:
|
||||
Set[str]: Keys that will be created using passed attribute definitions.
|
||||
|
||||
"""
|
||||
keys = set()
|
||||
if not attribute_definitions:
|
||||
return keys
|
||||
|
||||
for attribute_def in attribute_definitions:
|
||||
if not isinstance(attribute_def, UIDef):
|
||||
keys.add(attribute_def.key)
|
||||
return keys
|
||||
|
||||
|
||||
def get_default_values(
|
||||
attribute_definitions: List[AttrDefType]
|
||||
) -> Dict[str, Any]:
|
||||
"""Receive default values for attribute definitions.
|
||||
|
||||
Args:
|
||||
attribute_definitions (List[AttrDefType]): Attribute definitions
|
||||
for which default values should be collected.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Default values for passed attribute definitions.
|
||||
|
||||
"""
|
||||
output = {}
|
||||
if not attribute_definitions:
|
||||
return output
|
||||
|
||||
for attr_def in attribute_definitions:
|
||||
# Skip UI definitions
|
||||
if not isinstance(attr_def, UIDef):
|
||||
output[attr_def.key] = attr_def.default
|
||||
return output
|
||||
|
||||
|
||||
def serialize_attr_def(attr_def: AttrDefType) -> Dict[str, Any]:
|
||||
"""Serialize attribute definition to data.
|
||||
|
||||
Args:
|
||||
attr_def (AbstractAttrDef): Attribute definition to serialize.
|
||||
attr_def (AttrDefType): Attribute definition to serialize.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Serialized data.
|
||||
"""
|
||||
|
||||
"""
|
||||
return attr_def.serialize()
|
||||
|
||||
|
||||
def serialize_attr_defs(attr_defs):
|
||||
def serialize_attr_defs(
|
||||
attr_defs: List[AttrDefType]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Serialize attribute definitions to data.
|
||||
|
||||
Args:
|
||||
attr_defs (List[AbstractAttrDef]): Attribute definitions to serialize.
|
||||
attr_defs (List[AttrDefType]): Attribute definitions to serialize.
|
||||
|
||||
Returns:
|
||||
List[Dict[str, Any]]: Serialized data.
|
||||
"""
|
||||
|
||||
"""
|
||||
return [
|
||||
serialize_attr_def(attr_def)
|
||||
for attr_def in attr_defs
|
||||
]
|
||||
|
||||
|
||||
def deserialize_attr_def(attr_def_data):
|
||||
def deserialize_attr_def(attr_def_data: Dict[str, Any]) -> AttrDefType:
|
||||
"""Deserialize attribute definition from data.
|
||||
|
||||
Args:
|
||||
attr_def_data (Dict[str, Any]): Attribute definition data to
|
||||
deserialize.
|
||||
"""
|
||||
|
||||
"""
|
||||
attr_type = attr_def_data.pop("type")
|
||||
cls = _attr_defs_by_type[attr_type]
|
||||
return cls.deserialize(attr_def_data)
|
||||
|
||||
|
||||
def deserialize_attr_defs(attr_defs_data):
|
||||
def deserialize_attr_defs(
|
||||
attr_defs_data: List[Dict[str, Any]]
|
||||
) -> List[AttrDefType]:
|
||||
"""Deserialize attribute definitions.
|
||||
|
||||
Args:
|
||||
List[Dict[str, Any]]: List of attribute definitions.
|
||||
"""
|
||||
|
||||
"""
|
||||
return [
|
||||
deserialize_attr_def(attr_def_data)
|
||||
for attr_def_data in attr_defs_data
|
||||
|
|
|
|||
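A round-trip sketch for the (de)serialization helpers above; the attribute definitions themselves are invented and the import path is assumed.

    from ayon_core.lib.attribute_definitions import (
        BoolDef,
        TextDef,
        serialize_attr_defs,
        deserialize_attr_defs,
    )

    attr_defs = [
        BoolDef("review", default=True, label="Review"),
        TextDef("comment", placeholder="Artist comment...", label="Comment"),
    ]

    # Serialized form is plain, JSON compatible data...
    data = serialize_attr_defs(attr_defs)
    # ...which can be recreated on the other side of a process boundary.
    restored = deserialize_attr_defs(data)
    print([type(attr_def).__name__ for attr_def in restored])
    # ['BoolDef', 'TextDef']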
|
|
@ -1,7 +1,34 @@
|
|||
from __future__ import annotations
|
||||
import os
|
||||
import re
|
||||
import platform
|
||||
import typing
|
||||
import collections
|
||||
from string import Formatter
|
||||
from typing import Optional
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from typing import Union, Literal
|
||||
|
||||
PlatformName = Literal["windows", "linux", "darwin"]
|
||||
EnvValue = Union[str, list[str], dict[str, str], dict[str, list[str]]]
|
||||
|
||||
|
||||
def env_value_to_bool(env_key=None, value=None, default=False):
|
||||
class CycleError(ValueError):
|
||||
"""Raised when a cycle is detected in dynamic env variables compute."""
|
||||
pass
|
||||
|
||||
|
||||
class DynamicKeyClashError(Exception):
|
||||
"""Raised when dynamic key clashes with an existing key."""
|
||||
pass
|
||||
|
||||
|
||||
def env_value_to_bool(
|
||||
env_key: Optional[str] = None,
|
||||
value: Optional[str] = None,
|
||||
default: bool = False,
|
||||
) -> bool:
|
||||
"""Convert environment variable value to boolean.
|
||||
|
||||
Function is based on value of the environemt variable. Value is lowered
|
||||
|
|
@ -11,6 +38,7 @@ def env_value_to_bool(env_key=None, value=None, default=False):
|
|||
bool: If value match to one of ["true", "yes", "1"] result if True
|
||||
but if value match to ["false", "no", "0"] result is False else
|
||||
default value is returned.
|
||||
|
||||
"""
|
||||
if value is None and env_key is None:
|
||||
return default
|
||||
|
|
@ -27,18 +55,23 @@ def env_value_to_bool(env_key=None, value=None, default=False):
|
|||
return default
|
||||
|
||||
|
||||
def get_paths_from_environ(env_key=None, env_value=None, return_first=False):
|
||||
def get_paths_from_environ(
|
||||
env_key: Optional[str] = None,
|
||||
env_value: Optional[str] = None,
|
||||
return_first: bool = False,
|
||||
) -> Optional[Union[str, list[str]]]:
|
||||
"""Return existing paths from specific environment variable.
|
||||
|
||||
Args:
|
||||
env_key (str): Environment key where should look for paths.
|
||||
env_value (str): Value of environment variable. Argument `env_key` is
|
||||
skipped if this argument is entered.
|
||||
env_key (Optional[str]): Environment key where should look for paths.
|
||||
env_value (Optional[str]): Value of environment variable.
|
||||
Argument `env_key` is skipped if this argument is entered.
|
||||
return_first (bool): Return first found value or return list of found
|
||||
paths. `None` or empty list returned if nothing found.
|
||||
|
||||
Returns:
|
||||
str, list, None: Result of found path/s.
|
||||
Optional[Union[str, list[str]]]: Result of found path/s.
|
||||
|
||||
"""
|
||||
existing_paths = []
|
||||
if not env_key and not env_value:
|
||||
|
|
@ -69,3 +102,225 @@ def get_paths_from_environ(env_key=None, env_value=None, return_first=False):
|
|||
return None
|
||||
# Return all existing paths from environment variable
|
||||
return existing_paths
|
||||
|
||||
|
||||
def parse_env_variables_structure(
|
||||
env: dict[str, EnvValue],
|
||||
platform_name: Optional[PlatformName] = None
|
||||
) -> dict[str, str]:
|
||||
"""Parse environment for platform-specific values and paths as lists.
|
||||
|
||||
Args:
|
||||
env (dict): The source environment to read.
|
||||
platform_name (Optional[PlatformName]): Name of platform to parse for.
|
||||
Defaults to current platform.
|
||||
|
||||
Returns:
|
||||
dict: The flattened environment for a platform.
|
||||
|
||||
"""
|
||||
if platform_name is None:
|
||||
platform_name = platform.system().lower()
|
||||
|
||||
# Separator based on OS 'os.pathsep' is ';' on Windows and ':' on Unix
|
||||
sep = ";" if platform_name == "windows" else ":"
|
||||
|
||||
result = {}
|
||||
for variable, value in env.items():
|
||||
# Platform specific values
|
||||
if isinstance(value, dict):
|
||||
value = value.get(platform_name)
|
||||
|
||||
# Allow to have lists as values in the tool data
|
||||
if isinstance(value, (list, tuple)):
|
||||
value = sep.join(value)
|
||||
|
||||
if not value:
|
||||
continue
|
||||
|
||||
if not isinstance(value, str):
|
||||
raise TypeError(f"Expected 'str' got '{type(value)}'")
|
||||
|
||||
result[variable] = value
|
||||
|
||||
return result
|
||||
|
||||
|
||||
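A small usage sketch of parse_env_variables_structure; the variable names and paths are invented.

    from ayon_core.lib.env_tools import parse_env_variables_structure

    env = {
        # Plain strings pass through unchanged.
        "AYON_LOG_NO_COLORS": "1",
        # Platform specific values pick the entry matching 'platform_name'.
        "OCIO": {
            "windows": "C:/ocio/config.ocio",
            "linux": "/opt/ocio/config.ocio",
            "darwin": "/opt/ocio/config.ocio",
        },
        # Lists are joined with ';' on Windows and ':' elsewhere.
        "PYTHONPATH": ["/opt/studio/python", "{PYTHONPATH}"],
    }

    print(parse_env_variables_structure(env, platform_name="linux"))
    # {'AYON_LOG_NO_COLORS': '1',
    #  'OCIO': '/opt/ocio/config.ocio',
    #  'PYTHONPATH': '/opt/studio/python:{PYTHONPATH}'}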
def _topological_sort(
|
||||
dependencies: dict[str, set[str]]
|
||||
) -> tuple[list[str], list[str]]:
|
||||
"""Sort values subject to dependency constraints.
|
||||
|
||||
Args:
|
||||
dependencies (dict[str, set[str]): Mapping of environment variable
|
||||
keys to a set of keys they depend on.
|
||||
|
||||
Returns:
|
||||
tuple[list[str], list[str]]: A tuple of two lists. The first list
|
||||
contains the ordered keys in which order should be environment
|
||||
keys filled, the second list contains the keys that would cause
|
||||
cyclic fill of values.
|
||||
|
||||
"""
|
||||
num_heads = collections.defaultdict(int) # num arrows pointing in
|
||||
tails = collections.defaultdict(list) # list of arrows going out
|
||||
heads = [] # unique list of heads in order first seen
|
||||
for head, tail_values in dependencies.items():
|
||||
for tail_value in tail_values:
|
||||
num_heads[tail_value] += 1
|
||||
if head not in tails:
|
||||
heads.append(head)
|
||||
tails[head].append(tail_value)
|
||||
|
||||
ordered = [head for head in heads if head not in num_heads]
|
||||
for head in ordered:
|
||||
for tail in tails[head]:
|
||||
num_heads[tail] -= 1
|
||||
if not num_heads[tail]:
|
||||
ordered.append(tail)
|
||||
cyclic = [tail for tail, heads in num_heads.items() if heads]
|
||||
return ordered, cyclic
|
||||
|
||||
|
||||
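A tiny illustration of what the helper returns, with invented keys; compute_env_variables_structure fills values in reversed order of the first list, and keys left in the second list would never resolve.

    # Assuming this runs inside ayon_core.lib.env_tools, where the private
    # '_topological_sort' helper is defined.
    dependencies = {
        "A": {"B"},  # value of A references {B}
        "B": {"C"},  # value of B references {C}
        "X": {"Y"},
        "Y": {"X"},  # X and Y reference each other
    }
    ordered, cyclic = _topological_sort(dependencies)
    print(ordered)  # ['A', 'B', 'C']
    print(cyclic)   # ['Y', 'X']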
class _PartialFormatDict(dict):
|
||||
"""This supports partial formatting.
|
||||
|
||||
Missing keys are replaced with the return value of __missing__.
|
||||
|
||||
"""
|
||||
def __init__(self, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
self._missing_template: str = "{{{key}}}"
|
||||
|
||||
def set_missing_template(self, template: str):
|
||||
self._missing_template = template
|
||||
|
||||
def __missing__(self, key: str) -> str:
|
||||
return self._missing_template.format(key=key)
|
||||
|
||||
|
||||
def _partial_format(
|
||||
value: str,
|
||||
data: dict[str, str],
|
||||
missing_template: Optional[str] = None,
|
||||
) -> str:
|
||||
"""Return string `s` formatted by `data` allowing a partial format
|
||||
|
||||
Arguments:
|
||||
value (str): The string that will be formatted
|
||||
data (dict): The dictionary used to format with.
|
||||
missing_template (Optional[str]): The template to use when a key is
|
||||
missing from the data. If `None`, the key will remain unformatted.
|
||||
|
||||
Example:
|
||||
>>> _partial_format("{d} {a} {b} {c} {d}", {'b': "and", 'd': "left"})
|
||||
'left {a} and {c} left'
|
||||
|
||||
"""
|
||||
|
||||
mapping = _PartialFormatDict(**data)
|
||||
if missing_template is not None:
|
||||
mapping.set_missing_template(missing_template)
|
||||
|
||||
formatter = Formatter()
|
||||
try:
|
||||
output = formatter.vformat(value, (), mapping)
|
||||
except Exception:
|
||||
r_token = re.compile(r"({.*?})")
|
||||
output = value
|
||||
for match in re.findall(r_token, value):
|
||||
try:
|
||||
output = re.sub(match, match.format(**data), output)
|
||||
except (KeyError, ValueError, IndexError):
|
||||
continue
|
||||
return output
|
||||
|
||||
|
||||
def compute_env_variables_structure(
|
||||
env: dict[str, str],
|
||||
fill_dynamic_keys: bool = True,
|
||||
) -> dict[str, str]:
|
||||
"""Compute the result from recursive dynamic environment.
|
||||
|
||||
Note: Keys that are not present in the data will remain unformatted as the
|
||||
original keys. So they can be formatted against the current user
|
||||
environment when merging. So {"A": "{key}"} will remain {key} if not
|
||||
present in the dynamic environment.
|
||||
|
||||
"""
|
||||
env = env.copy()
|
||||
|
||||
# Collect dependencies
|
||||
dependencies = collections.defaultdict(set)
|
||||
for key, value in env.items():
|
||||
dependent_keys = re.findall("{(.+?)}", value)
|
||||
for dependent_key in dependent_keys:
|
||||
# Ignore reference to itself or key is not in env
|
||||
if dependent_key != key and dependent_key in env:
|
||||
dependencies[key].add(dependent_key)
|
||||
|
||||
ordered, cyclic = _topological_sort(dependencies)
|
||||
|
||||
# Check cycle
|
||||
if cyclic:
|
||||
raise CycleError(f"A cycle is detected on: {cyclic}")
|
||||
|
||||
# Format dynamic values
|
||||
for key in reversed(ordered):
|
||||
if key in env:
|
||||
if not isinstance(env[key], str):
|
||||
continue
|
||||
data = env.copy()
|
||||
data.pop(key) # format without itself
|
||||
env[key] = _partial_format(env[key], data=data)
|
||||
|
||||
# Format dynamic keys
|
||||
if fill_dynamic_keys:
|
||||
formatted = {}
|
||||
for key, value in env.items():
|
||||
if not isinstance(value, str):
|
||||
formatted[key] = value
|
||||
continue
|
||||
|
||||
new_key = _partial_format(key, data=env)
|
||||
if new_key in formatted:
|
||||
raise DynamicKeyClashError(
|
||||
f"Key clashes on: {new_key} (source: {key})"
|
||||
)
|
||||
|
||||
formatted[new_key] = value
|
||||
env = formatted
|
||||
|
||||
return env
|
||||
|
||||
|
||||
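A short usage sketch of compute_env_variables_structure, with invented values; note that references to keys that are not part of the mapping are left untouched for a later merge.

    from ayon_core.lib.env_tools import compute_env_variables_structure

    env = {
        "STUDIO_ROOT": "/opt/studio",
        "STUDIO_TOOLS": "{STUDIO_ROOT}/tools",
        "MAYA_MODULE_PATH": "{STUDIO_TOOLS}/maya:{MAYA_MODULE_PATH}",
    }
    print(compute_env_variables_structure(env))
    # {'STUDIO_ROOT': '/opt/studio',
    #  'STUDIO_TOOLS': '/opt/studio/tools',
    #  'MAYA_MODULE_PATH': '/opt/studio/tools/maya:{MAYA_MODULE_PATH}'}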
def merge_env_variables(
|
||||
src_env: dict[str, str],
|
||||
dst_env: dict[str, str],
|
||||
missing_template: Optional[str] = None,
|
||||
) -> dict[str, str]:
|
||||
"""Merge the tools environment with the 'current_env'.
|
||||
|
||||
This finalizes the join with a current environment by formatting the
|
||||
remainder of dynamic variables with that from the current environment.
|
||||
|
||||
Remaining missing variables result in an empty value.
|
||||
|
||||
Args:
|
||||
src_env (dict): The dynamic environment
|
||||
dst_env (dict): The target environment variables mapping to merge
|
||||
the dynamic environment into.
|
||||
missing_template (str): Argument passed to '_partial_format' during
|
||||
merging. `None` should keep missing keys unchanged.
|
||||
|
||||
Returns:
|
||||
dict[str, str]: The resulting environment after the merge.
|
||||
|
||||
"""
|
||||
result = dst_env.copy()
|
||||
for key, value in src_env.items():
|
||||
result[key] = _partial_format(
|
||||
str(value), dst_env, missing_template
|
||||
)
|
||||
|
||||
return result
|
||||
|
|
|
|||
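And a matching sketch of merge_env_variables, again with invented values: the dynamic environment wins for keys it defines, and any remaining placeholders are filled from the destination environment.

    import os

    from ayon_core.lib.env_tools import merge_env_variables

    dynamic_env = {
        "STUDIO_SHOW": "alpha",
        "PYTHONPATH": "/opt/studio/python:{PYTHONPATH}",
    }
    result = merge_env_variables(dynamic_env, dict(os.environ))
    print(result["STUDIO_SHOW"])  # 'alpha'
    # '/opt/studio/python:' followed by the original PYTHONPATH; if PYTHONPATH
    # is not set, the '{PYTHONPATH}' placeholder is kept unchanged.
    print(result["PYTHONPATH"])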
@@ -108,21 +108,29 @@ def run_subprocess(*args, **kwargs):
             | getattr(subprocess, "CREATE_NO_WINDOW", 0)
         )

-    # Escape parentheses for bash
+    # Escape special characters in certain shells
     if (
         kwargs.get("shell") is True
         and len(args) == 1
         and isinstance(args[0], str)
-        and os.getenv("SHELL") in ("/bin/bash", "/bin/sh")
     ):
-        new_arg = (
-            args[0]
-            .replace("(", "\\(")
-            .replace(")", "\\)")
-        )
-        args = (new_arg, )
+        # Escape parentheses for bash
+        if os.getenv("SHELL") in ("/bin/bash", "/bin/sh"):
+            new_arg = (
+                args[0]
+                .replace("(", "\\(")
+                .replace(")", "\\)")
+            )
+            args = (new_arg,)
+        # Escape & on Windows in shell with `cmd.exe` using ^&
+        elif (
+            platform.system().lower() == "windows"
+            and os.getenv("COMSPEC").endswith("cmd.exe")
+        ):
+            new_arg = args[0].replace("&", "^&")
+            args = (new_arg, )

-    # Get environents from kwarg or use current process environments if were
+    # Get environments from kwarg or use current process environments if were
     # not passed.
     env = kwargs.get("env") or os.environ
     # Make sure environment contains only strings
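A usage sketch of run_subprocess with the shell escaping above in mind; the command lines are invented.

    from ayon_core.lib import run_subprocess

    # List form: no shell involved, arguments are passed as they are.
    run_subprocess(["ffmpeg", "-version"])

    # Single string with shell=True goes through the escaping above, so
    # parentheses in bash/sh, or '&' in cmd.exe, inside paths do not break
    # the command.
    run_subprocess(
        "ffmpeg -i /renders/shot(1)/input.mov /renders/shot(1)/output.mov",
        shell=True,
    )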
@@ -9,7 +9,7 @@ from datetime import datetime
 from abc import ABC, abstractmethod
 from functools import lru_cache

-import appdirs
+import platformdirs
 import ayon_api

 _PLACEHOLDER = object()

@@ -17,7 +17,7 @@ _PLACEHOLDER = object()

 def _get_ayon_appdirs(*args):
     return os.path.join(
-        appdirs.user_data_dir("AYON", "Ynput"),
+        platformdirs.user_data_dir("AYON", "Ynput"),
         *args
     )

@@ -276,12 +276,7 @@ class ASettingRegistry(ABC):
     @abstractmethod
     def _delete_item(self, name):
-        # type: (str) -> None
-        """Delete item from settings.
-
-        Note:
-            see :meth:`ayon_core.lib.user_settings.ARegistrySettings.delete_item`
-
-        """
+        """Delete item from settings."""
         pass

     def __delitem__(self, name):

@@ -433,12 +428,7 @@ class IniSettingRegistry(ASettingRegistry):
         config.write(cfg)

     def _delete_item(self, name):
-        """Delete item from default section.
-
-        Note:
-            See :meth:`~ayon_core.lib.IniSettingsRegistry.delete_item_from_section`
-
-        """
+        """Delete item from default section."""
         self.delete_item_from_section("MAIN", name)
|
|
@ -1,9 +1,15 @@
|
|||
import os
|
||||
import re
|
||||
import copy
|
||||
import numbers
|
||||
import warnings
|
||||
from string import Formatter
|
||||
import typing
|
||||
from typing import List, Dict, Any, Set
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from typing import Union
|
||||
|
||||
KEY_PATTERN = re.compile(r"(\{.*?[^{0]*\})")
|
||||
KEY_PADDING_PATTERN = re.compile(r"([^:]+)\S+[><]\S+")
|
||||
SUB_DICT_PATTERN = re.compile(r"([^\[\]]+)")
|
||||
OPTIONAL_PATTERN = re.compile(r"(<.*?[^{0]*>)[^0-9]*?")
|
||||
|
||||
|
|
@ -18,9 +24,7 @@ class TemplateUnsolved(Exception):
|
|||
def __init__(self, template, missing_keys, invalid_types):
|
||||
invalid_type_items = []
|
||||
for _key, _type in invalid_types.items():
|
||||
invalid_type_items.append(
|
||||
"\"{0}\" {1}".format(_key, str(_type))
|
||||
)
|
||||
invalid_type_items.append(f"\"{_key}\" {str(_type)}")
|
||||
|
||||
invalid_types_msg = ""
|
||||
if invalid_type_items:
|
||||
|
|
@ -33,31 +37,32 @@ class TemplateUnsolved(Exception):
|
|||
missing_keys_msg = self.missing_keys_msg.format(
|
||||
", ".join(missing_keys)
|
||||
)
|
||||
super(TemplateUnsolved, self).__init__(
|
||||
super().__init__(
|
||||
self.msg.format(template, missing_keys_msg, invalid_types_msg)
|
||||
)
|
||||
|
||||
|
||||
class StringTemplate:
|
||||
"""String that can be formatted."""
|
||||
def __init__(self, template):
|
||||
def __init__(self, template: str):
|
||||
if not isinstance(template, str):
|
||||
raise TypeError("<{}> argument must be a string, not {}.".format(
|
||||
self.__class__.__name__, str(type(template))
|
||||
))
|
||||
raise TypeError(
|
||||
f"<{self.__class__.__name__}> argument must be a string,"
|
||||
f" not {str(type(template))}."
|
||||
)
|
||||
|
||||
self._template = template
|
||||
self._template: str = template
|
||||
parts = []
|
||||
last_end_idx = 0
|
||||
for item in KEY_PATTERN.finditer(template):
|
||||
start, end = item.span()
|
||||
if start > last_end_idx:
|
||||
parts.append(template[last_end_idx:start])
|
||||
parts.append(FormattingPart(template[start:end]))
|
||||
last_end_idx = end
|
||||
formatter = Formatter()
|
||||
|
||||
if last_end_idx < len(template):
|
||||
parts.append(template[last_end_idx:len(template)])
|
||||
for item in formatter.parse(template):
|
||||
literal_text, field_name, format_spec, conversion = item
|
||||
if literal_text:
|
||||
parts.append(literal_text)
|
||||
if field_name:
|
||||
parts.append(
|
||||
FormattingPart(field_name, format_spec, conversion)
|
||||
)
|
||||
|
||||
new_parts = []
|
||||
for part in parts:
|
||||
|
|
@ -77,15 +82,17 @@ class StringTemplate:
|
|||
if substr:
|
||||
new_parts.append(substr)
|
||||
|
||||
self._parts = self.find_optional_parts(new_parts)
|
||||
self._parts: List["Union[str, OptionalPart, FormattingPart]"] = (
|
||||
self.find_optional_parts(new_parts)
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
def __str__(self) -> str:
|
||||
return self.template
|
||||
|
||||
def __repr__(self):
|
||||
return "<{}> {}".format(self.__class__.__name__, self.template)
|
||||
def __repr__(self) -> str:
|
||||
return f"<{self.__class__.__name__}> {self.template}"
|
||||
|
||||
def __contains__(self, other):
|
||||
def __contains__(self, other: str) -> bool:
|
||||
return other in self.template
|
||||
|
||||
def replace(self, *args, **kwargs):
|
||||
|
|
@ -93,10 +100,10 @@ class StringTemplate:
|
|||
return self
|
||||
|
||||
@property
|
||||
def template(self):
|
||||
def template(self) -> str:
|
||||
return self._template
|
||||
|
||||
def format(self, data):
|
||||
def format(self, data: Dict[str, Any]) -> "TemplateResult":
|
||||
""" Figure out with whole formatting.
|
||||
|
||||
Separate advanced keys (*Like '{project[name]}') from string which must
|
||||
|
|
@ -108,6 +115,7 @@ class StringTemplate:
|
|||
Returns:
|
||||
TemplateResult: Filled or partially filled template containing all
|
||||
data needed or missing for filling template.
|
||||
|
||||
"""
|
||||
result = TemplatePartResult()
|
||||
for part in self._parts:
|
||||
|
|
@ -135,23 +143,29 @@ class StringTemplate:
|
|||
invalid_types
|
||||
)
|
||||
|
||||
def format_strict(self, *args, **kwargs):
|
||||
result = self.format(*args, **kwargs)
|
||||
def format_strict(self, data: Dict[str, Any]) -> "TemplateResult":
|
||||
result = self.format(data)
|
||||
result.validate()
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
def format_template(cls, template, data):
|
||||
def format_template(
|
||||
cls, template: str, data: Dict[str, Any]
|
||||
) -> "TemplateResult":
|
||||
objected_template = cls(template)
|
||||
return objected_template.format(data)
|
||||
|
||||
@classmethod
|
||||
def format_strict_template(cls, template, data):
|
||||
def format_strict_template(
|
||||
cls, template: str, data: Dict[str, Any]
|
||||
) -> "TemplateResult":
|
||||
objected_template = cls(template)
|
||||
return objected_template.format_strict(data)
|
||||
|
||||
@staticmethod
|
||||
def find_optional_parts(parts):
|
||||
def find_optional_parts(
|
||||
parts: List["Union[str, FormattingPart]"]
|
||||
) -> List["Union[str, OptionalPart, FormattingPart]"]:
|
||||
new_parts = []
|
||||
tmp_parts = {}
|
||||
counted_symb = -1
|
||||
|
|
@ -216,11 +230,11 @@ class TemplateResult(str):
|
|||
of number.
|
||||
"""
|
||||
|
||||
used_values = None
|
||||
solved = None
|
||||
template = None
|
||||
missing_keys = None
|
||||
invalid_types = None
|
||||
used_values: Dict[str, Any] = None
|
||||
solved: bool = None
|
||||
template: str = None
|
||||
missing_keys: List[str] = None
|
||||
invalid_types: Dict[str, Any] = None
|
||||
|
||||
def __new__(
|
||||
cls, filled_template, template, solved,
|
||||
|
|
@ -248,7 +262,7 @@ class TemplateResult(str):
|
|||
self.invalid_types
|
||||
)
|
||||
|
||||
def copy(self):
|
||||
def copy(self) -> "TemplateResult":
|
||||
cls = self.__class__
|
||||
return cls(
|
||||
str(self),
|
||||
|
|
@ -259,7 +273,7 @@ class TemplateResult(str):
|
|||
self.invalid_types
|
||||
)
|
||||
|
||||
def normalized(self):
|
||||
def normalized(self) -> "TemplateResult":
|
||||
"""Convert to normalized path."""
|
||||
|
||||
cls = self.__class__
|
||||
|
|
@ -275,27 +289,28 @@ class TemplateResult(str):
|
|||
|
||||
class TemplatePartResult:
|
||||
"""Result to store result of template parts."""
|
||||
def __init__(self, optional=False):
|
||||
def __init__(self, optional: bool = False):
|
||||
# Missing keys or invalid value types of required keys
|
||||
self._missing_keys = set()
|
||||
self._invalid_types = {}
|
||||
self._missing_keys: Set[str] = set()
|
||||
self._invalid_types: Dict[str, Any] = {}
|
||||
# Missing keys or invalid value types of optional keys
|
||||
self._missing_optional_keys = set()
|
||||
self._invalid_optional_types = {}
|
||||
self._missing_optional_keys: Set[str] = set()
|
||||
self._invalid_optional_types: Dict[str, Any] = {}
|
||||
|
||||
# Used values stored by key with origin type
|
||||
# - key without any padding or key modifiers
|
||||
# - value from filling data
|
||||
# Example: {"version": 1}
|
||||
self._used_values = {}
|
||||
self._used_values: Dict[str, Any] = {}
|
||||
# Used values stored by key with all modifirs
|
||||
# - value is already formatted string
|
||||
# Example: {"version:0>3": "001"}
|
||||
self._realy_used_values = {}
|
||||
self._really_used_values: Dict[str, Any] = {}
|
||||
# Concatenated string output after formatting
|
||||
self._output = ""
|
||||
self._output: str = ""
|
||||
# Is this result from optional part
|
||||
self._optional = True
|
||||
# TODO find out why we don't use 'optional' from args
|
||||
self._optional: bool = True
|
||||
|
||||
def add_output(self, other):
|
||||
if isinstance(other, str):
|
||||
|
|
@ -313,7 +328,7 @@ class TemplatePartResult:
|
|||
if other.optional and not other.solved:
|
||||
return
|
||||
self._used_values.update(other.used_values)
|
||||
self._realy_used_values.update(other.realy_used_values)
|
||||
self._really_used_values.update(other.really_used_values)
|
||||
|
||||
else:
|
||||
raise TypeError("Cannot add data from \"{}\" to \"{}\"".format(
|
||||
|
|
@ -321,7 +336,7 @@ class TemplatePartResult:
|
|||
)
|
||||
|
||||
@property
|
||||
def solved(self):
|
||||
def solved(self) -> bool:
|
||||
if self.optional:
|
||||
if (
|
||||
len(self.missing_optional_keys) > 0
|
||||
|
|
@ -334,45 +349,53 @@ class TemplatePartResult:
|
|||
)
|
||||
|
||||
@property
|
||||
def optional(self):
|
||||
def optional(self) -> bool:
|
||||
return self._optional
|
||||
|
||||
@property
|
||||
def output(self):
|
||||
def output(self) -> str:
|
||||
return self._output
|
||||
|
||||
@property
|
||||
def missing_keys(self):
|
||||
def missing_keys(self) -> Set[str]:
|
||||
return self._missing_keys
|
||||
|
||||
@property
|
||||
def missing_optional_keys(self):
|
||||
def missing_optional_keys(self) -> Set[str]:
|
||||
return self._missing_optional_keys
|
||||
|
||||
@property
|
||||
def invalid_types(self):
|
||||
def invalid_types(self) -> Dict[str, Any]:
|
||||
return self._invalid_types
|
||||
|
||||
@property
|
||||
def invalid_optional_types(self):
|
||||
def invalid_optional_types(self) -> Dict[str, Any]:
|
||||
return self._invalid_optional_types
|
||||
|
||||
@property
|
||||
def realy_used_values(self):
|
||||
return self._realy_used_values
|
||||
def really_used_values(self) -> Dict[str, Any]:
|
||||
return self._really_used_values
|
||||
|
||||
@property
|
||||
def used_values(self):
|
||||
def realy_used_values(self) -> Dict[str, Any]:
|
||||
warnings.warn(
|
||||
"Property 'realy_used_values' is deprecated."
|
||||
" Use 'really_used_values' instead.",
|
||||
DeprecationWarning
|
||||
)
|
||||
return self._really_used_values
|
||||
|
||||
@property
|
||||
def used_values(self) -> Dict[str, Any]:
|
||||
return self._used_values
|
||||
|
||||
@staticmethod
|
||||
def split_keys_to_subdicts(values):
|
||||
def split_keys_to_subdicts(values: Dict[str, Any]) -> Dict[str, Any]:
|
||||
output = {}
|
||||
formatter = Formatter()
|
||||
for key, value in values.items():
|
||||
key_padding = list(KEY_PADDING_PATTERN.findall(key))
|
||||
if key_padding:
|
||||
key = key_padding[0]
|
||||
key_subdict = list(SUB_DICT_PATTERN.findall(key))
|
||||
_, field_name, _, _ = next(formatter.parse(f"{{{key}}}"))
|
||||
key_subdict = list(SUB_DICT_PATTERN.findall(field_name))
|
||||
data = output
|
||||
last_key = key_subdict.pop(-1)
|
||||
for subkey in key_subdict:
|
||||
|
|
@ -382,7 +405,7 @@ class TemplatePartResult:
|
|||
data[last_key] = value
|
||||
return output
|
||||
|
||||
def get_clean_used_values(self):
|
||||
def get_clean_used_values(self) -> Dict[str, Any]:
|
||||
new_used_values = {}
|
||||
for key, value in self.used_values.items():
|
||||
if isinstance(value, FormatObject):
|
||||
|
|
@ -391,19 +414,27 @@ class TemplatePartResult:
|
|||
|
||||
return self.split_keys_to_subdicts(new_used_values)
|
||||
|
||||
def add_realy_used_value(self, key, value):
|
||||
self._realy_used_values[key] = value
|
||||
def add_really_used_value(self, key: str, value: Any):
|
||||
self._really_used_values[key] = value
|
||||
|
||||
def add_used_value(self, key, value):
|
||||
def add_realy_used_value(self, key: str, value: Any):
|
||||
warnings.warn(
|
||||
"Method 'add_realy_used_value' is deprecated."
|
||||
" Use 'add_really_used_value' instead.",
|
||||
DeprecationWarning
|
||||
)
|
||||
self.add_really_used_value(key, value)
|
||||
|
||||
def add_used_value(self, key: str, value: Any):
|
||||
self._used_values[key] = value
|
||||
|
||||
def add_missing_key(self, key):
|
||||
def add_missing_key(self, key: str):
|
||||
if self._optional:
|
||||
self._missing_optional_keys.add(key)
|
||||
else:
|
||||
self._missing_keys.add(key)
|
||||
|
||||
def add_invalid_type(self, key, value):
|
||||
def add_invalid_type(self, key: str, value: Any):
|
||||
if self._optional:
|
||||
self._invalid_optional_types[key] = type(value)
|
||||
else:
|
||||
|
|
@ -421,10 +452,10 @@ class FormatObject:
|
|||
def __format__(self, *args, **kwargs):
|
||||
return self.value.__format__(*args, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
def __str__(self) -> str:
|
||||
return str(self.value)
|
||||
|
||||
def __repr__(self):
|
||||
def __repr__(self) -> str:
|
||||
return self.__str__()
|
||||
|
||||
|
||||
|
|
@@ -434,23 +465,44 @@ class FormattingPart:
Containt only single key to format e.g. "{project[name]}".

Args:
template(str): String containing the formatting key.
field_name (str): Name of key.
format_spec (str): Format specification.
conversion (Union[str, None]): Conversion type.

"""
def __init__(self, template):
self._template = template
def __init__(
self,
field_name: str,
format_spec: str,
conversion: "Union[str, None]",
):
format_spec_v = ""
if format_spec:
format_spec_v = f":{format_spec}"
conversion_v = ""
if conversion:
conversion_v = f"!{conversion}"

self._field_name: str = field_name
self._format_spec: str = format_spec_v
self._conversion: str = conversion_v

template_base = f"{field_name}{format_spec_v}{conversion_v}"
self._template_base: str = template_base
self._template: str = f"{{{template_base}}}"

@property
def template(self):
def template(self) -> str:
return self._template

def __repr__(self):
def __repr__(self) -> str:
return "<Format:{}>".format(self._template)

def __str__(self):
def __str__(self) -> str:
return self._template

@staticmethod
def validate_value_type(value):
def validate_value_type(value: Any) -> bool:
"""Check if value can be used for formatting of single key."""
if isinstance(value, (numbers.Number, FormatObject)):
return True
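The rewritten FormattingPart above is built from the pieces produced by the standard-library string.Formatter instead of a raw regex match over the whole key. A minimal sketch of that parsing step (the template string is illustrative):

    from string import Formatter

    # Each tuple is (literal_text, field_name, format_spec, conversion),
    # the same values FormattingPart now receives in __init__.
    for literal, field, spec, conv in Formatter().parse("v{version:0>3}_{ext}"):
        print(repr(literal), field, spec, conv)
    # ('v', 'version', '0>3', None)
    # ('_', 'ext', '', None)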
@ -461,7 +513,7 @@ class FormattingPart:
|
|||
return False
|
||||
|
||||
@staticmethod
|
||||
def validate_key_is_matched(key):
|
||||
def validate_key_is_matched(key: str) -> bool:
|
||||
"""Validate that opening has closing at correct place.
|
||||
Future-proof, only square brackets are currently used in keys.
|
||||
|
||||
|
|
@ -488,17 +540,27 @@ class FormattingPart:
|
|||
return False
|
||||
return not queue
|
||||
|
||||
def format(self, data, result):
|
||||
@staticmethod
|
||||
def keys_to_template_base(keys: List[str]):
|
||||
if not keys:
|
||||
return None
|
||||
# Create copy of keys
|
||||
keys = list(keys)
|
||||
template_base = keys.pop(0)
|
||||
joined_keys = "".join([f"[{key}]" for key in keys])
|
||||
return f"{template_base}{joined_keys}"
|
||||
|
||||
def format(
|
||||
self, data: Dict[str, Any], result: TemplatePartResult
|
||||
) -> TemplatePartResult:
|
||||
"""Format the formattings string.
|
||||
|
||||
Args:
|
||||
data(dict): Data that should be used for formatting.
|
||||
result(TemplatePartResult): Object where result is stored.
|
||||
|
||||
"""
|
||||
key = self.template[1:-1]
|
||||
if key in result.realy_used_values:
|
||||
result.add_output(result.realy_used_values[key])
|
||||
return result
|
||||
key = self._template_base
|
||||
|
||||
# ensure key is properly formed [({})] properly closed.
|
||||
if not self.validate_key_is_matched(key):
|
||||
|
|
@ -507,17 +569,38 @@ class FormattingPart:
|
|||
return result
|
||||
|
||||
# check if key expects subdictionary keys (e.g. project[name])
|
||||
existence_check = key
|
||||
key_padding = list(KEY_PADDING_PATTERN.findall(existence_check))
|
||||
if key_padding:
|
||||
existence_check = key_padding[0]
|
||||
key_subdict = list(SUB_DICT_PATTERN.findall(existence_check))
|
||||
key_subdict = list(SUB_DICT_PATTERN.findall(self._field_name))
|
||||
|
||||
value = data
|
||||
missing_key = False
|
||||
invalid_type = False
|
||||
used_keys = []
|
||||
keys_to_value = None
|
||||
used_value = None
|
||||
|
||||
for sub_key in key_subdict:
|
||||
if isinstance(value, list):
|
||||
if not sub_key.lstrip("-").isdigit():
|
||||
invalid_type = True
|
||||
break
|
||||
sub_key = int(sub_key)
|
||||
if sub_key < 0:
|
||||
sub_key = len(value) + sub_key
|
||||
|
||||
valid = 0 <= sub_key < len(value)
|
||||
if not valid:
|
||||
used_keys.append(sub_key)
|
||||
missing_key = True
|
||||
break
|
||||
|
||||
used_keys.append(sub_key)
|
||||
if keys_to_value is None:
|
||||
keys_to_value = list(used_keys)
|
||||
keys_to_value.pop(-1)
|
||||
used_value = copy.deepcopy(value)
|
||||
value = value[sub_key]
|
||||
continue
|
||||
|
||||
if (
|
||||
value is None
|
||||
or (hasattr(value, "items") and sub_key not in value)
|
||||
|
|
@ -533,45 +616,57 @@ class FormattingPart:
|
|||
used_keys.append(sub_key)
|
||||
value = value.get(sub_key)
|
||||
|
||||
if missing_key or invalid_type:
|
||||
if len(used_keys) == 0:
|
||||
invalid_key = key_subdict[0]
|
||||
else:
|
||||
invalid_key = used_keys[0]
|
||||
for idx, sub_key in enumerate(used_keys):
|
||||
if idx == 0:
|
||||
continue
|
||||
invalid_key += "[{0}]".format(sub_key)
|
||||
field_name = key_subdict[0]
|
||||
if used_keys:
|
||||
field_name = self.keys_to_template_base(used_keys)
|
||||
|
||||
if missing_key or invalid_type:
|
||||
if missing_key:
|
||||
result.add_missing_key(invalid_key)
|
||||
result.add_missing_key(field_name)
|
||||
|
||||
elif invalid_type:
|
||||
result.add_invalid_type(invalid_key, value)
|
||||
result.add_invalid_type(field_name, value)
|
||||
|
||||
result.add_output(self.template)
|
||||
return result
|
||||
|
||||
if self.validate_value_type(value):
|
||||
fill_data = {}
|
||||
first_value = True
|
||||
for used_key in reversed(used_keys):
|
||||
if first_value:
|
||||
first_value = False
|
||||
fill_data[used_key] = value
|
||||
else:
|
||||
_fill_data = {used_key: fill_data}
|
||||
fill_data = _fill_data
|
||||
|
||||
formatted_value = self.template.format(**fill_data)
|
||||
result.add_realy_used_value(key, formatted_value)
|
||||
result.add_used_value(existence_check, formatted_value)
|
||||
result.add_output(formatted_value)
|
||||
if not self.validate_value_type(value):
|
||||
result.add_invalid_type(key, value)
|
||||
result.add_output(self.template)
|
||||
return result
|
||||
|
||||
result.add_invalid_type(key, value)
|
||||
result.add_output(self.template)
|
||||
fill_data = root_fill_data = {}
|
||||
parent_fill_data = None
|
||||
parent_key = None
|
||||
fill_value = data
|
||||
value_filled = False
|
||||
for used_key in used_keys:
|
||||
if isinstance(fill_value, list):
|
||||
parent_fill_data[parent_key] = fill_value
|
||||
value_filled = True
|
||||
break
|
||||
fill_value = fill_value[used_key]
|
||||
parent_fill_data = fill_data
|
||||
fill_data = parent_fill_data.setdefault(used_key, {})
|
||||
parent_key = used_key
|
||||
|
||||
if not value_filled:
|
||||
parent_fill_data[used_keys[-1]] = value
|
||||
|
||||
template = f"{{{field_name}{self._format_spec}{self._conversion}}}"
|
||||
formatted_value = template.format(**root_fill_data)
|
||||
used_key = key
|
||||
if keys_to_value is not None:
|
||||
used_key = self.keys_to_template_base(keys_to_value)
|
||||
|
||||
if used_value is None:
|
||||
if isinstance(value, numbers.Number):
|
||||
used_value = value
|
||||
else:
|
||||
used_value = formatted_value
|
||||
result.add_really_used_value(self._field_name, used_value)
|
||||
result.add_used_value(used_key, used_value)
|
||||
result.add_output(formatted_value)
|
||||
return result
|
||||
|
||||
|
||||
|
|
@ -585,20 +680,27 @@ class OptionalPart:
|
|||
'FormattingPart'.
|
||||
"""
|
||||
|
||||
def __init__(self, parts):
|
||||
self._parts = parts
|
||||
def __init__(
|
||||
self,
|
||||
parts: List["Union[str, OptionalPart, FormattingPart]"]
|
||||
):
|
||||
self._parts: List["Union[str, OptionalPart, FormattingPart]"] = parts
|
||||
|
||||
@property
|
||||
def parts(self):
|
||||
def parts(self) -> List["Union[str, OptionalPart, FormattingPart]"]:
|
||||
return self._parts
|
||||
|
||||
def __str__(self):
|
||||
def __str__(self) -> str:
|
||||
return "<{}>".format("".join([str(p) for p in self._parts]))
|
||||
|
||||
def __repr__(self):
|
||||
def __repr__(self) -> str:
|
||||
return "<Optional:{}>".format("".join([str(p) for p in self._parts]))
|
||||
|
||||
def format(self, data, result):
|
||||
def format(
|
||||
self,
|
||||
data: Dict[str, Any],
|
||||
result: TemplatePartResult,
|
||||
) -> TemplatePartResult:
|
||||
new_result = TemplatePartResult(True)
|
||||
for part in self._parts:
|
||||
if isinstance(part, str):
|
||||
|
|
|
|||
|
|
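Taken together, the template.py changes above keep the public behaviour of StringTemplate: format() returns a TemplateResult that records which keys could not be resolved. A minimal usage sketch under that assumption (the template and data are illustrative, and the exact reported key names may differ):

    from ayon_core.lib import StringTemplate

    template = StringTemplate("{project[name]}/{folder[name]}/v{version:0>3}")
    result = template.format({"project": {"name": "demo"}, "version": 2})
    print(str(result))           # unresolved parts stay as "{folder[name]}"
    print(result.solved)         # False
    print(result.missing_keys)   # e.g. ["folder"]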
@@ -1,17 +0,0 @@
# Deprecated file
# - the file container 'WeakMethod' implementation for Python 2 which is not
#   needed anymore.
import warnings
import weakref


WeakMethod = weakref.WeakMethod

warnings.warn(
(
"'ayon_core.lib.python_2_comp' is deprecated."
"Please use 'weakref.WeakMethod'."
),
DeprecationWarning,
stacklevel=2
)
@@ -53,7 +53,7 @@ IMAGE_EXTENSIONS = {
".kra", ".logluv", ".mng", ".miff", ".nrrd", ".ora",
".pam", ".pbm", ".pgm", ".ppm", ".pnm", ".pcx", ".pgf",
".pictor", ".png", ".psd", ".psb", ".psp", ".qtvr",
".ras", ".rgbe", ".sgi", ".tga",
".ras", ".rgbe", ".sgi", ".sxr", ".tga",
".tif", ".tiff", ".tiff/ep", ".tiff/it", ".ufo", ".ufp",
".wbmp", ".webp", ".xr", ".xt", ".xbm", ".xcf", ".xpm", ".xwd"
}
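The only functional change in this hunk is that the ".sxr" extension is now treated as an image format. A minimal sketch of the kind of membership check the set is used for; the import path ayon_core.lib.transcoding is an assumption and the file name is illustrative:

    import os
    from ayon_core.lib.transcoding import IMAGE_EXTENSIONS  # assumed module path

    ext = os.path.splitext("beauty.sxr")[1].lower()
    print(ext in IMAGE_EXTENSIONS)  # True after this change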
@ -1,60 +0,0 @@
|
|||
import os
|
||||
|
||||
from ayon_core import AYON_CORE_ROOT
|
||||
from ayon_core.addon import AYONAddon, ITrayAction
|
||||
|
||||
|
||||
class LauncherAction(AYONAddon, ITrayAction):
|
||||
label = "Launcher"
|
||||
name = "launcher_tool"
|
||||
version = "1.0.0"
|
||||
|
||||
def initialize(self, settings):
|
||||
|
||||
# Tray attributes
|
||||
self._window = None
|
||||
|
||||
def tray_init(self):
|
||||
self._create_window()
|
||||
|
||||
self.add_doubleclick_callback(self._show_launcher)
|
||||
|
||||
def tray_start(self):
|
||||
return
|
||||
|
||||
def connect_with_addons(self, enabled_modules):
|
||||
# Register actions
|
||||
if not self.tray_initialized:
|
||||
return
|
||||
|
||||
from ayon_core.pipeline.actions import register_launcher_action_path
|
||||
|
||||
actions_dir = os.path.join(AYON_CORE_ROOT, "plugins", "actions")
|
||||
if os.path.exists(actions_dir):
|
||||
register_launcher_action_path(actions_dir)
|
||||
|
||||
actions_paths = self.manager.collect_plugin_paths()["actions"]
|
||||
for path in actions_paths:
|
||||
if path and os.path.exists(path):
|
||||
register_launcher_action_path(path)
|
||||
|
||||
def on_action_trigger(self):
|
||||
"""Implementation for ITrayAction interface.
|
||||
|
||||
Show launcher tool on action trigger.
|
||||
"""
|
||||
|
||||
self._show_launcher()
|
||||
|
||||
def _create_window(self):
|
||||
if self._window:
|
||||
return
|
||||
from ayon_core.tools.launcher.ui import LauncherWindow
|
||||
self._window = LauncherWindow()
|
||||
|
||||
def _show_launcher(self):
|
||||
if self._window is None:
|
||||
return
|
||||
self._window.show()
|
||||
self._window.raise_()
|
||||
self._window.activateWindow()
|
||||
|
|
@ -1,68 +0,0 @@
|
|||
from ayon_core.addon import AYONAddon, ITrayAddon
|
||||
|
||||
|
||||
class LoaderAddon(AYONAddon, ITrayAddon):
|
||||
name = "loader_tool"
|
||||
version = "1.0.0"
|
||||
|
||||
def initialize(self, settings):
|
||||
# Tray attributes
|
||||
self._loader_imported = None
|
||||
self._loader_window = None
|
||||
|
||||
def tray_init(self):
|
||||
# Add library tool
|
||||
self._loader_imported = False
|
||||
try:
|
||||
from ayon_core.tools.loader.ui import LoaderWindow # noqa F401
|
||||
|
||||
self._loader_imported = True
|
||||
except Exception:
|
||||
self.log.warning(
|
||||
"Couldn't load Loader tool for tray.",
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
# Definition of Tray menu
|
||||
def tray_menu(self, tray_menu):
|
||||
if not self._loader_imported:
|
||||
return
|
||||
|
||||
from qtpy import QtWidgets
|
||||
# Actions
|
||||
action_loader = QtWidgets.QAction(
|
||||
"Loader", tray_menu
|
||||
)
|
||||
|
||||
action_loader.triggered.connect(self.show_loader)
|
||||
|
||||
tray_menu.addAction(action_loader)
|
||||
|
||||
def tray_start(self, *_a, **_kw):
|
||||
return
|
||||
|
||||
def tray_exit(self, *_a, **_kw):
|
||||
return
|
||||
|
||||
def show_loader(self):
|
||||
if self._loader_window is None:
|
||||
from ayon_core.pipeline import install_ayon_plugins
|
||||
|
||||
self._init_loader()
|
||||
|
||||
install_ayon_plugins()
|
||||
|
||||
self._loader_window.show()
|
||||
|
||||
# Raise and activate the window
|
||||
# for MacOS
|
||||
self._loader_window.raise_()
|
||||
# for Windows
|
||||
self._loader_window.activateWindow()
|
||||
|
||||
def _init_loader(self):
|
||||
from ayon_core.tools.loader.ui import LoaderWindow
|
||||
|
||||
libraryloader = LoaderWindow()
|
||||
|
||||
self._loader_window = libraryloader
|
||||
|
|
@@ -1,8 +0,0 @@
from .addon import (
PythonInterpreterAction
)


__all__ = (
"PythonInterpreterAction",
)
@ -1,42 +0,0 @@
|
|||
from ayon_core.addon import AYONAddon, ITrayAction
|
||||
|
||||
|
||||
class PythonInterpreterAction(AYONAddon, ITrayAction):
|
||||
label = "Console"
|
||||
name = "python_interpreter"
|
||||
version = "1.0.0"
|
||||
admin_action = True
|
||||
|
||||
def initialize(self, settings):
|
||||
self._interpreter_window = None
|
||||
|
||||
def tray_init(self):
|
||||
self.create_interpreter_window()
|
||||
|
||||
def tray_exit(self):
|
||||
if self._interpreter_window is not None:
|
||||
self._interpreter_window.save_registry()
|
||||
|
||||
def create_interpreter_window(self):
|
||||
"""Initializa Settings Qt window."""
|
||||
if self._interpreter_window:
|
||||
return
|
||||
|
||||
from ayon_core.modules.python_console_interpreter.window import (
|
||||
PythonInterpreterWidget
|
||||
)
|
||||
|
||||
self._interpreter_window = PythonInterpreterWidget()
|
||||
|
||||
def on_action_trigger(self):
|
||||
self.show_interpreter_window()
|
||||
|
||||
def show_interpreter_window(self):
|
||||
self.create_interpreter_window()
|
||||
|
||||
if self._interpreter_window.isVisible():
|
||||
self._interpreter_window.activateWindow()
|
||||
self._interpreter_window.raise_()
|
||||
return
|
||||
|
||||
self._interpreter_window.show()
|
||||
|
|
@@ -1,8 +0,0 @@
from .widgets import (
PythonInterpreterWidget
)


__all__ = (
"PythonInterpreterWidget",
)
@ -1,660 +0,0 @@
|
|||
import os
|
||||
import re
|
||||
import sys
|
||||
import collections
|
||||
from code import InteractiveInterpreter
|
||||
|
||||
import appdirs
|
||||
from qtpy import QtCore, QtWidgets, QtGui
|
||||
|
||||
from ayon_core import resources
|
||||
from ayon_core.style import load_stylesheet
|
||||
from ayon_core.lib import JSONSettingRegistry
|
||||
|
||||
|
||||
ayon_art = r"""
|
||||
|
||||
▄██▄
|
||||
▄███▄ ▀██▄ ▀██▀ ▄██▀ ▄██▀▀▀██▄ ▀███▄ █▄
|
||||
▄▄ ▀██▄ ▀██▄ ▄██▀ ██▀ ▀██▄ ▄ ▀██▄ ███
|
||||
▄██▀ ██▄ ▀ ▄▄ ▀ ██ ▄██ ███ ▀██▄ ███
|
||||
▄██▀ ▀██▄ ██ ▀██▄ ▄██▀ ███ ▀██ ▀█▀
|
||||
▄██▀ ▀██▄ ▀█ ▀██▄▄▄▄██▀ █▀ ▀██▄
|
||||
|
||||
· · - =[ by YNPUT ]:[ http://ayon.ynput.io ]= - · ·
|
||||
|
||||
"""
|
||||
|
||||
|
||||
class PythonInterpreterRegistry(JSONSettingRegistry):
|
||||
"""Class handling OpenPype general settings registry.
|
||||
|
||||
Attributes:
|
||||
vendor (str): Name used for path construction.
|
||||
product (str): Additional name used for path construction.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.vendor = "Ynput"
|
||||
self.product = "AYON"
|
||||
name = "python_interpreter_tool"
|
||||
path = appdirs.user_data_dir(self.product, self.vendor)
|
||||
super(PythonInterpreterRegistry, self).__init__(name, path)
|
||||
|
||||
|
||||
class StdOEWrap:
|
||||
def __init__(self):
|
||||
self._origin_stdout_write = None
|
||||
self._origin_stderr_write = None
|
||||
self._listening = False
|
||||
self.lines = collections.deque()
|
||||
|
||||
if not sys.stdout:
|
||||
sys.stdout = open(os.devnull, "w")
|
||||
|
||||
if not sys.stderr:
|
||||
sys.stderr = open(os.devnull, "w")
|
||||
|
||||
if self._origin_stdout_write is None:
|
||||
self._origin_stdout_write = sys.stdout.write
|
||||
|
||||
if self._origin_stderr_write is None:
|
||||
self._origin_stderr_write = sys.stderr.write
|
||||
|
||||
self._listening = True
|
||||
sys.stdout.write = self._stdout_listener
|
||||
sys.stderr.write = self._stderr_listener
|
||||
|
||||
def stop_listen(self):
|
||||
self._listening = False
|
||||
|
||||
def _stdout_listener(self, text):
|
||||
if self._listening:
|
||||
self.lines.append(text)
|
||||
if self._origin_stdout_write is not None:
|
||||
self._origin_stdout_write(text)
|
||||
|
||||
def _stderr_listener(self, text):
|
||||
if self._listening:
|
||||
self.lines.append(text)
|
||||
if self._origin_stderr_write is not None:
|
||||
self._origin_stderr_write(text)
|
||||
|
||||
|
||||
class PythonCodeEditor(QtWidgets.QPlainTextEdit):
|
||||
execute_requested = QtCore.Signal()
|
||||
|
||||
def __init__(self, parent):
|
||||
super(PythonCodeEditor, self).__init__(parent)
|
||||
|
||||
self.setObjectName("PythonCodeEditor")
|
||||
|
||||
self._indent = 4
|
||||
|
||||
def _tab_shift_right(self):
|
||||
cursor = self.textCursor()
|
||||
selected_text = cursor.selectedText()
|
||||
if not selected_text:
|
||||
cursor.insertText(" " * self._indent)
|
||||
return
|
||||
|
||||
sel_start = cursor.selectionStart()
|
||||
sel_end = cursor.selectionEnd()
|
||||
cursor.setPosition(sel_end)
|
||||
end_line = cursor.blockNumber()
|
||||
cursor.setPosition(sel_start)
|
||||
while True:
|
||||
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
|
||||
text = cursor.block().text()
|
||||
spaces = len(text) - len(text.lstrip(" "))
|
||||
new_spaces = spaces % self._indent
|
||||
if not new_spaces:
|
||||
new_spaces = self._indent
|
||||
|
||||
cursor.insertText(" " * new_spaces)
|
||||
if cursor.blockNumber() == end_line:
|
||||
break
|
||||
|
||||
cursor.movePosition(QtGui.QTextCursor.NextBlock)
|
||||
|
||||
def _tab_shift_left(self):
|
||||
tmp_cursor = self.textCursor()
|
||||
sel_start = tmp_cursor.selectionStart()
|
||||
sel_end = tmp_cursor.selectionEnd()
|
||||
|
||||
cursor = QtGui.QTextCursor(self.document())
|
||||
cursor.setPosition(sel_end)
|
||||
end_line = cursor.blockNumber()
|
||||
cursor.setPosition(sel_start)
|
||||
while True:
|
||||
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
|
||||
text = cursor.block().text()
|
||||
spaces = len(text) - len(text.lstrip(" "))
|
||||
if spaces:
|
||||
spaces_to_remove = (spaces % self._indent) or self._indent
|
||||
if spaces_to_remove > spaces:
|
||||
spaces_to_remove = spaces
|
||||
|
||||
cursor.setPosition(
|
||||
cursor.position() + spaces_to_remove,
|
||||
QtGui.QTextCursor.KeepAnchor
|
||||
)
|
||||
cursor.removeSelectedText()
|
||||
|
||||
if cursor.blockNumber() == end_line:
|
||||
break
|
||||
|
||||
cursor.movePosition(QtGui.QTextCursor.NextBlock)
|
||||
|
||||
def keyPressEvent(self, event):
|
||||
if event.key() == QtCore.Qt.Key_Backtab:
|
||||
self._tab_shift_left()
|
||||
event.accept()
|
||||
return
|
||||
|
||||
if event.key() == QtCore.Qt.Key_Tab:
|
||||
if event.modifiers() == QtCore.Qt.NoModifier:
|
||||
self._tab_shift_right()
|
||||
event.accept()
|
||||
return
|
||||
|
||||
if (
|
||||
event.key() == QtCore.Qt.Key_Return
|
||||
and event.modifiers() == QtCore.Qt.ControlModifier
|
||||
):
|
||||
self.execute_requested.emit()
|
||||
event.accept()
|
||||
return
|
||||
|
||||
super(PythonCodeEditor, self).keyPressEvent(event)
|
||||
|
||||
|
||||
class PythonTabWidget(QtWidgets.QWidget):
|
||||
add_tab_requested = QtCore.Signal()
|
||||
before_execute = QtCore.Signal(str)
|
||||
|
||||
def __init__(self, parent):
|
||||
super(PythonTabWidget, self).__init__(parent)
|
||||
|
||||
code_input = PythonCodeEditor(self)
|
||||
|
||||
self.setFocusProxy(code_input)
|
||||
|
||||
add_tab_btn = QtWidgets.QPushButton("Add tab...", self)
|
||||
add_tab_btn.setToolTip("Add new tab")
|
||||
|
||||
execute_btn = QtWidgets.QPushButton("Execute", self)
|
||||
execute_btn.setToolTip("Execute command (Ctrl + Enter)")
|
||||
|
||||
btns_layout = QtWidgets.QHBoxLayout()
|
||||
btns_layout.setContentsMargins(0, 0, 0, 0)
|
||||
btns_layout.addWidget(add_tab_btn)
|
||||
btns_layout.addStretch(1)
|
||||
btns_layout.addWidget(execute_btn)
|
||||
|
||||
layout = QtWidgets.QVBoxLayout(self)
|
||||
layout.setContentsMargins(0, 0, 0, 0)
|
||||
layout.addWidget(code_input, 1)
|
||||
layout.addLayout(btns_layout, 0)
|
||||
|
||||
add_tab_btn.clicked.connect(self._on_add_tab_clicked)
|
||||
execute_btn.clicked.connect(self._on_execute_clicked)
|
||||
code_input.execute_requested.connect(self.execute)
|
||||
|
||||
self._code_input = code_input
|
||||
self._interpreter = InteractiveInterpreter()
|
||||
|
||||
def _on_add_tab_clicked(self):
|
||||
self.add_tab_requested.emit()
|
||||
|
||||
def _on_execute_clicked(self):
|
||||
self.execute()
|
||||
|
||||
def get_code(self):
|
||||
return self._code_input.toPlainText()
|
||||
|
||||
def set_code(self, code_text):
|
||||
self._code_input.setPlainText(code_text)
|
||||
|
||||
def execute(self):
|
||||
code_text = self._code_input.toPlainText()
|
||||
self.before_execute.emit(code_text)
|
||||
self._interpreter.runcode(code_text)
|
||||
|
||||
|
||||
class TabNameDialog(QtWidgets.QDialog):
|
||||
default_width = 330
|
||||
default_height = 85
|
||||
|
||||
def __init__(self, parent):
|
||||
super(TabNameDialog, self).__init__(parent)
|
||||
|
||||
self.setWindowTitle("Enter tab name")
|
||||
|
||||
name_label = QtWidgets.QLabel("Tab name:", self)
|
||||
name_input = QtWidgets.QLineEdit(self)
|
||||
|
||||
inputs_layout = QtWidgets.QHBoxLayout()
|
||||
inputs_layout.addWidget(name_label)
|
||||
inputs_layout.addWidget(name_input)
|
||||
|
||||
ok_btn = QtWidgets.QPushButton("Ok", self)
|
||||
cancel_btn = QtWidgets.QPushButton("Cancel", self)
|
||||
btns_layout = QtWidgets.QHBoxLayout()
|
||||
btns_layout.addStretch(1)
|
||||
btns_layout.addWidget(ok_btn)
|
||||
btns_layout.addWidget(cancel_btn)
|
||||
|
||||
layout = QtWidgets.QVBoxLayout(self)
|
||||
layout.addLayout(inputs_layout)
|
||||
layout.addStretch(1)
|
||||
layout.addLayout(btns_layout)
|
||||
|
||||
ok_btn.clicked.connect(self._on_ok_clicked)
|
||||
cancel_btn.clicked.connect(self._on_cancel_clicked)
|
||||
|
||||
self._name_input = name_input
|
||||
self._ok_btn = ok_btn
|
||||
self._cancel_btn = cancel_btn
|
||||
|
||||
self._result = None
|
||||
|
||||
self.resize(self.default_width, self.default_height)
|
||||
|
||||
def set_tab_name(self, name):
|
||||
self._name_input.setText(name)
|
||||
|
||||
def result(self):
|
||||
return self._result
|
||||
|
||||
def showEvent(self, event):
|
||||
super(TabNameDialog, self).showEvent(event)
|
||||
btns_width = max(
|
||||
self._ok_btn.width(),
|
||||
self._cancel_btn.width()
|
||||
)
|
||||
|
||||
self._ok_btn.setMinimumWidth(btns_width)
|
||||
self._cancel_btn.setMinimumWidth(btns_width)
|
||||
|
||||
def _on_ok_clicked(self):
|
||||
self._result = self._name_input.text()
|
||||
self.accept()
|
||||
|
||||
def _on_cancel_clicked(self):
|
||||
self._result = None
|
||||
self.reject()
|
||||
|
||||
|
||||
class OutputTextWidget(QtWidgets.QTextEdit):
|
||||
v_max_offset = 4
|
||||
|
||||
def vertical_scroll_at_max(self):
|
||||
v_scroll = self.verticalScrollBar()
|
||||
return v_scroll.value() > v_scroll.maximum() - self.v_max_offset
|
||||
|
||||
def scroll_to_bottom(self):
|
||||
v_scroll = self.verticalScrollBar()
|
||||
return v_scroll.setValue(v_scroll.maximum())
|
||||
|
||||
|
||||
class EnhancedTabBar(QtWidgets.QTabBar):
|
||||
double_clicked = QtCore.Signal(QtCore.QPoint)
|
||||
right_clicked = QtCore.Signal(QtCore.QPoint)
|
||||
mid_clicked = QtCore.Signal(QtCore.QPoint)
|
||||
|
||||
def __init__(self, parent):
|
||||
super(EnhancedTabBar, self).__init__(parent)
|
||||
|
||||
self.setDrawBase(False)
|
||||
|
||||
def mouseDoubleClickEvent(self, event):
|
||||
self.double_clicked.emit(event.globalPos())
|
||||
event.accept()
|
||||
|
||||
def mouseReleaseEvent(self, event):
|
||||
if event.button() == QtCore.Qt.RightButton:
|
||||
self.right_clicked.emit(event.globalPos())
|
||||
event.accept()
|
||||
return
|
||||
|
||||
elif event.button() == QtCore.Qt.MidButton:
|
||||
self.mid_clicked.emit(event.globalPos())
|
||||
event.accept()
|
||||
|
||||
else:
|
||||
super(EnhancedTabBar, self).mouseReleaseEvent(event)
|
||||
|
||||
|
||||
class PythonInterpreterWidget(QtWidgets.QWidget):
|
||||
default_width = 1000
|
||||
default_height = 600
|
||||
|
||||
def __init__(self, allow_save_registry=True, parent=None):
|
||||
super(PythonInterpreterWidget, self).__init__(parent)
|
||||
|
||||
self.setWindowTitle("AYON Console")
|
||||
self.setWindowIcon(QtGui.QIcon(resources.get_ayon_icon_filepath()))
|
||||
|
||||
self.ansi_escape = re.compile(
|
||||
r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]"
|
||||
)
|
||||
|
||||
self._tabs = []
|
||||
|
||||
self._stdout_err_wrapper = StdOEWrap()
|
||||
|
||||
output_widget = OutputTextWidget(self)
|
||||
output_widget.setObjectName("PythonInterpreterOutput")
|
||||
output_widget.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
|
||||
output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction)
|
||||
|
||||
tab_widget = QtWidgets.QTabWidget(self)
|
||||
tab_bar = EnhancedTabBar(tab_widget)
|
||||
tab_widget.setTabBar(tab_bar)
|
||||
tab_widget.setTabsClosable(False)
|
||||
tab_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
|
||||
|
||||
widgets_splitter = QtWidgets.QSplitter(self)
|
||||
widgets_splitter.setOrientation(QtCore.Qt.Vertical)
|
||||
widgets_splitter.addWidget(output_widget)
|
||||
widgets_splitter.addWidget(tab_widget)
|
||||
widgets_splitter.setStretchFactor(0, 1)
|
||||
widgets_splitter.setStretchFactor(1, 1)
|
||||
height = int(self.default_height / 2)
|
||||
widgets_splitter.setSizes([height, self.default_height - height])
|
||||
|
||||
layout = QtWidgets.QVBoxLayout(self)
|
||||
layout.addWidget(widgets_splitter)
|
||||
|
||||
line_check_timer = QtCore.QTimer()
|
||||
line_check_timer.setInterval(200)
|
||||
|
||||
line_check_timer.timeout.connect(self._on_timer_timeout)
|
||||
tab_bar.right_clicked.connect(self._on_tab_right_click)
|
||||
tab_bar.double_clicked.connect(self._on_tab_double_click)
|
||||
tab_bar.mid_clicked.connect(self._on_tab_mid_click)
|
||||
tab_widget.tabCloseRequested.connect(self._on_tab_close_req)
|
||||
|
||||
self._widgets_splitter = widgets_splitter
|
||||
self._output_widget = output_widget
|
||||
self._tab_widget = tab_widget
|
||||
self._line_check_timer = line_check_timer
|
||||
|
||||
self._append_lines([ayon_art])
|
||||
|
||||
self._first_show = True
|
||||
self._splitter_size_ratio = None
|
||||
self._allow_save_registry = allow_save_registry
|
||||
self._registry_saved = True
|
||||
|
||||
self._init_from_registry()
|
||||
|
||||
if self._tab_widget.count() < 1:
|
||||
self.add_tab("Python")
|
||||
|
||||
def _init_from_registry(self):
|
||||
setting_registry = PythonInterpreterRegistry()
|
||||
width = None
|
||||
height = None
|
||||
try:
|
||||
width = setting_registry.get_item("width")
|
||||
height = setting_registry.get_item("height")
|
||||
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
if width is None or width < 200:
|
||||
width = self.default_width
|
||||
|
||||
if height is None or height < 200:
|
||||
height = self.default_height
|
||||
|
||||
self.resize(width, height)
|
||||
|
||||
try:
|
||||
self._splitter_size_ratio = (
|
||||
setting_registry.get_item("splitter_sizes")
|
||||
)
|
||||
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
try:
|
||||
tab_defs = setting_registry.get_item("tabs") or []
|
||||
for tab_def in tab_defs:
|
||||
widget = self.add_tab(tab_def["name"])
|
||||
widget.set_code(tab_def["code"])
|
||||
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def save_registry(self):
|
||||
# Window was not showed
|
||||
if not self._allow_save_registry or self._registry_saved:
|
||||
return
|
||||
|
||||
self._registry_saved = True
|
||||
setting_registry = PythonInterpreterRegistry()
|
||||
|
||||
setting_registry.set_item("width", self.width())
|
||||
setting_registry.set_item("height", self.height())
|
||||
|
||||
setting_registry.set_item(
|
||||
"splitter_sizes", self._widgets_splitter.sizes()
|
||||
)
|
||||
|
||||
tabs = []
|
||||
for tab_idx in range(self._tab_widget.count()):
|
||||
widget = self._tab_widget.widget(tab_idx)
|
||||
tab_code = widget.get_code()
|
||||
tab_name = self._tab_widget.tabText(tab_idx)
|
||||
tabs.append({
|
||||
"name": tab_name,
|
||||
"code": tab_code
|
||||
})
|
||||
|
||||
setting_registry.set_item("tabs", tabs)
|
||||
|
||||
def _on_tab_right_click(self, global_point):
|
||||
point = self._tab_widget.mapFromGlobal(global_point)
|
||||
tab_bar = self._tab_widget.tabBar()
|
||||
tab_idx = tab_bar.tabAt(point)
|
||||
last_index = tab_bar.count() - 1
|
||||
if tab_idx < 0 or tab_idx > last_index:
|
||||
return
|
||||
|
||||
menu = QtWidgets.QMenu(self._tab_widget)
|
||||
|
||||
add_tab_action = QtWidgets.QAction("Add tab...", menu)
|
||||
add_tab_action.setToolTip("Add new tab")
|
||||
|
||||
rename_tab_action = QtWidgets.QAction("Rename...", menu)
|
||||
rename_tab_action.setToolTip("Rename tab")
|
||||
|
||||
duplicate_tab_action = QtWidgets.QAction("Duplicate...", menu)
|
||||
duplicate_tab_action.setToolTip("Duplicate code to new tab")
|
||||
|
||||
close_tab_action = QtWidgets.QAction("Close", menu)
|
||||
close_tab_action.setToolTip("Close tab and lose content")
|
||||
close_tab_action.setEnabled(self._tab_widget.tabsClosable())
|
||||
|
||||
menu.addAction(add_tab_action)
|
||||
menu.addAction(rename_tab_action)
|
||||
menu.addAction(duplicate_tab_action)
|
||||
menu.addAction(close_tab_action)
|
||||
|
||||
result = menu.exec_(global_point)
|
||||
if result is None:
|
||||
return
|
||||
|
||||
if result is rename_tab_action:
|
||||
self._rename_tab_req(tab_idx)
|
||||
|
||||
elif result is add_tab_action:
|
||||
self._on_add_requested()
|
||||
|
||||
elif result is duplicate_tab_action:
|
||||
self._duplicate_requested(tab_idx)
|
||||
|
||||
elif result is close_tab_action:
|
||||
self._on_tab_close_req(tab_idx)
|
||||
|
||||
def _rename_tab_req(self, tab_idx):
|
||||
dialog = TabNameDialog(self)
|
||||
dialog.set_tab_name(self._tab_widget.tabText(tab_idx))
|
||||
dialog.exec_()
|
||||
tab_name = dialog.result()
|
||||
if tab_name:
|
||||
self._tab_widget.setTabText(tab_idx, tab_name)
|
||||
|
||||
def _duplicate_requested(self, tab_idx=None):
|
||||
if tab_idx is None:
|
||||
tab_idx = self._tab_widget.currentIndex()
|
||||
|
||||
src_widget = self._tab_widget.widget(tab_idx)
|
||||
dst_widget = self._add_tab()
|
||||
if dst_widget is None:
|
||||
return
|
||||
dst_widget.set_code(src_widget.get_code())
|
||||
|
||||
def _on_tab_mid_click(self, global_point):
|
||||
point = self._tab_widget.mapFromGlobal(global_point)
|
||||
tab_bar = self._tab_widget.tabBar()
|
||||
tab_idx = tab_bar.tabAt(point)
|
||||
last_index = tab_bar.count() - 1
|
||||
if tab_idx < 0 or tab_idx > last_index:
|
||||
return
|
||||
|
||||
self._on_tab_close_req(tab_idx)
|
||||
|
||||
def _on_tab_double_click(self, global_point):
|
||||
point = self._tab_widget.mapFromGlobal(global_point)
|
||||
tab_bar = self._tab_widget.tabBar()
|
||||
tab_idx = tab_bar.tabAt(point)
|
||||
last_index = tab_bar.count() - 1
|
||||
if tab_idx < 0 or tab_idx > last_index:
|
||||
return
|
||||
|
||||
self._rename_tab_req(tab_idx)
|
||||
|
||||
def _on_tab_close_req(self, tab_index):
|
||||
if self._tab_widget.count() == 1:
|
||||
return
|
||||
|
||||
widget = self._tab_widget.widget(tab_index)
|
||||
if widget in self._tabs:
|
||||
self._tabs.remove(widget)
|
||||
self._tab_widget.removeTab(tab_index)
|
||||
|
||||
if self._tab_widget.count() == 1:
|
||||
self._tab_widget.setTabsClosable(False)
|
||||
|
||||
def _append_lines(self, lines):
|
||||
at_max = self._output_widget.vertical_scroll_at_max()
|
||||
tmp_cursor = QtGui.QTextCursor(self._output_widget.document())
|
||||
tmp_cursor.movePosition(QtGui.QTextCursor.End)
|
||||
for line in lines:
|
||||
tmp_cursor.insertText(line)
|
||||
|
||||
if at_max:
|
||||
self._output_widget.scroll_to_bottom()
|
||||
|
||||
def _on_timer_timeout(self):
|
||||
if self._stdout_err_wrapper.lines:
|
||||
lines = []
|
||||
while self._stdout_err_wrapper.lines:
|
||||
line = self._stdout_err_wrapper.lines.popleft()
|
||||
lines.append(self.ansi_escape.sub("", line))
|
||||
self._append_lines(lines)
|
||||
|
||||
def _on_add_requested(self):
|
||||
self._add_tab()
|
||||
|
||||
def _add_tab(self):
|
||||
dialog = TabNameDialog(self)
|
||||
dialog.exec_()
|
||||
tab_name = dialog.result()
|
||||
if tab_name:
|
||||
return self.add_tab(tab_name)
|
||||
|
||||
return None
|
||||
|
||||
def _on_before_execute(self, code_text):
|
||||
at_max = self._output_widget.vertical_scroll_at_max()
|
||||
document = self._output_widget.document()
|
||||
tmp_cursor = QtGui.QTextCursor(document)
|
||||
tmp_cursor.movePosition(QtGui.QTextCursor.End)
|
||||
tmp_cursor.insertText("{}\nExecuting command:\n".format(20 * "-"))
|
||||
|
||||
code_block_format = QtGui.QTextFrameFormat()
|
||||
code_block_format.setBackground(QtGui.QColor(27, 27, 27))
|
||||
code_block_format.setPadding(4)
|
||||
|
||||
tmp_cursor.insertFrame(code_block_format)
|
||||
char_format = tmp_cursor.charFormat()
|
||||
char_format.setForeground(
|
||||
QtGui.QBrush(QtGui.QColor(114, 224, 198))
|
||||
)
|
||||
tmp_cursor.setCharFormat(char_format)
|
||||
tmp_cursor.insertText(code_text)
|
||||
|
||||
# Create new cursor
|
||||
tmp_cursor = QtGui.QTextCursor(document)
|
||||
tmp_cursor.movePosition(QtGui.QTextCursor.End)
|
||||
tmp_cursor.insertText("{}\n".format(20 * "-"))
|
||||
|
||||
if at_max:
|
||||
self._output_widget.scroll_to_bottom()
|
||||
|
||||
def add_tab(self, tab_name, index=None):
|
||||
widget = PythonTabWidget(self)
|
||||
widget.before_execute.connect(self._on_before_execute)
|
||||
widget.add_tab_requested.connect(self._on_add_requested)
|
||||
if index is None:
|
||||
if self._tab_widget.count() > 0:
|
||||
index = self._tab_widget.currentIndex() + 1
|
||||
else:
|
||||
index = 0
|
||||
|
||||
self._tabs.append(widget)
|
||||
self._tab_widget.insertTab(index, widget, tab_name)
|
||||
self._tab_widget.setCurrentIndex(index)
|
||||
|
||||
if self._tab_widget.count() > 1:
|
||||
self._tab_widget.setTabsClosable(True)
|
||||
widget.setFocus()
|
||||
return widget
|
||||
|
||||
def showEvent(self, event):
|
||||
self._line_check_timer.start()
|
||||
self._registry_saved = False
|
||||
super(PythonInterpreterWidget, self).showEvent(event)
|
||||
# First show setup
|
||||
if self._first_show:
|
||||
self._first_show = False
|
||||
self._on_first_show()
|
||||
|
||||
self._output_widget.scroll_to_bottom()
|
||||
|
||||
def _on_first_show(self):
|
||||
# Change stylesheet
|
||||
self.setStyleSheet(load_stylesheet())
|
||||
# Check if splitter size ratio is set
|
||||
# - first store value to local variable and then unset it
|
||||
splitter_size_ratio = self._splitter_size_ratio
|
||||
self._splitter_size_ratio = None
|
||||
# Skip if is not set
|
||||
if not splitter_size_ratio:
|
||||
return
|
||||
|
||||
# Skip if number of size items does not match to splitter
|
||||
splitters_count = len(self._widgets_splitter.sizes())
|
||||
if len(splitter_size_ratio) == splitters_count:
|
||||
self._widgets_splitter.setSizes(splitter_size_ratio)
|
||||
|
||||
def closeEvent(self, event):
|
||||
self.save_registry()
|
||||
super(PythonInterpreterWidget, self).closeEvent(event)
|
||||
self._line_check_timer.stop()
|
||||
|
|
@@ -7,6 +7,10 @@ from .constants import (

from .anatomy import Anatomy

from .tempdir import get_temp_dir

from .staging_dir import get_staging_dir_info

from .create import (
BaseCreator,
Creator,

@@ -117,6 +121,12 @@ __all__ = (
# --- Anatomy ---
"Anatomy",

# --- Temp dir ---
"get_temp_dir",

# --- Staging dir ---
"get_staging_dir_info",

# --- Create ---
"BaseCreator",
"Creator",
@@ -585,9 +585,6 @@ def version_up_current_workfile():
"""Function to increment and save workfile
"""
host = registered_host()
if not host.has_unsaved_changes():
print("No unsaved changes, skipping file save..")
return

project_name = get_current_project_name()
folder_path = get_current_folder_path()
@@ -29,6 +29,7 @@ from ayon_core.lib.events import QueuedEventSystem
from ayon_core.lib.attribute_definitions import get_default_values
from ayon_core.host import IPublishHost, IWorkfileHost
from ayon_core.pipeline import Anatomy
from ayon_core.pipeline.template_data import get_template_data
from ayon_core.pipeline.plugin_discover import DiscoverResult

from .exceptions import (

@@ -480,6 +481,36 @@ class CreateContext:
self.get_current_project_name())
return self._current_project_settings

def get_template_data(
self, folder_path: Optional[str], task_name: Optional[str]
) -> Dict[str, Any]:
"""Prepare template data for given context.

Method is using cached entities and settings to prepare template data.

Args:
folder_path (Optional[str]): Folder path.
task_name (Optional[str]): Task name.

Returns:
dict[str, Any]: Template data.

"""
project_entity = self.get_current_project_entity()
folder_entity = task_entity = None
if folder_path:
folder_entity = self.get_folder_entity(folder_path)
if task_name and folder_entity:
task_entity = self.get_task_entity(folder_path, task_name)

return get_template_data(
project_entity,
folder_entity,
task_entity,
host_name=self.host_name,
settings=self.get_current_project_settings(),
)

@property
def context_has_changed(self):
"""Host context has changed.
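A minimal sketch of how the new CreateContext.get_template_data helper would be called, assuming an already initialized CreateContext bound to a host; the folder path, task name, and printed keys are illustrative:

    # 'create_context' is assumed to be an existing, initialized CreateContext.
    data = create_context.get_template_data("/assets/hero", "modeling")
    print(data["project"]["name"])  # usual anatomy template keys: project, folder, task, ...
    print(data.get("task"))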
@@ -724,11 +755,19 @@ class CreateContext:
).format(creator_class.host_name, self.host_name))
continue

creator = creator_class(
project_settings,
self,
self.headless
)
# TODO report initialization error
try:
creator = creator_class(
project_settings,
self,
self.headless
)
except Exception:
self.log.error(
f"Failed to initialize plugin: {creator_class}",
exc_info=True
)
continue

if not creator.enabled:
disabled_creators[creator_identifier] = creator
@ -1283,12 +1322,16 @@ class CreateContext:
|
|||
|
||||
@contextmanager
|
||||
def bulk_pre_create_attr_defs_change(self, sender=None):
|
||||
with self._bulk_context("pre_create_attrs_change", sender) as bulk_info:
|
||||
with self._bulk_context(
|
||||
"pre_create_attrs_change", sender
|
||||
) as bulk_info:
|
||||
yield bulk_info
|
||||
|
||||
@contextmanager
|
||||
def bulk_create_attr_defs_change(self, sender=None):
|
||||
with self._bulk_context("create_attrs_change", sender) as bulk_info:
|
||||
with self._bulk_context(
|
||||
"create_attrs_change", sender
|
||||
) as bulk_info:
|
||||
yield bulk_info
|
||||
|
||||
@contextmanager
|
||||
|
|
@ -1946,9 +1989,9 @@ class CreateContext:
|
|||
creator are just removed from context.
|
||||
|
||||
Args:
|
||||
instances (List[CreatedInstance]): Instances that should be removed.
|
||||
Remove logic is done using creator, which may require to
|
||||
do other cleanup than just remove instance from context.
|
||||
instances (List[CreatedInstance]): Instances that should be
|
||||
removed. Remove logic is done using creator, which may require
|
||||
to do other cleanup than just remove instance from context.
|
||||
sender (Optional[str]): Sender of the event.
|
||||
|
||||
"""
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
import os
|
||||
import copy
|
||||
import collections
|
||||
from typing import TYPE_CHECKING, Optional, Dict, Any
|
||||
|
|
@ -6,7 +7,7 @@ from typing import TYPE_CHECKING, Optional, Dict, Any
|
|||
from abc import ABC, abstractmethod
|
||||
|
||||
from ayon_core.settings import get_project_settings
|
||||
from ayon_core.lib import Logger
|
||||
from ayon_core.lib import Logger, get_version_from_path
|
||||
from ayon_core.pipeline.plugin_discover import (
|
||||
discover,
|
||||
register_plugin,
|
||||
|
|
@ -14,6 +15,7 @@ from ayon_core.pipeline.plugin_discover import (
|
|||
deregister_plugin,
|
||||
deregister_plugin_path
|
||||
)
|
||||
from ayon_core.pipeline.staging_dir import get_staging_dir_info, StagingDir
|
||||
|
||||
from .constants import DEFAULT_VARIANT_VALUE
|
||||
from .product_name import get_product_name
|
||||
|
|
@ -560,6 +562,10 @@ class BaseCreator(ABC):
|
|||
instance
|
||||
)
|
||||
|
||||
cur_project_name = self.create_context.get_current_project_name()
|
||||
if not project_entity and project_name == cur_project_name:
|
||||
project_entity = self.create_context.get_current_project_entity()
|
||||
|
||||
return get_product_name(
|
||||
project_name,
|
||||
task_name,
|
||||
|
|
@ -831,6 +837,108 @@ class Creator(BaseCreator):
|
|||
"""
|
||||
return self.pre_create_attr_defs
|
||||
|
||||
def get_staging_dir(self, instance) -> Optional[StagingDir]:
|
||||
"""Return the staging dir and persistence from instance.
|
||||
|
||||
Args:
|
||||
instance (CreatedInstance): Instance for which should be staging
|
||||
dir gathered.
|
||||
|
||||
Returns:
|
||||
Optional[namedtuple]: Staging dir path and persistence or None
|
||||
"""
|
||||
create_ctx = self.create_context
|
||||
product_name = instance.get("productName")
|
||||
product_type = instance.get("productType")
|
||||
folder_path = instance.get("folderPath")
|
||||
|
||||
# this can only work if product name and folder path are available
|
||||
if not product_name or not folder_path:
|
||||
return None
|
||||
|
||||
publish_settings = self.project_settings["core"]["publish"]
|
||||
follow_workfile_version = (
|
||||
publish_settings
|
||||
["CollectAnatomyInstanceData"]
|
||||
["follow_workfile_version"]
|
||||
)
|
||||
follow_version_hosts = (
|
||||
publish_settings
|
||||
["CollectSceneVersion"]
|
||||
["hosts"]
|
||||
)
|
||||
|
||||
current_host = create_ctx.host.name
|
||||
follow_workfile_version = (
|
||||
follow_workfile_version and
|
||||
current_host in follow_version_hosts
|
||||
)
|
||||
|
||||
# Gather version number provided from the instance.
|
||||
current_workfile = create_ctx.get_current_workfile_path()
|
||||
version = instance.get("version")
|
||||
|
||||
# If follow workfile, gather version from workfile path.
|
||||
if version is None and follow_workfile_version and current_workfile:
|
||||
workfile_version = get_version_from_path(current_workfile)
|
||||
if workfile_version is not None:
|
||||
version = int(workfile_version)
|
||||
|
||||
# Fill-up version with next version available.
|
||||
if version is None:
|
||||
versions = self.get_next_versions_for_instances(
|
||||
[instance]
|
||||
)
|
||||
version, = tuple(versions.values())
|
||||
|
||||
template_data = {"version": version}
|
||||
|
||||
staging_dir_info = get_staging_dir_info(
|
||||
create_ctx.get_current_project_entity(),
|
||||
create_ctx.get_folder_entity(folder_path),
|
||||
create_ctx.get_task_entity(folder_path, instance.get("task")),
|
||||
product_type,
|
||||
product_name,
|
||||
create_ctx.host_name,
|
||||
anatomy=create_ctx.get_current_project_anatomy(),
|
||||
project_settings=create_ctx.get_current_project_settings(),
|
||||
always_return_path=False,
|
||||
logger=self.log,
|
||||
template_data=template_data,
|
||||
)
|
||||
|
||||
return staging_dir_info or None
|
||||
|
||||
def apply_staging_dir(self, instance):
|
||||
"""Apply staging dir with persistence to instance's transient data.
|
||||
|
||||
Method is called on instance creation and on instance update.
|
||||
|
||||
Args:
|
||||
instance (CreatedInstance): Instance for which should be staging
|
||||
dir applied.
|
||||
|
||||
Returns:
|
||||
Optional[str]: Staging dir path or None if not applied.
|
||||
"""
|
||||
staging_dir_info = self.get_staging_dir(instance)
|
||||
if staging_dir_info is None:
|
||||
return None
|
||||
|
||||
# path might be already created by get_staging_dir_info
|
||||
staging_dir_path = staging_dir_info.directory
|
||||
os.makedirs(staging_dir_path, exist_ok=True)
|
||||
|
||||
instance.transient_data.update({
|
||||
"stagingDir": staging_dir_path,
|
||||
"stagingDir_persistent": staging_dir_info.is_persistent,
|
||||
"stagingDir_is_custom": staging_dir_info.is_custom,
|
||||
})
|
||||
|
||||
self.log.info(f"Applied staging dir to instance: {staging_dir_path}")
|
||||
|
||||
return staging_dir_path
|
||||
|
||||
def _pre_create_attr_defs_changed(self):
|
||||
"""Called when pre-create attribute definitions change.
|
||||
|
||||
|
|
|
|||
|
|
@@ -9,7 +9,7 @@ import os
import logging
import collections

from ayon_core.pipeline.constants import AVALON_INSTANCE_ID
from ayon_core.pipeline.constants import AYON_INSTANCE_ID

from .product_name import get_product_name

@@ -34,7 +34,7 @@ class LegacyCreator:
# Default data
self.data = collections.OrderedDict()
# TODO use 'AYON_INSTANCE_ID' when all hosts support it
self.data["id"] = AVALON_INSTANCE_ID
self.data["id"] = AYON_INSTANCE_ID
self.data["productType"] = self.product_type
self.data["folderPath"] = folder_path
self.data["productName"] = name
|
|||
|
|
@@ -1,5 +1,9 @@
import ayon_api
from ayon_core.lib import StringTemplate, filter_profiles, prepare_template_data
from ayon_core.lib import (
StringTemplate,
filter_profiles,
prepare_template_data,
)
from ayon_core.settings import get_project_settings

from .constants import DEFAULT_PRODUCT_TEMPLATE
@ -1,6 +1,7 @@
|
|||
import copy
|
||||
import collections
|
||||
from uuid import uuid4
|
||||
import typing
|
||||
from typing import Optional, Dict, List, Any
|
||||
|
||||
from ayon_core.lib.attribute_definitions import (
|
||||
|
|
@ -17,6 +18,9 @@ from ayon_core.pipeline import (
|
|||
from .exceptions import ImmutableKeyError
|
||||
from .changes import TrackChangesItem
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from .creator_plugins import BaseCreator
|
||||
|
||||
|
||||
class ConvertorItem:
|
||||
"""Item representing convertor plugin.
|
||||
|
|
@ -429,18 +433,26 @@ class CreatedInstance:
|
|||
__immutable_keys = (
|
||||
"id",
|
||||
"instance_id",
|
||||
"product_type",
|
||||
"productType",
|
||||
"creator_identifier",
|
||||
"creator_attributes",
|
||||
"publish_attributes"
|
||||
)
|
||||
# Keys that can be changed, but should not be removed from instance
|
||||
__required_keys = {
|
||||
"folderPath": None,
|
||||
"task": None,
|
||||
"productName": None,
|
||||
"active": True,
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
product_type,
|
||||
product_name,
|
||||
data,
|
||||
creator,
|
||||
product_type: str,
|
||||
product_name: str,
|
||||
data: Dict[str, Any],
|
||||
creator: "BaseCreator",
|
||||
transient_data: Optional[Dict[str, Any]] = None,
|
||||
):
|
||||
self._creator = creator
|
||||
creator_identifier = creator.identifier
|
||||
|
|
@ -455,7 +467,9 @@ class CreatedInstance:
|
|||
self._members = []
|
||||
|
||||
# Data that can be used for lifetime of object
|
||||
self._transient_data = {}
|
||||
if transient_data is None:
|
||||
transient_data = {}
|
||||
self._transient_data = transient_data
|
||||
|
||||
# Create a copy of passed data to avoid changing them on the fly
|
||||
data = copy.deepcopy(data or {})
|
||||
|
|
@ -485,7 +499,7 @@ class CreatedInstance:
|
|||
item_id = data.get("id")
|
||||
# TODO use only 'AYON_INSTANCE_ID' when all hosts support it
|
||||
if item_id not in {AYON_INSTANCE_ID, AVALON_INSTANCE_ID}:
|
||||
item_id = AVALON_INSTANCE_ID
|
||||
item_id = AYON_INSTANCE_ID
|
||||
self._data["id"] = item_id
|
||||
self._data["productType"] = product_type
|
||||
self._data["productName"] = product_name
|
||||
|
|
@ -515,6 +529,9 @@ class CreatedInstance:
|
|||
if data:
|
||||
self._data.update(data)
|
||||
|
||||
for key, default in self.__required_keys.items():
|
||||
self._data.setdefault(key, default)
|
||||
|
||||
if not self._data.get("instance_id"):
|
||||
self._data["instance_id"] = str(uuid4())
|
||||
|
||||
|
|
@ -567,6 +584,8 @@ class CreatedInstance:
|
|||
has_key = key in self._data
|
||||
output = self._data.pop(key, *args, **kwargs)
|
||||
if has_key:
|
||||
if key in self.__required_keys:
|
||||
self._data[key] = self.__required_keys[key]
|
||||
self._create_context.instance_values_changed(
|
||||
self.id, {key: None}
|
||||
)
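A small illustration of the `__required_keys` behaviour introduced in this hunk; the instance and creator objects are assumed to exist, only the pop-resets-to-default behaviour is from the diff.

# Hypothetical: "folderPath" and "active" are required keys, so popping them
# resets them to their declared defaults instead of removing them.
instance = CreatedInstance(
    "render", "renderMain", {"folderPath": "/shots/sh010"}, creator
)
instance.pop("folderPath")
print(instance["folderPath"])  # None  (default from __required_keys)
instance.pop("active")
print(instance["active"])      # True  (default from __required_keys)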
|
||||
|
|
@ -775,16 +794,26 @@ class CreatedInstance:
|
|||
self._create_context.instance_create_attr_defs_changed(self.id)
|
||||
|
||||
@classmethod
|
||||
def from_existing(cls, instance_data, creator):
|
||||
def from_existing(
|
||||
cls,
|
||||
instance_data: Dict[str, Any],
|
||||
creator: "BaseCreator",
|
||||
transient_data: Optional[Dict[str, Any]] = None,
|
||||
) -> "CreatedInstance":
|
||||
"""Convert instance data from workfile to CreatedInstance.
|
||||
|
||||
Args:
|
||||
instance_data (Dict[str, Any]): Data in a structure ready for
|
||||
'CreatedInstance' object.
|
||||
creator (BaseCreator): Creator plugin which is creating the
|
||||
instance of for which the instance belong.
|
||||
"""
|
||||
instance of for which the instance belongs.
|
||||
transient_data (Optional[dict[str, Any]]): Instance transient
|
||||
data.
|
||||
|
||||
Returns:
|
||||
CreatedInstance: Instance object.
|
||||
|
||||
"""
|
||||
instance_data = copy.deepcopy(instance_data)
|
||||
|
||||
product_type = instance_data.get("productType")
|
||||
|
|
@ -797,7 +826,11 @@ class CreatedInstance:
|
|||
product_name = instance_data.get("subset")
|
||||
|
||||
return cls(
|
||||
product_type, product_name, instance_data, creator
|
||||
product_type,
|
||||
product_name,
|
||||
instance_data,
|
||||
creator,
|
||||
transient_data=transient_data,
|
||||
)
|
||||
|
||||
def attribute_value_changed(self, key, changes):
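A hedged sketch of the extended `from_existing` call; the metadata values and the `node` object are invented for the example.

# Illustrative only: rebuild an instance from workfile metadata while
# attaching host-side transient data that should not be persisted.
instance_data = {
    "productType": "model",
    "productName": "modelMain",
    "folderPath": "/assets/char/villain",
}
instance = CreatedInstance.from_existing(
    instance_data,
    creator,                        # an existing BaseCreator instance
    transient_data={"node": node},  # e.g. the scene node backing the instance
)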
|
||||
|
|
|
|||
|
|
@ -387,7 +387,7 @@ def get_representations_delivery_template_data(
|
|||
# convert representation entity. Fixed in 'ayon_api' 1.0.10.
|
||||
if isinstance(template_data, str):
|
||||
con = ayon_api.get_server_api_connection()
|
||||
repre_entity = con._representation_conversion(repre_entity)
|
||||
con._representation_conversion(repre_entity)
|
||||
template_data = repre_entity["context"]
|
||||
|
||||
template_data.update(copy.deepcopy(general_template_data))
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
import os
|
||||
import re
|
||||
import clique
|
||||
import math
|
||||
|
||||
import opentimelineio as otio
|
||||
from opentimelineio import opentime as _ot
|
||||
|
|
@ -196,11 +197,11 @@ def is_clip_from_media_sequence(otio_clip):
|
|||
return is_input_sequence or is_input_sequence_legacy
|
||||
|
||||
|
||||
def remap_range_on_file_sequence(otio_clip, in_out_range):
|
||||
def remap_range_on_file_sequence(otio_clip, otio_range):
|
||||
"""
|
||||
Args:
|
||||
otio_clip (otio.schema.Clip): The OTIO clip to check.
|
||||
in_out_range (tuple[float, float]): The in-out range to remap.
|
||||
otio_range (otio.schema.TimeRange): The trim range to apply.
|
||||
|
||||
Returns:
|
||||
tuple(int, int): The remapped range as discrete frame number.
|
||||
|
|
@ -211,36 +212,59 @@ def remap_range_on_file_sequence(otio_clip, in_out_range):
|
|||
if not is_clip_from_media_sequence(otio_clip):
|
||||
raise ValueError(f"Cannot map on non-file sequence clip {otio_clip}.")
|
||||
|
||||
try:
|
||||
media_in_trimmed, media_out_trimmed = in_out_range
|
||||
|
||||
except ValueError as error:
|
||||
raise ValueError("Invalid in_out_range provided.") from error
|
||||
|
||||
media_ref = otio_clip.media_reference
|
||||
available_range = otio_clip.available_range()
|
||||
source_range = otio_clip.source_range
|
||||
available_range_rate = available_range.start_time.rate
|
||||
media_in = available_range.start_time.value
|
||||
|
||||
# Backward-compatibility for Hiero OTIO exporter.
|
||||
# NTSC compatibility might introduce floating rates, when these are
|
||||
# not exactly the same (23.976 vs 23.976024627685547)
|
||||
# this will cause precision issue in computation.
|
||||
# Currently round to 2 decimals for comparison,
|
||||
# but this should always rescale after that.
|
||||
rounded_av_rate = round(available_range_rate, 2)
|
||||
rounded_range_rate = round(otio_range.start_time.rate, 2)
|
||||
|
||||
if rounded_av_rate != rounded_range_rate:
|
||||
raise ValueError("Inconsistent range between clip and provided clip")
|
||||
|
||||
source_range = otio_clip.source_range
|
||||
media_in = available_range.start_time
|
||||
available_range_start_frame = (
|
||||
available_range.start_time.to_frames()
|
||||
)
|
||||
|
||||
# Temporary.
|
||||
# Some AYON custom OTIO exporter were implemented with relative
|
||||
# source range for image sequence. Following code maintain
|
||||
# backward-compatibility by adjusting media_in
|
||||
# while we are updating those.
|
||||
conformed_src_in = source_range.start_time.rescaled_to(
|
||||
available_range_rate
|
||||
)
|
||||
if (
|
||||
is_clip_from_media_sequence(otio_clip)
|
||||
and otio_clip.available_range().start_time.to_frames() == media_ref.start_frame
|
||||
and source_range.start_time.to_frames() < media_ref.start_frame
|
||||
and available_range_start_frame == media_ref.start_frame
|
||||
and conformed_src_in.to_frames() < media_ref.start_frame
|
||||
):
|
||||
media_in = 0
|
||||
media_in = otio.opentime.RationalTime(
|
||||
0, rate=available_range_rate
|
||||
)
|
||||
|
||||
src_offset_in = otio_range.start_time - media_in
|
||||
frame_in = otio.opentime.RationalTime.from_frames(
|
||||
media_in_trimmed - media_in + media_ref.start_frame,
|
||||
media_ref.start_frame + src_offset_in.to_frames(),
|
||||
rate=available_range_rate,
|
||||
).to_frames()
|
||||
|
||||
# e.g.:
|
||||
# duration = 10 frames at 24fps
|
||||
# if frame_in = 1001 then
|
||||
# frame_out = 1010
|
||||
offset_duration = max(0, otio_range.duration.to_frames() - 1)
|
||||
|
||||
frame_out = otio.opentime.RationalTime.from_frames(
|
||||
media_out_trimmed - media_in + media_ref.start_frame,
|
||||
frame_in + offset_duration,
|
||||
rate=available_range_rate,
|
||||
).to_frames()
|
||||
|
||||
|
|
@ -258,21 +282,6 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
|
|||
media_ref = otio_clip.media_reference
|
||||
is_input_sequence = is_clip_from_media_sequence(otio_clip)
|
||||
|
||||
# Temporary.
|
||||
# Some AYON custom OTIO exporter were implemented with relative
|
||||
# source range for image sequence. Following code maintain
|
||||
# backward-compatibility by adjusting available range
|
||||
# while we are updating those.
|
||||
if (
|
||||
is_input_sequence
|
||||
and available_range.start_time.to_frames() == media_ref.start_frame
|
||||
and source_range.start_time.to_frames() < media_ref.start_frame
|
||||
):
|
||||
available_range = _ot.TimeRange(
|
||||
_ot.RationalTime(0, rate=available_range_rate),
|
||||
available_range.duration,
|
||||
)
|
||||
|
||||
# Conform source range bounds to available range rate
|
||||
# .e.g. embedded TC of (3600 sec/ 1h), duration 100 frames
|
||||
#
|
||||
|
|
@ -303,8 +312,12 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
|
|||
rounded_av_rate = round(available_range_rate, 2)
|
||||
rounded_src_rate = round(source_range.start_time.rate, 2)
|
||||
if rounded_av_rate != rounded_src_rate:
|
||||
conformed_src_in = source_range.start_time.rescaled_to(available_range_rate)
|
||||
conformed_src_duration = source_range.duration.rescaled_to(available_range_rate)
|
||||
conformed_src_in = source_range.start_time.rescaled_to(
|
||||
available_range_rate
|
||||
)
|
||||
conformed_src_duration = source_range.duration.rescaled_to(
|
||||
available_range_rate
|
||||
)
|
||||
conformed_source_range = otio.opentime.TimeRange(
|
||||
start_time=conformed_src_in,
|
||||
duration=conformed_src_duration
|
||||
|
|
@ -313,10 +326,24 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
|
|||
else:
|
||||
conformed_source_range = source_range
|
||||
|
||||
# Temporary.
|
||||
# Some AYON custom OTIO exporter were implemented with relative
|
||||
# source range for image sequence. Following code maintain
|
||||
# backward-compatibility by adjusting available range
|
||||
# while we are updating those.
|
||||
if (
|
||||
is_input_sequence
|
||||
and available_range.start_time.to_frames() == media_ref.start_frame
|
||||
and conformed_source_range.start_time.to_frames() <
|
||||
media_ref.start_frame
|
||||
):
|
||||
available_range = _ot.TimeRange(
|
||||
_ot.RationalTime(0, rate=available_range_rate),
|
||||
available_range.duration,
|
||||
)
|
||||
|
||||
# modifiers
|
||||
time_scalar = 1.
|
||||
offset_in = 0
|
||||
offset_out = 0
|
||||
time_warp_nodes = []
|
||||
|
||||
# Check for speed effects and adjust playback speed accordingly
|
||||
|
|
@ -347,51 +374,134 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
|
|||
tw_node.update(metadata)
|
||||
tw_node["lookup"] = list(lookup)
|
||||
|
||||
# get first and last frame offsets
|
||||
offset_in += lookup[0]
|
||||
offset_out += lookup[-1]
|
||||
|
||||
# add to timewarp nodes
|
||||
time_warp_nodes.append(tw_node)
|
||||
|
||||
# multiply by time scalar
|
||||
offset_in *= time_scalar
|
||||
offset_out *= time_scalar
|
||||
|
||||
# scale handles
|
||||
handle_start *= abs(time_scalar)
|
||||
handle_end *= abs(time_scalar)
|
||||
|
||||
# flip offset and handles if reversed speed
|
||||
if time_scalar < 0:
|
||||
offset_in, offset_out = offset_out, offset_in
|
||||
handle_start, handle_end = handle_end, handle_start
|
||||
|
||||
# compute retimed range
|
||||
media_in_trimmed = conformed_source_range.start_time.value + offset_in
|
||||
media_out_trimmed = media_in_trimmed + (
|
||||
(
|
||||
conformed_source_range.duration.value
|
||||
* abs(time_scalar)
|
||||
+ offset_out
|
||||
) - 1
|
||||
)
|
||||
|
||||
media_in = available_range.start_time.value
|
||||
media_out = available_range.end_time_inclusive().value
|
||||
|
||||
# If media source is an image sequence, returned
|
||||
# mediaIn/mediaOut have to correspond
|
||||
# to frame numbers from source sequence.
|
||||
if is_input_sequence:
|
||||
|
||||
src_in = conformed_source_range.start_time
|
||||
src_duration = math.ceil(
|
||||
otio_clip.source_range.duration.value
|
||||
* abs(time_scalar)
|
||||
)
|
||||
retimed_duration = otio.opentime.RationalTime(
|
||||
src_duration,
|
||||
otio_clip.source_range.duration.rate
|
||||
)
|
||||
retimed_duration = retimed_duration.rescaled_to(src_in.rate)
|
||||
|
||||
trim_range = otio.opentime.TimeRange(
|
||||
start_time=src_in,
|
||||
duration=retimed_duration,
|
||||
)
|
||||
|
||||
# preserve discrete frame numbers
|
||||
media_in_trimmed, media_out_trimmed = remap_range_on_file_sequence(
|
||||
otio_clip,
|
||||
(media_in_trimmed, media_out_trimmed)
|
||||
trim_range,
|
||||
)
|
||||
media_in = media_ref.start_frame
|
||||
media_out = media_in + available_range.duration.to_frames() - 1
|
||||
|
||||
else:
|
||||
# compute retimed range
|
||||
media_in_trimmed = conformed_source_range.start_time.value
|
||||
|
||||
offset_duration = (
|
||||
conformed_source_range.duration.value
|
||||
* abs(time_scalar)
|
||||
)
|
||||
|
||||
# Offset duration by 1 for media out frame
|
||||
# - only if duration is not single frame (start frame != end frame)
|
||||
if offset_duration > 0:
|
||||
offset_duration -= 1
|
||||
media_out_trimmed = media_in_trimmed + offset_duration
|
||||
|
||||
media_in = available_range.start_time.value
|
||||
media_out = available_range.end_time_inclusive().value
|
||||
|
||||
if time_warp_nodes:
|
||||
# Naive approach: Resolve consecutive timewarp(s) on range,
|
||||
# then check if plate range has to be extended beyond source range.
|
||||
in_frame = media_in_trimmed
|
||||
frame_range = [in_frame]
|
||||
for _ in range(otio_clip.source_range.duration.to_frames() - 1):
|
||||
in_frame += time_scalar
|
||||
frame_range.append(in_frame)
|
||||
|
||||
# Different editorial DCC might have different TimeWarp logic.
|
||||
# The following logic assumes that the "lookup" list values are
|
||||
# frame offsets relative to the current source frame number.
|
||||
#
|
||||
# media_source_range |______1_____|______2______|______3______|
|
||||
#
|
||||
# media_retimed_range |______2_____|______2______|______3______|
|
||||
#
|
||||
# TimeWarp lookup +1 0 0
|
||||
for tw_idx, tw in enumerate(time_warp_nodes):
|
||||
for idx, frame_number in enumerate(frame_range):
|
||||
# First timewarp, apply on media range
|
||||
if tw_idx == 0:
|
||||
frame_range[idx] = round(
|
||||
frame_number +
|
||||
(tw["lookup"][idx] * time_scalar)
|
||||
)
|
||||
# Consecutive timewarp, apply on the previous result
|
||||
else:
|
||||
new_idx = round(idx + tw["lookup"][idx])
|
||||
|
||||
if 0 <= new_idx < len(frame_range):
|
||||
frame_range[idx] = frame_range[new_idx]
|
||||
continue
|
||||
|
||||
# TODO: implementing this would need to actually have
|
||||
# retiming engine resolve process within AYON,
|
||||
# resolving wraps as curves, then projecting
|
||||
# those into the previous media_range.
|
||||
raise NotImplementedError(
|
||||
"Unsupported consecutive timewarps "
|
||||
"(out of computed range)"
|
||||
)
|
||||
|
||||
# adjust range if needed
|
||||
media_in_trimmed_before_tw = media_in_trimmed
|
||||
media_in_trimmed = max(min(frame_range), media_in)
|
||||
media_out_trimmed = min(max(frame_range), media_out)
|
||||
|
||||
# If TimeWarp changes the first frame of the soure range,
|
||||
# we need to offset the first TimeWarp values accordingly.
|
||||
#
|
||||
# expected_range |______2_____|______2______|______3______|
|
||||
#
|
||||
# EDITORIAL
|
||||
# media_source_range |______1_____|______2______|______3______|
|
||||
#
|
||||
# TimeWarp lookup +1 0 0
|
||||
#
|
||||
# EXTRACTED PLATE
|
||||
# plate_range |______2_____|______3______|_ _ _ _ _ _ _|
|
||||
#
|
||||
# expected TimeWarp 0 -1 -1
|
||||
if media_in_trimmed != media_in_trimmed_before_tw:
|
||||
offset = media_in_trimmed_before_tw - media_in_trimmed
|
||||
offset *= 1.0 / time_scalar
|
||||
time_warp_nodes[0]["lookup"] = [
|
||||
value + offset
|
||||
for value in time_warp_nodes[0]["lookup"]
|
||||
]
|
||||
|
||||
# adjust available handles if needed
|
||||
if (media_in_trimmed - media_in) < handle_start:
|
||||
handle_start = max(0, media_in_trimmed - media_in)
|
||||
|
|
@ -410,16 +520,16 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
|
|||
"retime": True,
|
||||
"speed": time_scalar,
|
||||
"timewarps": time_warp_nodes,
|
||||
"handleStart": int(handle_start),
|
||||
"handleEnd": int(handle_end)
|
||||
"handleStart": math.ceil(handle_start),
|
||||
"handleEnd": math.ceil(handle_end)
|
||||
}
|
||||
}
|
||||
|
||||
returning_dict = {
|
||||
"mediaIn": media_in_trimmed,
|
||||
"mediaOut": media_out_trimmed,
|
||||
"handleStart": int(handle_start),
|
||||
"handleEnd": int(handle_end),
|
||||
"handleStart": math.ceil(handle_start),
|
||||
"handleEnd": math.ceil(handle_end),
|
||||
"speed": time_scalar
|
||||
}
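The move from `int()` to `math.ceil()` matters once handles are scaled by a fractional `abs(time_scalar)`; a short worked example (values are illustrative):

import math

handle_start = 10      # handle frames requested by settings
time_scalar = 0.48     # retime speed taken from the clip effect

scaled = handle_start * abs(time_scalar)  # 4.8
print(int(scaled))        # 4 -> silently drops part of a handle frame
print(math.ceil(scaled))  # 5 -> rounds up, the full handle range stays covered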
@ -18,13 +18,13 @@ def parse_ayon_entity_uri(uri: str) -> Optional[dict]:
|
|||
|
||||
Example:
|
||||
>>> parse_ayon_entity_uri(
|
||||
>>> "ayon://test/char/villain?product=modelMain&version=2&representation=usd" # noqa: E501
|
||||
>>> "ayon://test/char/villain?product=modelMain&version=2&representation=usd"
|
||||
>>> )
|
||||
{'project': 'test', 'folderPath': '/char/villain',
|
||||
'product': 'modelMain', 'version': 1,
|
||||
'representation': 'usd'}
|
||||
>>> parse_ayon_entity_uri(
|
||||
>>> "ayon+entity://project/folder?product=renderMain&version=3&representation=exr" # noqa: E501
|
||||
>>> "ayon+entity://project/folder?product=renderMain&version=3&representation=exr"
|
||||
>>> )
|
||||
{'project': 'project', 'folderPath': '/folder',
|
||||
'product': 'renderMain', 'version': 3,
|
||||
|
|
@ -34,7 +34,7 @@ def parse_ayon_entity_uri(uri: str) -> Optional[dict]:
|
|||
dict[str, Union[str, int]]: The individual key with their values as
|
||||
found in the ayon entity URI.
|
||||
|
||||
"""
|
||||
""" # noqa: E501
|
||||
|
||||
if not (uri.startswith("ayon+entity://") or uri.startswith("ayon://")):
|
||||
return {}
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
from __future__ import annotations
|
||||
import copy
|
||||
import os
|
||||
import re
|
||||
|
|
@ -8,7 +9,10 @@ import attr
|
|||
import ayon_api
|
||||
import clique
|
||||
from ayon_core.lib import Logger
|
||||
from ayon_core.pipeline import get_current_project_name, get_representation_path
|
||||
from ayon_core.pipeline import (
|
||||
get_current_project_name,
|
||||
get_representation_path,
|
||||
)
|
||||
from ayon_core.pipeline.create import get_product_name
|
||||
from ayon_core.pipeline.farm.patterning import match_aov_pattern
|
||||
from ayon_core.pipeline.publish import KnownPublishError
|
||||
|
|
@ -244,7 +248,8 @@ def create_skeleton_instance(
|
|||
"useSequenceForReview": data.get("useSequenceForReview", True),
|
||||
# map inputVersions `ObjectId` -> `str` so json supports it
|
||||
"inputVersions": list(map(str, data.get("inputVersions", []))),
|
||||
"colorspace": data.get("colorspace")
|
||||
"colorspace": data.get("colorspace"),
|
||||
"hasExplicitFrames": data.get("hasExplicitFrames")
|
||||
}
|
||||
|
||||
if data.get("renderlayer"):
|
||||
|
|
@ -295,11 +300,17 @@ def _add_review_families(families):
|
|||
return families
|
||||
|
||||
|
||||
def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
|
||||
skip_integration_repre_list,
|
||||
do_not_add_review,
|
||||
context,
|
||||
color_managed_plugin):
|
||||
def prepare_representations(
|
||||
skeleton_data,
|
||||
exp_files,
|
||||
anatomy,
|
||||
aov_filter,
|
||||
skip_integration_repre_list,
|
||||
do_not_add_review,
|
||||
context,
|
||||
color_managed_plugin,
|
||||
frames_to_render=None
|
||||
):
|
||||
"""Create representations for file sequences.
|
||||
|
||||
This will return representations of expected files if they are not
|
||||
|
|
@ -315,6 +326,8 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
|
|||
skip_integration_repre_list (list): exclude specific extensions,
|
||||
do_not_add_review (bool): explicitly skip review
|
||||
color_managed_plugin (publish.ColormanagedPyblishPluginMixin)
|
||||
frames_to_render (str | None): implicit or explicit range of frames
|
||||
to render this value is sent to Deadline in JobInfo.Frames
|
||||
Returns:
|
||||
list of representations
|
||||
|
||||
|
|
@ -325,6 +338,14 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
|
|||
|
||||
log = Logger.get_logger("farm_publishing")
|
||||
|
||||
if frames_to_render is not None:
|
||||
frames_to_render = convert_frames_str_to_list(frames_to_render)
|
||||
else:
|
||||
# Backwards compatibility for older logic
|
||||
frame_start = int(skeleton_data.get("frameStartHandle"))
|
||||
frame_end = int(skeleton_data.get("frameEndHandle"))
|
||||
frames_to_render = list(range(frame_start, frame_end + 1))
|
||||
|
||||
# create representation for every collected sequence
|
||||
for collection in collections:
|
||||
ext = collection.tail.lstrip(".")
|
||||
|
|
@ -361,20 +382,27 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
|
|||
" This may cause issues on farm."
|
||||
).format(staging))
|
||||
|
||||
frame_start = int(skeleton_data.get("frameStartHandle"))
|
||||
frame_start = frames_to_render[0]
|
||||
frame_end = frames_to_render[-1]
|
||||
if skeleton_data.get("slate"):
|
||||
frame_start -= 1
|
||||
frames_to_render.insert(0, frame_start)
|
||||
|
||||
filenames = [
|
||||
os.path.basename(filepath)
|
||||
for filepath in _get_real_files_to_render(
|
||||
collection, frames_to_render
|
||||
)
|
||||
]
|
||||
# explicitly disable review by user
|
||||
preview = preview and not do_not_add_review
|
||||
rep = {
|
||||
"name": ext,
|
||||
"ext": ext,
|
||||
"files": [os.path.basename(f) for f in list(collection)],
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": int(skeleton_data.get("frameEndHandle")),
|
||||
# If expectedFile are absolute, we need only filenames
|
||||
"files": filenames,
|
||||
"stagingDir": staging,
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"fps": skeleton_data.get("fps"),
|
||||
"tags": ["review"] if preview else [],
|
||||
}
|
||||
|
|
@ -453,9 +481,93 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
|
|||
return representations
|
||||
|
||||
|
||||
def create_instances_for_aov(instance, skeleton, aov_filter,
                             skip_integration_repre_list,
                             do_not_add_review):
def convert_frames_str_to_list(frames: str) -> list[int]:
    """Convert frames definition string to frames.

    Handles formats as:
    >>> convert_frames_str_to_list('1001')
    [1001]
    >>> convert_frames_str_to_list('1002,1004')
    [1002, 1004]
    >>> convert_frames_str_to_list('1003-1005')
    [1003, 1004, 1005]
    >>> convert_frames_str_to_list('1001-1021x5')
    [1001, 1006, 1011, 1016, 1021]

    Args:
        frames (str): String with frames definition.

    Returns:
        list[int]: List of frames.

    """
    step_pattern = re.compile(r"(?:step|by|every|x|:)(\d+)$")

    output = []
    step = 1
    for frame in frames.split(","):
        if "-" in frame:
            frame_start, frame_end = frame.split("-")
            match = step_pattern.findall(frame_end)
            if match:
                step = int(match[0])
                frame_end = re.sub(step_pattern, "", frame_end)

            output.extend(
                range(int(frame_start), int(frame_end) + 1, step)
            )
        else:
            output.append(int(frame))
    output.sort()
    return output

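The step regex above accepts several spellings of the step token; a quick usage illustration (outputs follow from the range logic in the function):

# All of these mean "every 10th frame between 1001 and 1031".
for frames in ("1001-1031x10", "1001-1031:10", "1001-1031step10",
               "1001-1031by10", "1001-1031every10"):
    print(convert_frames_str_to_list(frames))
    # [1001, 1011, 1021, 1031]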
def _get_real_files_to_render(collection, frames_to_render):
    """Filter files with frames that should be really rendered.

    'expected_files' are collected from DCC based on timeline setting. This is
    being calculated differently in each DCC. Filtering here is on single place

    But artists might explicitly set frames they want to render in Publisher UI
    This range would override and filter previously prepared expected files
    from DCC.

    Example:
        >>> expected_files = clique.parse([
        >>>     "foo_v01.0001.exr",
        >>>     "foo_v01.0002.exr",
        >>> ])
        >>> frames_to_render = [1]
        >>> _get_real_files_to_render(expected_files, frames_to_render)
        ["foo_v01.0001.exr"]

    Args:
        collection (clique.Collection): absolute paths
        frames_to_render (list[int]): of int 1001

    Returns:
        list[str]: absolute paths of files to be rendered

    """
    included_frames = set(collection.indexes).intersection(frames_to_render)
    real_collection = clique.Collection(
        collection.head,
        collection.tail,
        collection.padding,
        indexes=included_frames
    )
    return list(real_collection)

def create_instances_for_aov(
|
||||
instance,
|
||||
skeleton,
|
||||
aov_filter,
|
||||
skip_integration_repre_list,
|
||||
do_not_add_review,
|
||||
frames_to_render=None
|
||||
):
|
||||
"""Create instances from AOVs.
|
||||
|
||||
This will create new pyblish.api.Instances by going over expected
|
||||
|
|
@ -467,6 +579,7 @@ def create_instances_for_aov(instance, skeleton, aov_filter,
|
|||
aov_filter (dict): AOV filter.
|
||||
skip_integration_repre_list (list): skip
|
||||
do_not_add_review (bool): Explicitly disable reviews
|
||||
frames_to_render (str | None): Frames to render.
|
||||
|
||||
Returns:
|
||||
list of pyblish.api.Instance: Instances created from
|
||||
|
|
@ -513,7 +626,8 @@ def create_instances_for_aov(instance, skeleton, aov_filter,
|
|||
aov_filter,
|
||||
additional_color_data,
|
||||
skip_integration_repre_list,
|
||||
do_not_add_review
|
||||
do_not_add_review,
|
||||
frames_to_render
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -642,8 +756,15 @@ def get_product_name_and_group_from_template(
|
|||
return resulting_product_name, resulting_group_name
|
||||
|
||||
|
||||
def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
|
||||
skip_integration_repre_list, do_not_add_review):
|
||||
def _create_instances_for_aov(
|
||||
instance,
|
||||
skeleton,
|
||||
aov_filter,
|
||||
additional_data,
|
||||
skip_integration_repre_list,
|
||||
do_not_add_review,
|
||||
frames_to_render
|
||||
):
|
||||
"""Create instance for each AOV found.
|
||||
|
||||
This will create new instance for every AOV it can detect in expected
|
||||
|
|
@ -657,7 +778,8 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
|
|||
skip_integration_repre_list (list): list of extensions that shouldn't
|
||||
be published
|
||||
do_not_add_review (bool): explicitly disable review
|
||||
|
||||
frames_to_render (str | None): implicit or explicit range of
|
||||
frames to render this value is sent to Deadline in JobInfo.Frames
|
||||
|
||||
Returns:
|
||||
list of instances
|
||||
|
|
@ -677,10 +799,23 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
|
|||
# go through AOVs in expected files
|
||||
for aov, files in expected_files[0].items():
|
||||
collected_files = _collect_expected_files_for_aov(files)
|
||||
first_filepath = collected_files
|
||||
if isinstance(first_filepath, (list, tuple)):
|
||||
first_filepath = first_filepath[0]
|
||||
staging_dir = os.path.dirname(first_filepath)
|
||||
|
||||
expected_filepath = collected_files
|
||||
if isinstance(collected_files, (list, tuple)):
|
||||
expected_filepath = collected_files[0]
|
||||
if (
|
||||
frames_to_render is not None
|
||||
and isinstance(collected_files, (list, tuple)) # not single file
|
||||
):
|
||||
aov_frames_to_render = convert_frames_str_to_list(frames_to_render)
|
||||
collections, _ = clique.assemble(collected_files)
|
||||
collected_files = _get_real_files_to_render(
|
||||
collections[0], aov_frames_to_render)
|
||||
else:
|
||||
frame_start = int(skeleton.get("frameStartHandle"))
|
||||
frame_end = int(skeleton.get("frameEndHandle"))
|
||||
aov_frames_to_render = list(range(frame_start, frame_end + 1))
|
||||
|
||||
dynamic_data = {
|
||||
"aov": aov,
|
||||
|
|
@ -691,7 +826,7 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
|
|||
# TODO: this must be changed to be more robust. Any coincidence
|
||||
# of camera name in the file path will be considered as
|
||||
# camera name. This is not correct.
|
||||
camera = [cam for cam in cameras if cam in expected_filepath]
|
||||
camera = [cam for cam in cameras if cam in first_filepath]
|
||||
|
||||
# Is there just one camera matching?
|
||||
# TODO: this is not true, we can have multiple cameras in the scene
|
||||
|
|
@ -702,9 +837,14 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
|
|||
|
||||
project_settings = instance.context.data.get("project_settings")
|
||||
|
||||
use_legacy_product_name = True
|
||||
try:
|
||||
use_legacy_product_name = project_settings["core"]["tools"]["creator"]["use_legacy_product_names_for_renders"] # noqa: E501
|
||||
use_legacy_product_name = (
|
||||
project_settings
|
||||
["core"]
|
||||
["tools"]
|
||||
["creator"]
|
||||
["use_legacy_product_names_for_renders"]
|
||||
)
|
||||
except KeyError:
|
||||
warnings.warn(
|
||||
("use_legacy_for_renders not found in project settings. "
|
||||
|
|
@ -720,7 +860,9 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
|
|||
dynamic_data=dynamic_data)
|
||||
|
||||
else:
|
||||
product_name, group_name = get_product_name_and_group_from_template(
|
||||
(
|
||||
product_name, group_name
|
||||
) = get_product_name_and_group_from_template(
|
||||
task_entity=instance.data["taskEntity"],
|
||||
project_name=instance.context.data["projectName"],
|
||||
host_name=instance.context.data["hostName"],
|
||||
|
|
@ -729,10 +871,8 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
|
|||
dynamic_data=dynamic_data
|
||||
)
|
||||
|
||||
staging = os.path.dirname(expected_filepath)
|
||||
|
||||
try:
|
||||
staging = remap_source(staging, anatomy)
|
||||
staging_dir = remap_source(staging_dir, anatomy)
|
||||
except ValueError as e:
|
||||
log.warning(e)
|
||||
|
||||
|
|
@ -740,7 +880,7 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
|
|||
|
||||
app = os.environ.get("AYON_HOST_NAME", "")
|
||||
|
||||
render_file_name = os.path.basename(expected_filepath)
|
||||
render_file_name = os.path.basename(first_filepath)
|
||||
|
||||
aov_patterns = aov_filter
|
||||
|
||||
|
|
@ -797,10 +937,10 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
|
|||
"name": ext,
|
||||
"ext": ext,
|
||||
"files": collected_files,
|
||||
"frameStart": int(skeleton["frameStartHandle"]),
|
||||
"frameEnd": int(skeleton["frameEndHandle"]),
|
||||
"frameStart": aov_frames_to_render[0],
|
||||
"frameEnd": aov_frames_to_render[-1],
|
||||
# If expectedFile are absolute, we need only filenames
|
||||
"stagingDir": staging,
|
||||
"stagingDir": staging_dir,
|
||||
"fps": new_instance.get("fps"),
|
||||
"tags": ["review"] if preview else [],
|
||||
"colorspaceData": {
|
||||
|
|
@ -863,7 +1003,7 @@ def _collect_expected_files_for_aov(files):
|
|||
# but we really expect only one collection.
|
||||
# Nothing else make sense.
|
||||
if len(cols) != 1:
|
||||
raise ValueError("Only one image sequence type is expected.") # noqa: E501
|
||||
raise ValueError("Only one image sequence type is expected.")
|
||||
return list(cols[0])
@@ -13,15 +13,7 @@ from .utils import get_representation_path_from_context


class LoaderPlugin(list):
    """Load representation into host application

    Arguments:
        context (dict): avalon-core:context-1.0

    .. versionadded:: 4.0
       This class was introduced

    """
    """Load representation into host application"""

    product_types = set()
    representations = set()

@ -465,7 +465,9 @@ def update_container(container, version=-1):
|
|||
from ayon_core.pipeline import get_current_project_name
|
||||
|
||||
# Compute the different version from 'representation'
|
||||
project_name = get_current_project_name()
|
||||
project_name = container.get("project_name")
|
||||
if project_name is None:
|
||||
project_name = get_current_project_name()
|
||||
repre_id = container["representation"]
|
||||
if not _is_valid_representation_id(repre_id):
|
||||
raise ValueError(
|
||||
|
|
@ -542,9 +544,6 @@ def update_container(container, version=-1):
|
|||
)
|
||||
)
|
||||
|
||||
path = get_representation_path(new_representation)
|
||||
if not path or not os.path.exists(path):
|
||||
raise ValueError("Path {} doesn't exist".format(path))
|
||||
project_entity = ayon_api.get_project(project_name)
|
||||
context = {
|
||||
"project": project_entity,
|
||||
|
|
@ -553,6 +552,9 @@ def update_container(container, version=-1):
|
|||
"version": new_version,
|
||||
"representation": new_representation,
|
||||
}
|
||||
path = get_representation_path_from_context(context)
|
||||
if not path or not os.path.exists(path):
|
||||
raise ValueError("Path {} doesn't exist".format(path))
|
||||
|
||||
return Loader().update(container, context)
|
||||
|
||||
|
|
@ -588,7 +590,9 @@ def switch_container(container, representation, loader_plugin=None):
|
|||
)
|
||||
|
||||
# Get the new representation to switch to
|
||||
project_name = get_current_project_name()
|
||||
project_name = container.get("project_name")
|
||||
if project_name is None:
|
||||
project_name = get_current_project_name()
|
||||
|
||||
context = get_representation_context(
|
||||
project_name, representation["id"]
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
from __future__ import annotations
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
from typing import Any, Union
|
||||
|
||||
from ayon_core.settings import get_project_settings
|
||||
from ayon_core.lib import Logger
|
||||
|
|
@ -9,7 +11,7 @@ from .anatomy import Anatomy
|
|||
from .template_data import get_project_template_data
|
||||
|
||||
|
||||
def concatenate_splitted_paths(split_paths, anatomy):
|
||||
def concatenate_splitted_paths(split_paths, anatomy: Anatomy):
|
||||
log = Logger.get_logger("concatenate_splitted_paths")
|
||||
pattern_array = re.compile(r"\[.*\]")
|
||||
output = []
|
||||
|
|
@ -47,7 +49,7 @@ def concatenate_splitted_paths(split_paths, anatomy):
|
|||
return output
|
||||
|
||||
|
||||
def fill_paths(path_list, anatomy):
|
||||
def fill_paths(path_list: list[str], anatomy: Anatomy):
|
||||
format_data = get_project_template_data(project_name=anatomy.project_name)
|
||||
format_data["root"] = anatomy.roots
|
||||
filled_paths = []
|
||||
|
|
@ -59,7 +61,7 @@ def fill_paths(path_list, anatomy):
|
|||
return filled_paths
|
||||
|
||||
|
||||
def create_project_folders(project_name, basic_paths=None):
|
||||
def create_project_folders(project_name: str, basic_paths=None):
|
||||
log = Logger.get_logger("create_project_folders")
|
||||
anatomy = Anatomy(project_name)
|
||||
if basic_paths is None:
|
||||
|
|
@ -80,8 +82,19 @@ def create_project_folders(project_name, basic_paths=None):
|
|||
os.makedirs(path)
|
||||
|
||||
|
||||
def _list_path_items(folder_structure):
def _list_path_items(
        folder_structure: Union[dict[str, Any], list[str]]):
    output = []

    # Allow leaf folders of the `project_folder_structure` to use a list of
    # strings instead of a dictionary of keys with empty values.
    if isinstance(folder_structure, list):
        if not all(isinstance(item, str) for item in folder_structure):
            raise ValueError(
                f"List items must all be strings. Got: {folder_structure}")
        return [[path] for path in folder_structure]

    # Process key, value as key for folder names and value its subfolders
    for key, value in folder_structure.items():
        if not value:
            output.append(key)
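An illustrative `project_folder_structure` value showing the list-leaf form now accepted by `_list_path_items`; the folder names are examples.

# Leaf levels may be a plain list of folder names instead of a dict of
# keys with empty values; both branches below describe simple trees.
folder_structure = {
    "assets": {"characters": {}, "props": {}},
    "editorial": ["audio", "cuts", "reference"],
}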
|
||||
|
|
@ -99,7 +112,7 @@ def _list_path_items(folder_structure):
|
|||
return output
|
||||
|
||||
|
||||
def get_project_basic_paths(project_name):
|
||||
def get_project_basic_paths(project_name: str):
|
||||
project_settings = get_project_settings(project_name)
|
||||
folder_structure = (
|
||||
project_settings["core"]["project_folder_structure"]
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ from .constants import (
|
|||
ValidateContentsOrder,
|
||||
ValidateSceneOrder,
|
||||
ValidateMeshOrder,
|
||||
FARM_JOB_ENV_DATA_KEY,
|
||||
)
|
||||
|
||||
from .publish_plugins import (
|
||||
|
|
@ -59,6 +60,7 @@ __all__ = (
|
|||
"ValidateContentsOrder",
|
||||
"ValidateSceneOrder",
|
||||
"ValidateMeshOrder",
|
||||
"FARM_JOB_ENV_DATA_KEY",
|
||||
|
||||
"AbstractMetaInstancePlugin",
|
||||
"AbstractMetaContextPlugin",
|
||||
|
|
|
|||
|
|
@ -8,4 +8,5 @@ ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3
|
|||
|
||||
DEFAULT_PUBLISH_TEMPLATE = "default"
|
||||
DEFAULT_HERO_PUBLISH_TEMPLATE = "default"
|
||||
TRANSIENT_DIR_TEMPLATE = "default"
|
||||
|
||||
FARM_JOB_ENV_DATA_KEY: str = "farmJobEnv"
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ import os
|
|||
import sys
|
||||
import inspect
|
||||
import copy
|
||||
import tempfile
|
||||
import warnings
|
||||
import xml.etree.ElementTree
|
||||
from typing import Optional, Union, List
|
||||
|
||||
|
|
@ -18,15 +18,11 @@ from ayon_core.lib import (
|
|||
)
|
||||
from ayon_core.settings import get_project_settings
|
||||
from ayon_core.addon import AddonsManager
|
||||
from ayon_core.pipeline import (
|
||||
tempdir,
|
||||
Anatomy
|
||||
)
|
||||
from ayon_core.pipeline import get_staging_dir_info
|
||||
from ayon_core.pipeline.plugin_discover import DiscoverResult
|
||||
from .constants import (
|
||||
DEFAULT_PUBLISH_TEMPLATE,
|
||||
DEFAULT_HERO_PUBLISH_TEMPLATE,
|
||||
TRANSIENT_DIR_TEMPLATE
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -468,6 +464,12 @@ def filter_pyblish_plugins(plugins):
|
|||
if getattr(plugin, "enabled", True) is False:
|
||||
plugins.remove(plugin)
|
||||
|
||||
# Pyblish already operated a filter based on host.
|
||||
# But applying settings might have changed "hosts"
|
||||
# value in plugin so re-filter.
|
||||
elif not pyblish.plugin.host_is_compatible(plugin):
|
||||
plugins.remove(plugin)
|
||||
|
||||
|
||||
def get_errored_instances_from_context(context, plugin=None):
|
||||
"""Collect failed instances from pyblish context.
|
||||
|
|
@ -581,58 +583,6 @@ def context_plugin_should_run(plugin, context):
|
|||
return False
|
||||
|
||||
|
||||
def get_instance_staging_dir(instance):
|
||||
"""Unified way how staging dir is stored and created on instances.
|
||||
|
||||
First check if 'stagingDir' is already set in instance data.
|
||||
In case there already is new tempdir will not be created.
|
||||
|
||||
It also supports `AYON_TMPDIR`, so studio can define own temp
|
||||
shared repository per project or even per more granular context.
|
||||
Template formatting is supported also with optional keys. Folder is
|
||||
created in case it doesn't exists.
|
||||
|
||||
Available anatomy formatting keys:
|
||||
- root[work | <root name key>]
|
||||
- project[name | code]
|
||||
|
||||
Note:
|
||||
Staging dir does not have to be necessarily in tempdir so be careful
|
||||
about its usage.
|
||||
|
||||
Args:
|
||||
instance (pyblish.lib.Instance): Instance for which we want to get
|
||||
staging dir.
|
||||
|
||||
Returns:
|
||||
str: Path to staging dir of instance.
|
||||
"""
|
||||
staging_dir = instance.data.get('stagingDir')
|
||||
if staging_dir:
|
||||
return staging_dir
|
||||
|
||||
anatomy = instance.context.data.get("anatomy")
|
||||
|
||||
# get customized tempdir path from `AYON_TMPDIR` env var
|
||||
custom_temp_dir = tempdir.create_custom_tempdir(
|
||||
anatomy.project_name, anatomy)
|
||||
|
||||
if custom_temp_dir:
|
||||
staging_dir = os.path.normpath(
|
||||
tempfile.mkdtemp(
|
||||
prefix="pyblish_tmp_",
|
||||
dir=custom_temp_dir
|
||||
)
|
||||
)
|
||||
else:
|
||||
staging_dir = os.path.normpath(
|
||||
tempfile.mkdtemp(prefix="pyblish_tmp_")
|
||||
)
|
||||
instance.data['stagingDir'] = staging_dir
|
||||
|
||||
return staging_dir
|
||||
|
||||
|
||||
def get_publish_repre_path(instance, repre, only_published=False):
|
||||
"""Get representation path that can be used for integration.
|
||||
|
||||
|
|
@ -685,6 +635,8 @@ def get_publish_repre_path(instance, repre, only_published=False):
|
|||
return None
|
||||
|
||||
|
||||
# deprecated: backward compatibility only (2024-09-12)
|
||||
# TODO: remove in the future
|
||||
def get_custom_staging_dir_info(
|
||||
project_name,
|
||||
host_name,
|
||||
|
|
@ -694,67 +646,88 @@ def get_custom_staging_dir_info(
|
|||
product_name,
|
||||
project_settings=None,
|
||||
anatomy=None,
|
||||
log=None
|
||||
log=None,
|
||||
):
|
||||
"""Checks profiles if context should use special custom dir as staging.
|
||||
from ayon_core.pipeline.staging_dir import get_staging_dir_config
|
||||
warnings.warn(
|
||||
(
|
||||
"Function 'get_custom_staging_dir_info' in"
|
||||
" 'ayon_core.pipeline.publish' is deprecated. Please use"
|
||||
" 'get_custom_staging_dir_info'"
|
||||
" in 'ayon_core.pipeline.stagingdir'."
|
||||
),
|
||||
DeprecationWarning,
|
||||
)
|
||||
tr_data = get_staging_dir_config(
|
||||
project_name,
|
||||
task_type,
|
||||
task_name,
|
||||
product_type,
|
||||
product_name,
|
||||
host_name,
|
||||
project_settings=project_settings,
|
||||
anatomy=anatomy,
|
||||
log=log,
|
||||
)
|
||||
|
||||
Args:
|
||||
project_name (str)
|
||||
host_name (str)
|
||||
product_type (str)
|
||||
task_name (str)
|
||||
task_type (str)
|
||||
product_name (str)
|
||||
project_settings(Dict[str, Any]): Prepared project settings.
|
||||
anatomy (Dict[str, Any])
|
||||
log (Logger) (optional)
|
||||
if not tr_data:
|
||||
return None, None
|
||||
|
||||
return tr_data["template"], tr_data["persistence"]
|
||||
|
||||
|
||||
def get_instance_staging_dir(instance):
|
||||
"""Unified way how staging dir is stored and created on instances.
|
||||
|
||||
First check if 'stagingDir' is already set in instance data.
|
||||
In case there already is new tempdir will not be created.
|
||||
|
||||
Returns:
|
||||
(tuple)
|
||||
Raises:
|
||||
ValueError - if misconfigured template should be used
|
||||
str: Path to staging dir
|
||||
"""
|
||||
settings = project_settings or get_project_settings(project_name)
|
||||
custom_staging_dir_profiles = (settings["core"]
|
||||
["tools"]
|
||||
["publish"]
|
||||
["custom_staging_dir_profiles"])
|
||||
if not custom_staging_dir_profiles:
|
||||
return None, None
|
||||
staging_dir = instance.data.get("stagingDir")
|
||||
|
||||
if not log:
|
||||
log = Logger.get_logger("get_custom_staging_dir_info")
|
||||
if staging_dir:
|
||||
return staging_dir
|
||||
|
||||
filtering_criteria = {
|
||||
"hosts": host_name,
|
||||
"families": product_type,
|
||||
"task_names": task_name,
|
||||
"task_types": task_type,
|
||||
"subsets": product_name
|
||||
}
|
||||
profile = filter_profiles(custom_staging_dir_profiles,
|
||||
filtering_criteria,
|
||||
logger=log)
|
||||
anatomy_data = instance.data["anatomyData"]
|
||||
template_data = copy.deepcopy(anatomy_data)
|
||||
|
||||
if not profile or not profile["active"]:
|
||||
return None, None
|
||||
# context data based variables
|
||||
context = instance.context
|
||||
|
||||
if not anatomy:
|
||||
anatomy = Anatomy(project_name)
|
||||
# add current file as workfile name into formatting data
|
||||
current_file = context.data.get("currentFile")
|
||||
if current_file:
|
||||
workfile = os.path.basename(current_file)
|
||||
workfile_name, _ = os.path.splitext(workfile)
|
||||
template_data["workfile_name"] = workfile_name
|
||||
|
||||
template_name = profile["template_name"] or TRANSIENT_DIR_TEMPLATE
|
||||
|
||||
custom_staging_dir = anatomy.get_template_item(
|
||||
"staging", template_name, "directory", default=None
|
||||
staging_dir_info = get_staging_dir_info(
|
||||
context.data["projectEntity"],
|
||||
instance.data.get("folderEntity"),
|
||||
instance.data.get("taskEntity"),
|
||||
instance.data["productType"],
|
||||
instance.data["productName"],
|
||||
context.data["hostName"],
|
||||
anatomy=context.data["anatomy"],
|
||||
project_settings=context.data["project_settings"],
|
||||
template_data=template_data,
|
||||
always_return_path=True,
|
||||
username=context.data["user"],
|
||||
)
|
||||
if custom_staging_dir is None:
|
||||
raise ValueError((
|
||||
"Anatomy of project \"{}\" does not have set"
|
||||
" \"{}\" template key!"
|
||||
).format(project_name, template_name))
|
||||
is_persistent = profile["custom_staging_dir_persistent"]
|
||||
|
||||
return custom_staging_dir.template, is_persistent
|
||||
staging_dir_path = staging_dir_info.directory
|
||||
|
||||
# path might be already created by get_staging_dir_info
|
||||
os.makedirs(staging_dir_path, exist_ok=True)
|
||||
instance.data.update({
|
||||
"stagingDir": staging_dir_path,
|
||||
"stagingDir_persistent": staging_dir_info.is_persistent,
|
||||
"stagingDir_is_custom": staging_dir_info.is_custom
|
||||
})
|
||||
|
||||
return staging_dir_path
|
||||
|
||||
|
||||
def get_published_workfile_instance(context):
|
||||
|
|
@ -799,7 +772,7 @@ def replace_with_published_scene_path(instance, replace_in_path=True):
|
|||
return
|
||||
|
||||
# determine published path from Anatomy.
|
||||
template_data = workfile_instance.data.get("anatomyData")
|
||||
template_data = copy.deepcopy(workfile_instance.data["anatomyData"])
|
||||
rep = workfile_instance.data["representations"][0]
|
||||
template_data["representation"] = rep.get("name")
|
||||
template_data["ext"] = rep.get("ext")
|
||||
|
|
|
|||
|
|
@ -205,9 +205,9 @@ class AYONPyblishPluginMixin:
|
|||
if not cls.__instanceEnabled__:
|
||||
return False
|
||||
|
||||
for _ in pyblish.logic.plugins_by_families(
|
||||
[cls], [instance.product_type]
|
||||
):
|
||||
families = [instance.product_type]
|
||||
families.extend(instance.get("families", []))
|
||||
for _ in pyblish.logic.plugins_by_families([cls], families):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
|
@ -292,6 +292,9 @@ class OptionalPyblishPluginMixin(AYONPyblishPluginMixin):
|
|||
```
|
||||
"""
|
||||
|
||||
# Allow exposing tooltip from class with `optional_tooltip` attribute
|
||||
optional_tooltip: Optional[str] = None
|
||||
|
||||
@classmethod
|
||||
def get_attribute_defs(cls):
|
||||
"""Attribute definitions based on plugin's optional attribute."""
|
||||
|
|
@ -304,8 +307,14 @@ class OptionalPyblishPluginMixin(AYONPyblishPluginMixin):
|
|||
active = getattr(cls, "active", True)
|
||||
# Return boolean stored under 'active' key with label of the class name
|
||||
label = cls.label or cls.__name__
|
||||
|
||||
return [
|
||||
BoolDef("active", default=active, label=label)
|
||||
BoolDef(
|
||||
"active",
|
||||
default=active,
|
||||
label=label,
|
||||
tooltip=cls.optional_tooltip,
|
||||
)
|
||||
]
|
||||
|
||||
def is_active(self, data):
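A minimal sketch of a plugin opting into the new tooltip; the validator itself is hypothetical, only the `optional_tooltip` attribute comes from this change.

import pyblish.api
from ayon_core.pipeline.publish import OptionalPyblishPluginMixin


class ValidateExampleNaming(pyblish.api.InstancePlugin,
                            OptionalPyblishPluginMixin):
    """Hypothetical optional validator."""
    label = "Validate Example Naming"
    order = pyblish.api.ValidatorOrder
    optional = True
    # Shown as the tooltip of the generated "active" toggle in the publisher.
    optional_tooltip = "Checks that product names follow studio conventions."

    def process(self, instance):
        if not self.is_active(instance.data):
            return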
|
||||
|
|
|
|||
client/ayon_core/pipeline/staging_dir.py (new file, 242 lines)
@@ -0,0 +1,242 @@
import logging
|
||||
import warnings
|
||||
from typing import Optional, Dict, Any
|
||||
from dataclasses import dataclass
|
||||
|
||||
from ayon_core.lib import Logger, filter_profiles
|
||||
from ayon_core.settings import get_project_settings
|
||||
|
||||
from .template_data import get_template_data
|
||||
from .anatomy import Anatomy
|
||||
from .tempdir import get_temp_dir
|
||||
|
||||
|
||||
@dataclass
class StagingDir:
    directory: str
    is_persistent: bool
    # Whether the staging dir is a custom staging dir
    is_custom: bool

    def __setattr__(self, key, value):
        if key == "persistent":
            warnings.warn(
                "'StagingDir.persistent' is deprecated."
                " Use 'StagingDir.is_persistent' instead.",
                DeprecationWarning
            )
            key = "is_persistent"
        super().__setattr__(key, value)

    @property
    def persistent(self):
        warnings.warn(
            "'StagingDir.persistent' is deprecated."
            " Use 'StagingDir.is_persistent' instead.",
            DeprecationWarning
        )
        return self.is_persistent

def get_staging_dir_config(
|
||||
project_name: str,
|
||||
task_type: Optional[str],
|
||||
task_name: Optional[str],
|
||||
product_type: str,
|
||||
product_name: str,
|
||||
host_name: str,
|
||||
project_settings: Optional[Dict[str, Any]] = None,
|
||||
anatomy: Optional[Anatomy] = None,
|
||||
log: Optional[logging.Logger] = None,
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""Get matching staging dir profile.
|
||||
|
||||
Args:
|
||||
host_name (str): Name of host.
|
||||
project_name (str): Name of project.
|
||||
task_type (Optional[str]): Type of task.
|
||||
task_name (Optional[str]): Name of task.
|
||||
product_type (str): Type of product.
|
||||
product_name (str): Name of product.
|
||||
project_settings(Dict[str, Any]): Prepared project settings.
|
||||
anatomy (Dict[str, Any])
|
||||
log (Optional[logging.Logger])
|
||||
|
||||
Returns:
|
||||
Dict or None: Data with directory template and is_persistent or None
|
||||
|
||||
Raises:
|
||||
KeyError - if misconfigured template should be used
|
||||
|
||||
"""
|
||||
settings = project_settings or get_project_settings(project_name)
|
||||
|
||||
staging_dir_profiles = settings["core"]["tools"]["publish"][
|
||||
"custom_staging_dir_profiles"
|
||||
]
|
||||
|
||||
if not staging_dir_profiles:
|
||||
return None
|
||||
|
||||
if not log:
|
||||
log = Logger.get_logger("get_staging_dir_config")
|
||||
|
||||
filtering_criteria = {
|
||||
"hosts": host_name,
|
||||
"task_types": task_type,
|
||||
"task_names": task_name,
|
||||
"product_types": product_type,
|
||||
"product_names": product_name,
|
||||
}
|
||||
profile = filter_profiles(
|
||||
staging_dir_profiles, filtering_criteria, logger=log)
|
||||
|
||||
if not profile or not profile["active"]:
|
||||
return None
|
||||
|
||||
if not anatomy:
|
||||
anatomy = Anatomy(project_name)
|
||||
|
||||
# get template from template name
|
||||
template_name = profile["template_name"]
|
||||
|
||||
template = anatomy.get_template_item("staging", template_name)
|
||||
|
||||
if not template:
|
||||
# template should always be found either from anatomy or from profile
|
||||
raise KeyError(
|
||||
f"Staging template '{template_name}' was not found."
|
||||
"Check project anatomy or settings at: "
|
||||
"'ayon+settings://core/tools/publish/custom_staging_dir_profiles'"
|
||||
)
|
||||
|
||||
data_persistence = profile["custom_staging_dir_persistent"]
|
||||
|
||||
return {"template": template, "persistence": data_persistence}
|
||||
|
||||
|
||||
def get_staging_dir_info(
|
||||
project_entity: Dict[str, Any],
|
||||
folder_entity: Optional[Dict[str, Any]],
|
||||
task_entity: Optional[Dict[str, Any]],
|
||||
product_type: str,
|
||||
product_name: str,
|
||||
host_name: str,
|
||||
anatomy: Optional[Anatomy] = None,
|
||||
project_settings: Optional[Dict[str, Any]] = None,
|
||||
template_data: Optional[Dict[str, Any]] = None,
|
||||
always_return_path: bool = True,
|
||||
force_tmp_dir: bool = False,
|
||||
logger: Optional[logging.Logger] = None,
|
||||
prefix: Optional[str] = None,
|
||||
suffix: Optional[str] = None,
|
||||
username: Optional[str] = None,
|
||||
) -> Optional[StagingDir]:
|
||||
"""Get staging dir info data.
|
||||
|
||||
If `force_temp` is set, staging dir will be created as tempdir.
|
||||
If `always_get_some_dir` is set, staging dir will be created as tempdir if
|
||||
no staging dir profile is found.
|
||||
If `prefix` or `suffix` is not set, default values will be used.
|
||||
|
||||
Arguments:
|
||||
project_entity (Dict[str, Any]): Project entity.
|
||||
folder_entity (Optional[Dict[str, Any]]): Folder entity.
|
||||
task_entity (Optional[Dict[str, Any]]): Task entity.
|
||||
product_type (str): Type of product.
|
||||
product_name (str): Name of product.
|
||||
host_name (str): Name of host.
|
||||
anatomy (Optional[Anatomy]): Anatomy object.
|
||||
project_settings (Optional[Dict[str, Any]]): Prepared project settings.
|
||||
template_data (Optional[Dict[str, Any]]): Additional data for
|
||||
formatting staging dir template.
|
||||
always_return_path (Optional[bool]): If True, staging dir will be
|
||||
created as tempdir if no staging dir profile is found. Input value
|
||||
False will return None if no staging dir profile is found.
|
||||
force_tmp_dir (Optional[bool]): If True, staging dir will be created as
|
||||
tempdir.
|
||||
logger (Optional[logging.Logger]): Logger instance.
|
||||
prefix (Optional[str]) Optional prefix for staging dir name.
|
||||
suffix (Optional[str]): Optional suffix for staging dir name.
|
||||
username (Optional[str]): AYON Username.
|
||||
|
||||
Returns:
|
||||
Optional[StagingDir]: Staging dir info data
|
||||
|
||||
"""
|
||||
log = logger or Logger.get_logger("get_staging_dir_info")
|
||||
|
||||
if anatomy is None:
|
||||
anatomy = Anatomy(
|
||||
project_entity["name"], project_entity=project_entity
|
||||
)
|
||||
|
||||
if force_tmp_dir:
|
||||
return StagingDir(
|
||||
get_temp_dir(
|
||||
project_name=project_entity["name"],
|
||||
anatomy=anatomy,
|
||||
prefix=prefix,
|
||||
suffix=suffix,
|
||||
),
|
||||
is_persistent=False,
|
||||
is_custom=False
|
||||
)
|
||||
|
||||
# making few queries to database
|
||||
ctx_data = get_template_data(
|
||||
project_entity, folder_entity, task_entity, host_name,
|
||||
settings=project_settings,
|
||||
username=username
|
||||
)
|
||||
|
||||
# add additional data
|
||||
ctx_data["product"] = {
|
||||
"type": product_type,
|
||||
"name": product_name
|
||||
}
|
||||
|
||||
# add additional template formatting data
|
||||
if template_data:
|
||||
ctx_data.update(template_data)
|
||||
|
||||
task_name = task_type = None
|
||||
if task_entity:
|
||||
task_name = task_entity["name"]
|
||||
task_type = task_entity["taskType"]
|
||||
|
||||
# get staging dir config
|
||||
staging_dir_config = get_staging_dir_config(
|
||||
project_entity["name"],
|
||||
task_type,
|
||||
task_name ,
|
||||
product_type,
|
||||
product_name,
|
||||
host_name,
|
||||
project_settings=project_settings,
|
||||
anatomy=anatomy,
|
||||
log=log,
|
||||
)
|
||||
|
||||
if staging_dir_config:
|
||||
dir_template = staging_dir_config["template"]["directory"]
|
||||
return StagingDir(
|
||||
dir_template.format_strict(ctx_data),
|
||||
is_persistent=staging_dir_config["persistence"],
|
||||
is_custom=True
|
||||
)
|
||||
|
||||
# no config found but force an output
|
||||
if always_return_path:
|
||||
return StagingDir(
|
||||
get_temp_dir(
|
||||
project_name=project_entity["name"],
|
||||
anatomy=anatomy,
|
||||
prefix=prefix,
|
||||
suffix=suffix,
|
||||
),
|
||||
is_persistent=False,
|
||||
is_custom=False
|
||||
)
|
||||
|
||||
return None
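A hedged usage sketch of `get_staging_dir_info` from a publish plugin; which entities the collectors put into context and instance data is an assumption here.

from ayon_core.pipeline import get_staging_dir_info

staging_dir = get_staging_dir_info(
    context.data["projectEntity"],
    instance.data.get("folderEntity"),
    instance.data.get("taskEntity"),
    instance.data["productType"],
    instance.data["productName"],
    context.data["hostName"],
    anatomy=context.data["anatomy"],
    project_settings=context.data["project_settings"],
    always_return_path=True,  # fall back to a tempdir when no profile matches
)
if staging_dir is not None:
    instance.data["stagingDir"] = staging_dir.directory
    instance.data["stagingDir_persistent"] = staging_dir.is_persistent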
|
||||
|
|
@ -3,11 +3,90 @@ Temporary folder operations
|
|||
"""
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
import warnings
|
||||
|
||||
from ayon_core.lib import StringTemplate
|
||||
from ayon_core.pipeline import Anatomy
|
||||
|
||||
|
||||
def get_temp_dir(
|
||||
project_name, anatomy=None, prefix=None, suffix=None, use_local_temp=False
|
||||
):
|
||||
"""Get temporary dir path.
|
||||
|
||||
If `use_local_temp` is set, tempdir will be created in local tempdir.
|
||||
If `anatomy` is not set, default anatomy will be used.
|
||||
If `prefix` or `suffix` is not set, default values will be used.
|
||||
|
||||
It also supports `AYON_TMPDIR`, so studio can define own temp
|
||||
shared repository per project or even per more granular context.
|
||||
Template formatting is supported also with optional keys. Folder is
|
||||
created in case it doesn't exists.
|
||||
|
||||
Args:
|
||||
project_name (str): Name of project.
|
||||
anatomy (Optional[Anatomy]): Project Anatomy object.
|
||||
suffix (Optional[str]): Suffix for tempdir.
|
||||
prefix (Optional[str]): Prefix for tempdir.
|
||||
use_local_temp (Optional[bool]): If True, temp dir will be created in
|
||||
local tempdir.
|
||||
|
||||
Returns:
|
||||
str: Path to staging dir of instance.
|
||||
|
||||
"""
|
||||
if prefix is None:
|
||||
prefix = "ay_tmp_"
|
||||
suffix = suffix or ""
|
||||
|
||||
if use_local_temp:
|
||||
return _create_local_staging_dir(prefix, suffix)
|
||||
|
||||
# make sure anatomy is set
|
||||
if not anatomy:
|
||||
anatomy = Anatomy(project_name)
|
||||
|
||||
# get customized tempdir path from `OPENPYPE_TMPDIR` env var
|
||||
custom_temp_dir = _create_custom_tempdir(anatomy.project_name, anatomy)
|
||||
|
||||
return _create_local_staging_dir(prefix, suffix, dirpath=custom_temp_dir)
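A small usage example of `get_temp_dir`; the project name and suffix are made up.

from ayon_core.pipeline.tempdir import get_temp_dir

# Creates a unique directory, honoring AYON_TMPDIR when it is set.
tmp_dir = get_temp_dir("my_project", prefix="ay_tmp_", suffix="_review")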
|
||||
|
||||
|
||||
def _create_local_staging_dir(prefix, suffix, dirpath=None):
|
||||
"""Create local staging dir
|
||||
|
||||
Args:
|
||||
prefix (str): prefix for tempdir
|
||||
suffix (str): suffix for tempdir
|
||||
dirpath (Optional[str]): path to tempdir
|
||||
|
||||
Returns:
|
||||
str: path to tempdir
|
||||
"""
|
||||
# use pathlib for creating tempdir
|
||||
return tempfile.mkdtemp(
|
||||
prefix=prefix, suffix=suffix, dir=dirpath
|
||||
)
|
||||
|
||||
|
||||
def create_custom_tempdir(project_name, anatomy=None):
|
||||
"""Backward compatibility deprecated since 2024/12/09.
|
||||
"""
|
||||
warnings.warn(
|
||||
"Used deprecated 'create_custom_tempdir' "
|
||||
"use 'ayon_core.pipeline.tempdir.get_temp_dir' instead.",
|
||||
DeprecationWarning,
|
||||
)
|
||||
|
||||
if anatomy is None:
|
||||
anatomy = Anatomy(project_name)
|
||||
|
||||
return _create_custom_tempdir(project_name, anatomy)
|
||||
|
||||
|
||||
def _create_custom_tempdir(project_name, anatomy):
|
||||
""" Create custom tempdir
|
||||
|
||||
Template path formatting is supporting:
|
||||
|
|
@ -18,42 +97,35 @@ def create_custom_tempdir(project_name, anatomy=None):
|
|||
|
||||
Args:
|
||||
project_name (str): project name
|
||||
anatomy (ayon_core.pipeline.Anatomy)[optional]: Anatomy object
|
||||
anatomy (ayon_core.pipeline.Anatomy): Anatomy object
|
||||
|
||||
Returns:
|
||||
str | None: formatted path or None
|
||||
"""
|
||||
env_tmpdir = os.getenv("AYON_TMPDIR")
|
||||
if not env_tmpdir:
|
||||
return
|
||||
return None
|
||||
|
||||
custom_tempdir = None
|
||||
if "{" in env_tmpdir:
|
||||
if anatomy is None:
|
||||
anatomy = Anatomy(project_name)
|
||||
# create base formate data
|
||||
data = {
|
||||
template_data = {
|
||||
"root": anatomy.roots,
|
||||
"project": {
|
||||
"name": anatomy.project_name,
|
||||
"code": anatomy.project_code,
|
||||
}
|
||||
},
|
||||
}
|
||||
# path is anatomy template
|
||||
custom_tempdir = StringTemplate.format_template(
|
||||
env_tmpdir, data).normalized()
|
||||
env_tmpdir, template_data)
|
||||
|
||||
custom_tempdir_path = Path(custom_tempdir)
|
||||
|
||||
else:
|
||||
# path is absolute
|
||||
custom_tempdir = env_tmpdir
|
||||
custom_tempdir_path = Path(env_tmpdir)
|
||||
|
||||
# create the dir path if it doesn't exists
|
||||
if not os.path.exists(custom_tempdir):
|
||||
try:
|
||||
# create it if it doesn't exists
|
||||
os.makedirs(custom_tempdir)
|
||||
except IOError as error:
|
||||
raise IOError(
|
||||
"Path couldn't be created: {}".format(error))
|
||||
custom_tempdir_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
return custom_tempdir
|
||||
return custom_tempdir_path.as_posix()
|
||||
|
|
|
|||
|
|
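# Usage sketch (not part of the diff): how a studio might point AYON_TMPDIR at
# a templated, project-aware scratch location and then request a temp dir.
# The template value and project name below are examples, not defaults.
import os

os.environ["AYON_TMPDIR"] = "{root[work]}/{project[name]}/tmp"

from ayon_core.pipeline.tempdir import get_temp_dir

# Resolves the template against the project Anatomy (roots, name, code),
# creates the folder if missing and returns a unique "ay_tmp_*" dir inside it.
staging = get_temp_dir("my_project")

# Ignore the shared location and force a machine-local tempdir instead.
local_staging = get_temp_dir("my_project", use_local_temp=True)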
@@ -4,7 +4,7 @@ from ayon_core.settings import get_studio_settings
from ayon_core.lib.local_settings import get_ayon_username


def get_general_template_data(settings=None):
def get_general_template_data(settings=None, username=None):
    """General template data based on system settings or machine.

    Output contains formatting keys:

@@ -14,17 +14,22 @@ def get_general_template_data(settings=None):

    Args:
        settings (Dict[str, Any]): Studio or project settings.
        username (Optional[str]): AYON Username.
    """

    if not settings:
        settings = get_studio_settings()

    if username is None:
        username = get_ayon_username()

    core_settings = settings["core"]
    return {
        "studio": {
            "name": core_settings["studio_name"],
            "code": core_settings["studio_code"]
        },
        "user": get_ayon_username()
        "user": username
    }
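# Usage sketch (not part of the diff): the new `username` argument lets callers
# that already know the user skip the get_ayon_username() lookup. The keys in
# the result match the function above; the values are example data only.
from ayon_core.pipeline.template_data import get_general_template_data

data = get_general_template_data(username="jdoe")
# data == {
#     "studio": {"name": "My Studio", "code": "mst"},
#     "user": "jdoe",
# }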
@@ -87,14 +92,13 @@ def get_folder_template_data(folder_entity, project_name):
    """

    path = folder_entity["path"]
    hierarchy_parts = path.split("/")
    # Remove empty string from the beginning
    hierarchy_parts.pop(0)
    # Remove empty string from the beginning and split by '/'
    parents = path.lstrip("/").split("/")
    # Remove last part which is folder name
    folder_name = hierarchy_parts.pop(-1)
    hierarchy = "/".join(hierarchy_parts)
    if hierarchy_parts:
        parent_name = hierarchy_parts[-1]
    folder_name = parents.pop(-1)
    hierarchy = "/".join(parents)
    if parents:
        parent_name = parents[-1]
    else:
        parent_name = project_name

@@ -103,6 +107,7 @@ def get_folder_template_data(folder_entity, project_name):
            "name": folder_name,
            "type": folder_entity["folderType"],
            "path": path,
            "parents": parents,
        },
        "asset": folder_name,
        "hierarchy": hierarchy,
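# Worked example (not part of the diff) of the parsing above, using an assumed
# folder path; it mirrors the lstrip/split/pop logic line by line.
path = "/assets/characters/hero"
parents = path.lstrip("/").split("/")   # ["assets", "characters", "hero"]
folder_name = parents.pop(-1)           # "hero"
hierarchy = "/".join(parents)           # "assets/characters"
parent_name = parents[-1] if parents else "<project name>"  # "characters"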
@ -145,6 +150,7 @@ def get_template_data(
|
|||
task_entity=None,
|
||||
host_name=None,
|
||||
settings=None,
|
||||
username=None
|
||||
):
|
||||
"""Prepare data for templates filling from entered documents and info.
|
||||
|
||||
|
|
@ -167,12 +173,13 @@ def get_template_data(
|
|||
host_name (Optional[str]): Used to fill '{app}' key.
|
||||
settings (Union[Dict, None]): Prepared studio or project settings.
|
||||
They're queried if not passed (may be slower).
|
||||
username (Optional[str]): AYON Username.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Data prepared for filling workdir template.
|
||||
"""
|
||||
|
||||
template_data = get_general_template_data(settings)
|
||||
template_data = get_general_template_data(settings, username=username)
|
||||
template_data.update(get_project_template_data(project_entity))
|
||||
if folder_entity:
|
||||
template_data.update(get_folder_template_data(
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ import os
|
|||
import re
|
||||
import copy
|
||||
import platform
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
import ayon_api
|
||||
|
||||
|
|
@ -16,12 +17,12 @@ from ayon_core.pipeline.template_data import get_template_data
|
|||
|
||||
|
||||
def get_workfile_template_key_from_context(
|
||||
project_name,
|
||||
folder_path,
|
||||
task_name,
|
||||
host_name,
|
||||
project_settings=None
|
||||
):
|
||||
project_name: str,
|
||||
folder_path: str,
|
||||
task_name: str,
|
||||
host_name: str,
|
||||
project_settings: Optional[Dict[str, Any]] = None,
|
||||
) -> str:
|
||||
"""Helper function to get template key for workfile template.
|
||||
|
||||
Do the same as `get_workfile_template_key` but returns value for "session
|
||||
|
|
@ -34,15 +35,23 @@ def get_workfile_template_key_from_context(
|
|||
host_name (str): Host name.
|
||||
project_settings (Dict[str, Any]): Project settings for passed
|
||||
'project_name'. Not required at all but makes function faster.
|
||||
"""
|
||||
|
||||
Returns:
|
||||
str: Workfile template name.
|
||||
|
||||
"""
|
||||
folder_entity = ayon_api.get_folder_by_path(
|
||||
project_name, folder_path, fields={"id"}
|
||||
project_name,
|
||||
folder_path,
|
||||
fields={"id"},
|
||||
)
|
||||
task_entity = ayon_api.get_task_by_name(
|
||||
project_name, folder_entity["id"], task_name
|
||||
project_name,
|
||||
folder_entity["id"],
|
||||
task_name,
|
||||
fields={"taskType"},
|
||||
)
|
||||
task_type = task_entity.get("type")
|
||||
task_type = task_entity.get("taskType")
|
||||
|
||||
return get_workfile_template_key(
|
||||
project_name, task_type, host_name, project_settings
|
||||
|
|
|
|||
|
|
@ -54,6 +54,7 @@ from ayon_core.pipeline.plugin_discover import (
|
|||
from ayon_core.pipeline.create import (
|
||||
discover_legacy_creator_plugins,
|
||||
CreateContext,
|
||||
HiddenCreator,
|
||||
)
|
||||
|
||||
_NOT_SET = object()
|
||||
|
|
@ -309,7 +310,13 @@ class AbstractTemplateBuilder(ABC):
|
|||
self._creators_by_name = creators_by_name
|
||||
|
||||
def _collect_creators(self):
|
||||
self._creators_by_name = dict(self.create_context.creators)
|
||||
self._creators_by_name = {
|
||||
identifier: creator
|
||||
for identifier, creator
|
||||
in self.create_context.manual_creators.items()
|
||||
# Do not list HiddenCreator even though it is a 'manual creator'
|
||||
if not isinstance(creator, HiddenCreator)
|
||||
}
|
||||
|
||||
def get_creators_by_name(self):
|
||||
if self._creators_by_name is None:
|
||||
|
|
|
|||
|
|
@ -99,7 +99,7 @@ class OpenTaskPath(LauncherAction):
|
|||
if platform_name == "windows":
|
||||
args = ["start", path]
|
||||
elif platform_name == "darwin":
|
||||
args = ["open", "-na", path]
|
||||
args = ["open", "-R", path]
|
||||
elif platform_name == "linux":
|
||||
args = ["xdg-open", path]
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -116,11 +116,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
|
|||
|
||||
if not_found_folder_paths:
|
||||
joined_folder_paths = ", ".join(
|
||||
["\"{}\"".format(path) for path in not_found_folder_paths]
|
||||
[f"\"{path}\"" for path in not_found_folder_paths]
|
||||
)
|
||||
self.log.warning(
|
||||
f"Not found folder entities with paths {joined_folder_paths}."
|
||||
)
|
||||
self.log.warning((
|
||||
"Not found folder entities with paths \"{}\"."
|
||||
).format(joined_folder_paths))
|
||||
|
||||
def fill_missing_task_entities(self, context, project_name):
|
||||
self.log.debug("Querying task entities for instances.")
|
||||
|
|
@ -413,14 +413,16 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
|
|||
# Backwards compatible (Deprecated since 24/06/06)
|
||||
or instance.data.get("newAssetPublishing")
|
||||
):
|
||||
hierarchy = instance.data["hierarchy"]
|
||||
anatomy_data["hierarchy"] = hierarchy
|
||||
folder_path = instance.data["folderPath"]
|
||||
parents = folder_path.lstrip("/").split("/")
|
||||
folder_name = parents.pop(-1)
|
||||
|
||||
parent_name = project_entity["name"]
|
||||
if hierarchy:
|
||||
parent_name = hierarchy.split("/")[-1]
|
||||
hierarchy = ""
|
||||
if parents:
|
||||
parent_name = parents[-1]
|
||||
hierarchy = "/".join(parents)
|
||||
|
||||
folder_name = instance.data["folderPath"].split("/")[-1]
|
||||
anatomy_data.update({
|
||||
"asset": folder_name,
|
||||
"hierarchy": hierarchy,
|
||||
|
|
@ -432,6 +434,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
|
|||
# Using 'Shot' is current default behavior of editorial
|
||||
# (or 'newHierarchyIntegration') publishing.
|
||||
"type": "Shot",
|
||||
"parents": parents,
|
||||
},
|
||||
})
|
||||
|
||||
|
|
|
|||
|
|
@ -1,76 +0,0 @@
|
|||
"""
|
||||
Requires:
|
||||
anatomy
|
||||
|
||||
|
||||
Provides:
|
||||
instance.data -> stagingDir (folder path)
|
||||
-> stagingDir_persistent (bool)
|
||||
"""
|
||||
import copy
|
||||
import os.path
|
||||
|
||||
import pyblish.api
|
||||
|
||||
from ayon_core.pipeline.publish.lib import get_custom_staging_dir_info
|
||||
|
||||
|
||||
class CollectCustomStagingDir(pyblish.api.InstancePlugin):
|
||||
"""Looks through profiles if stagingDir should be persistent and in special
|
||||
location.
|
||||
|
||||
Transient staging dir could be useful in specific use cases where is
|
||||
desirable to have temporary renders in specific, persistent folders, could
|
||||
be on disks optimized for speed for example.
|
||||
|
||||
It is studio responsibility to clean up obsolete folders with data.
|
||||
|
||||
Location of the folder is configured in `project_anatomy/templates/others`.
|
||||
('transient' key is expected, with 'folder' key)
|
||||
|
||||
Which family/task type/product is applicable is configured in:
|
||||
`project_settings/global/tools/publish/custom_staging_dir_profiles`
|
||||
|
||||
"""
|
||||
label = "Collect Custom Staging Directory"
|
||||
order = pyblish.api.CollectorOrder + 0.4990
|
||||
|
||||
template_key = "transient"
|
||||
|
||||
def process(self, instance):
|
||||
product_type = instance.data["productType"]
|
||||
product_name = instance.data["productName"]
|
||||
host_name = instance.context.data["hostName"]
|
||||
project_name = instance.context.data["projectName"]
|
||||
project_settings = instance.context.data["project_settings"]
|
||||
anatomy = instance.context.data["anatomy"]
|
||||
task = instance.data["anatomyData"].get("task", {})
|
||||
|
||||
transient_tml, is_persistent = get_custom_staging_dir_info(
|
||||
project_name,
|
||||
host_name,
|
||||
product_type,
|
||||
product_name,
|
||||
task.get("name"),
|
||||
task.get("type"),
|
||||
project_settings=project_settings,
|
||||
anatomy=anatomy,
|
||||
log=self.log)
|
||||
|
||||
if transient_tml:
|
||||
anatomy_data = copy.deepcopy(instance.data["anatomyData"])
|
||||
anatomy_data["root"] = anatomy.roots
|
||||
scene_name = instance.context.data.get("currentFile")
|
||||
if scene_name:
|
||||
anatomy_data["scene_name"] = os.path.basename(scene_name)
|
||||
transient_dir = transient_tml.format(**anatomy_data)
|
||||
instance.data["stagingDir"] = transient_dir
|
||||
|
||||
instance.data["stagingDir_persistent"] = is_persistent
|
||||
result_str = "Adding '{}' as".format(transient_dir)
|
||||
else:
|
||||
result_str = "Not adding"
|
||||
|
||||
self.log.debug("{} custom staging dir for instance with '{}'".format(
|
||||
result_str, product_type
|
||||
))
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
import os
|
||||
|
||||
import pyblish.api
|
||||
|
||||
from ayon_core.lib import get_ayon_username
|
||||
from ayon_core.pipeline.publish import FARM_JOB_ENV_DATA_KEY
|
||||
|
||||
|
||||
class CollectCoreJobEnvVars(pyblish.api.ContextPlugin):
|
||||
"""Collect set of environment variables to submit with deadline jobs"""
|
||||
order = pyblish.api.CollectorOrder - 0.45
|
||||
label = "AYON core Farm Environment Variables"
|
||||
targets = ["local"]
|
||||
|
||||
def process(self, context):
|
||||
env = context.data.setdefault(FARM_JOB_ENV_DATA_KEY, {})
|
||||
|
||||
# Disable colored logs on farm
|
||||
for key, value in (
|
||||
("AYON_LOG_NO_COLORS", "1"),
|
||||
("AYON_PROJECT_NAME", context.data["projectName"]),
|
||||
("AYON_FOLDER_PATH", context.data.get("folderPath")),
|
||||
("AYON_TASK_NAME", context.data.get("task")),
|
||||
# NOTE we should use 'context.data["user"]' but that has higher
|
||||
# order.
|
||||
("AYON_USERNAME", get_ayon_username()),
|
||||
("AYON_HOST_NAME", context.data["hostName"]),
|
||||
):
|
||||
if value:
|
||||
self.log.debug(f"Setting job env: {key}: {value}")
|
||||
env[key] = value
|
||||
|
||||
for key in [
|
||||
"AYON_BUNDLE_NAME",
|
||||
"AYON_USE_STAGING",
|
||||
"AYON_IN_TESTS",
|
||||
# NOTE Not sure why workdir is needed?
|
||||
"AYON_WORKDIR",
|
||||
# DEPRECATED remove when deadline stops using it (added in 1.1.2)
|
||||
"AYON_DEFAULT_SETTINGS_VARIANT",
|
||||
]:
|
||||
value = os.getenv(key)
|
||||
if value:
|
||||
self.log.debug(f"Setting job env: {key}: {value}")
|
||||
env[key] = value
|
||||
|
||||
|
|
@ -13,8 +13,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
|
|||
|
||||
label = "Collect Hierarchy"
|
||||
order = pyblish.api.CollectorOrder - 0.076
|
||||
families = ["shot"]
|
||||
hosts = ["resolve", "hiero", "flame"]
|
||||
hosts = ["resolve", "hiero", "flame", "traypublisher"]
|
||||
|
||||
def process(self, context):
|
||||
project_name = context.data["projectName"]
|
||||
|
|
@ -32,36 +31,50 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
|
|||
product_type = instance.data["productType"]
|
||||
families = instance.data["families"]
|
||||
|
||||
# exclude other families then self.families with intersection
|
||||
if not set(self.families).intersection(
|
||||
set(families + [product_type])
|
||||
):
|
||||
# exclude other families then "shot" with intersection
|
||||
if "shot" not in (families + [product_type]):
|
||||
self.log.debug("Skipping not a shot: {}".format(families))
|
||||
continue
|
||||
|
||||
# exclude if not masterLayer True
|
||||
# Skip if is not a hero track
|
||||
if not instance.data.get("heroTrack"):
|
||||
self.log.debug("Skipping not a shot from hero track")
|
||||
continue
|
||||
|
||||
shot_data = {
|
||||
"entity_type": "folder",
|
||||
# WARNING Default folder type is hardcoded
|
||||
# suppose that all instances are Shots
|
||||
"folder_type": "Shot",
|
||||
# WARNING unless overwritten, default folder type is hardcoded
|
||||
# to shot
|
||||
"folder_type": instance.data.get("folder_type") or "Shot",
|
||||
"tasks": instance.data.get("tasks") or {},
|
||||
"comments": instance.data.get("comments", []),
|
||||
"attributes": {
|
||||
"handleStart": instance.data["handleStart"],
|
||||
"handleEnd": instance.data["handleEnd"],
|
||||
"frameStart": instance.data["frameStart"],
|
||||
"frameEnd": instance.data["frameEnd"],
|
||||
"clipIn": instance.data["clipIn"],
|
||||
"clipOut": instance.data["clipOut"],
|
||||
"fps": instance.data["fps"],
|
||||
"resolutionWidth": instance.data["resolutionWidth"],
|
||||
"resolutionHeight": instance.data["resolutionHeight"],
|
||||
"pixelAspect": instance.data["pixelAspect"],
|
||||
},
|
||||
}
|
||||
|
||||
shot_data["attributes"] = {}
|
||||
SHOT_ATTRS = (
|
||||
"handleStart",
|
||||
"handleEnd",
|
||||
"frameStart",
|
||||
"frameEnd",
|
||||
"clipIn",
|
||||
"clipOut",
|
||||
"fps",
|
||||
"resolutionWidth",
|
||||
"resolutionHeight",
|
||||
"pixelAspect",
|
||||
)
|
||||
for shot_attr in SHOT_ATTRS:
|
||||
attr_value = instance.data.get(shot_attr)
|
||||
if attr_value is None:
|
||||
# Shot attribute might not be defined (e.g. CSV ingest)
|
||||
self.log.debug(
|
||||
"%s shot attribute is not defined for instance.",
|
||||
shot_attr
|
||||
)
|
||||
continue
|
||||
|
||||
shot_data["attributes"][shot_attr] = attr_value
|
||||
|
||||
# Split by '/' for AYON where asset is a path
|
||||
name = instance.data["folderPath"].split("/")[-1]
|
||||
actual = {name: shot_data}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,47 @@
|
|||
"""
|
||||
Requires:
|
||||
anatomy
|
||||
|
||||
|
||||
Provides:
|
||||
instance.data -> stagingDir (folder path)
|
||||
-> stagingDir_persistent (bool)
|
||||
"""
|
||||
|
||||
import pyblish.api
|
||||
|
||||
from ayon_core.pipeline.publish import get_instance_staging_dir
|
||||
|
||||
|
||||
class CollectManagedStagingDir(pyblish.api.InstancePlugin):
|
||||
"""Apply matching Staging Dir profile to a instance.
|
||||
|
||||
Apply Staging dir via profiles could be useful in specific use cases
|
||||
where is desirable to have temporary renders in specific,
|
||||
persistent folders, could be on disks optimized for speed for example.
|
||||
|
||||
It is studio's responsibility to clean up obsolete folders with data.
|
||||
|
||||
Location of the folder is configured in:
|
||||
`ayon+anatomy://_/templates/staging`.
|
||||
|
||||
Which family/task type/subset is applicable is configured in:
|
||||
`ayon+settings://core/tools/publish/custom_staging_dir_profiles`
|
||||
"""
|
||||
|
||||
label = "Collect Managed Staging Directory"
|
||||
order = pyblish.api.CollectorOrder + 0.4990
|
||||
|
||||
def process(self, instance):
|
||||
""" Collect the staging data and stores it to the instance.
|
||||
|
||||
Args:
|
||||
instance (object): The instance to inspect.
|
||||
"""
|
||||
staging_dir_path = get_instance_staging_dir(instance)
|
||||
persistance = instance.data.get("stagingDir_persistent", False)
|
||||
|
||||
self.log.info((
|
||||
f"Instance staging dir was set to `{staging_dir_path}` "
|
||||
f"and persistence is set to `{persistance}`"
|
||||
))
|
||||
|
|
@ -1,74 +1,120 @@
|
|||
"""Plugin for collecting OTIO frame ranges and related timing information.
|
||||
|
||||
This module contains a unified plugin that handles:
|
||||
- Basic timeline frame ranges
|
||||
- Source media frame ranges
|
||||
- Retimed clip frame ranges
|
||||
"""
|
||||
Requires:
|
||||
otioTimeline -> context data attribute
|
||||
review -> instance data attribute
|
||||
masterLayer -> instance data attribute
|
||||
otioClipRange -> instance data attribute
|
||||
"""
|
||||
|
||||
from pprint import pformat
|
||||
|
||||
import opentimelineio as otio
|
||||
import pyblish.api
|
||||
from ayon_core.pipeline.editorial import (
|
||||
get_media_range_with_retimes,
|
||||
otio_range_to_frame_range,
|
||||
otio_range_with_handles,
|
||||
)
|
||||
|
||||
|
||||
class CollectOtioFrameRanges(pyblish.api.InstancePlugin):
|
||||
"""Getting otio ranges from otio_clip
|
||||
def validate_otio_clip(instance, logger):
|
||||
"""Validate if instance has required OTIO clip data.
|
||||
|
||||
Adding timeline and source ranges to instance data"""
|
||||
Args:
|
||||
instance: The instance to validate
|
||||
logger: Logger object to use for debug messages
|
||||
|
||||
label = "Collect OTIO Frame Ranges"
|
||||
Returns:
|
||||
bool: True if valid, False otherwise
|
||||
"""
|
||||
if not instance.data.get("otioClip"):
|
||||
logger.debug("Skipping collect OTIO range - no clip found.")
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class CollectOtioRanges(pyblish.api.InstancePlugin):
|
||||
"""Collect all OTIO-related frame ranges and timing information.
|
||||
|
||||
This plugin handles collection of:
|
||||
- Basic timeline frame ranges with handles
|
||||
- Source media frame ranges with handles
|
||||
- Retimed clip frame ranges
|
||||
|
||||
Requires:
|
||||
otioClip (otio.schema.Clip): OTIO clip object
|
||||
workfileFrameStart (int): Starting frame of work file
|
||||
|
||||
Optional:
|
||||
shotDurationFromSource (int): Duration from source if retimed
|
||||
|
||||
Provides:
|
||||
frameStart (int): Start frame in timeline
|
||||
frameEnd (int): End frame in timeline
|
||||
clipIn (int): Clip in point
|
||||
clipOut (int): Clip out point
|
||||
clipInH (int): Clip in point with handles
|
||||
clipOutH (int): Clip out point with handles
|
||||
sourceStart (int): Source media start frame
|
||||
sourceEnd (int): Source media end frame
|
||||
sourceStartH (int): Source media start frame with handles
|
||||
sourceEndH (int): Source media end frame with handles
|
||||
"""
|
||||
|
||||
label = "Collect OTIO Ranges"
|
||||
order = pyblish.api.CollectorOrder - 0.08
|
||||
families = ["shot", "clip"]
|
||||
hosts = ["resolve", "hiero", "flame", "traypublisher"]
|
||||
|
||||
def process(self, instance):
|
||||
# Not all hosts can import these modules.
|
||||
import opentimelineio as otio
|
||||
from ayon_core.pipeline.editorial import (
|
||||
get_media_range_with_retimes,
|
||||
otio_range_to_frame_range,
|
||||
otio_range_with_handles
|
||||
"""Process the instance to collect all frame ranges.
|
||||
|
||||
Args:
|
||||
instance: The instance to process
|
||||
"""
|
||||
if not validate_otio_clip(instance, self.log):
|
||||
return
|
||||
|
||||
otio_clip = instance.data["otioClip"]
|
||||
|
||||
# Collect timeline ranges if workfile start frame is available
|
||||
if "workfileFrameStart" in instance.data:
|
||||
self._collect_timeline_ranges(instance, otio_clip)
|
||||
|
||||
# Traypublisher Simple or Advanced editorial publishing is
|
||||
# working with otio clips which are having no available range
|
||||
# because they are not having any media references.
|
||||
try:
|
||||
otio_clip.available_range()
|
||||
has_available_range = True
|
||||
except otio._otio.CannotComputeAvailableRangeError:
|
||||
self.log.info("Clip has no available range")
|
||||
has_available_range = False
|
||||
|
||||
# Collect source ranges if clip has available range
|
||||
if has_available_range:
|
||||
self._collect_source_ranges(instance, otio_clip)
|
||||
|
||||
# Handle retimed ranges if source duration is available
|
||||
if "shotDurationFromSource" in instance.data:
|
||||
self._collect_retimed_ranges(instance, otio_clip)
|
||||
|
||||
def _collect_timeline_ranges(self, instance, otio_clip):
|
||||
"""Collect basic timeline frame ranges."""
|
||||
workfile_start = instance.data["workfileFrameStart"]
|
||||
|
||||
# Get timeline ranges
|
||||
otio_tl_range = otio_clip.range_in_parent()
|
||||
otio_tl_range_handles = otio_range_with_handles(
|
||||
otio_tl_range,
|
||||
instance
|
||||
)
|
||||
|
||||
# get basic variables
|
||||
otio_clip = instance.data["otioClip"]
|
||||
workfile_start = instance.data["workfileFrameStart"]
|
||||
workfile_source_duration = instance.data.get("shotDurationFromSource")
|
||||
# Convert to frames
|
||||
tl_start, tl_end = otio_range_to_frame_range(otio_tl_range)
|
||||
tl_start_h, tl_end_h = otio_range_to_frame_range(otio_tl_range_handles)
|
||||
|
||||
# get ranges
|
||||
otio_tl_range = otio_clip.range_in_parent()
|
||||
otio_src_range = otio_clip.source_range
|
||||
otio_avalable_range = otio_clip.available_range()
|
||||
otio_tl_range_handles = otio_range_with_handles(
|
||||
otio_tl_range, instance)
|
||||
otio_src_range_handles = otio_range_with_handles(
|
||||
otio_src_range, instance)
|
||||
|
||||
# get source avalable start frame
|
||||
src_starting_from = otio.opentime.to_frames(
|
||||
otio_avalable_range.start_time,
|
||||
otio_avalable_range.start_time.rate)
|
||||
|
||||
# convert to frames
|
||||
range_convert = otio_range_to_frame_range
|
||||
tl_start, tl_end = range_convert(otio_tl_range)
|
||||
tl_start_h, tl_end_h = range_convert(otio_tl_range_handles)
|
||||
src_start, src_end = range_convert(otio_src_range)
|
||||
src_start_h, src_end_h = range_convert(otio_src_range_handles)
|
||||
frame_start = workfile_start
|
||||
frame_end = frame_start + otio.opentime.to_frames(
|
||||
otio_tl_range.duration, otio_tl_range.duration.rate) - 1
|
||||
|
||||
# in case of retimed clip and frame range should not be retimed
|
||||
if workfile_source_duration:
|
||||
# get available range trimmed with processed retimes
|
||||
retimed_attributes = get_media_range_with_retimes(
|
||||
otio_clip, 0, 0)
|
||||
self.log.debug(
|
||||
">> retimed_attributes: {}".format(retimed_attributes))
|
||||
media_in = int(retimed_attributes["mediaIn"])
|
||||
media_out = int(retimed_attributes["mediaOut"])
|
||||
frame_end = frame_start + (media_out - media_in) + 1
|
||||
self.log.debug(frame_end)
|
||||
frame_end = frame_start + otio_tl_range.duration.to_frames() - 1
|
||||
|
||||
data = {
|
||||
"frameStart": frame_start,
|
||||
|
|
@ -77,13 +123,77 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin):
|
|||
"clipOut": tl_end - 1,
|
||||
"clipInH": tl_start_h,
|
||||
"clipOutH": tl_end_h - 1,
|
||||
"sourceStart": src_starting_from + src_start,
|
||||
"sourceEnd": src_starting_from + src_end - 1,
|
||||
"sourceStartH": src_starting_from + src_start_h,
|
||||
"sourceEndH": src_starting_from + src_end_h - 1,
|
||||
}
|
||||
instance.data.update(data)
|
||||
self.log.debug(
|
||||
"_ data: {}".format(pformat(data)))
|
||||
self.log.debug(
|
||||
"_ instance.data: {}".format(pformat(instance.data)))
|
||||
self.log.debug(f"Added frame ranges: {pformat(data)}")
|
||||
|
||||
def _collect_source_ranges(self, instance, otio_clip):
|
||||
"""Collect source media frame ranges."""
|
||||
# Get source ranges
|
||||
otio_src_range = otio_clip.source_range
|
||||
otio_available_range = otio_clip.available_range()
|
||||
|
||||
# Backward-compatibility for Hiero OTIO exporter.
|
||||
# NTSC compatibility might introduce floating rates, when these are
|
||||
# not exactly the same (23.976 vs 23.976024627685547)
|
||||
# this will cause precision issue in computation.
|
||||
# Currently round to 2 decimals for comparison,
|
||||
# but this should always rescale after that.
|
||||
rounded_av_rate = round(otio_available_range.start_time.rate, 2)
|
||||
rounded_src_rate = round(otio_src_range.start_time.rate, 2)
|
||||
if rounded_av_rate != rounded_src_rate:
|
||||
conformed_src_in = otio_src_range.start_time.rescaled_to(
|
||||
otio_available_range.start_time.rate
|
||||
)
|
||||
conformed_src_duration = otio_src_range.duration.rescaled_to(
|
||||
otio_available_range.duration.rate
|
||||
)
|
||||
conformed_source_range = otio.opentime.TimeRange(
|
||||
start_time=conformed_src_in,
|
||||
duration=conformed_src_duration
|
||||
)
|
||||
else:
|
||||
conformed_source_range = otio_src_range
|
||||
|
||||
source_start = conformed_source_range.start_time
|
||||
source_end = source_start + conformed_source_range.duration
|
||||
handle_start = otio.opentime.RationalTime(
|
||||
instance.data.get("handleStart", 0),
|
||||
source_start.rate
|
||||
)
|
||||
handle_end = otio.opentime.RationalTime(
|
||||
instance.data.get("handleEnd", 0),
|
||||
source_start.rate
|
||||
)
|
||||
source_start_h = source_start - handle_start
|
||||
source_end_h = source_end + handle_end
|
||||
data = {
|
||||
"sourceStart": source_start.to_frames(),
|
||||
"sourceEnd": source_end.to_frames() - 1,
|
||||
"sourceStartH": source_start_h.to_frames(),
|
||||
"sourceEndH": source_end_h.to_frames() - 1,
|
||||
}
|
||||
instance.data.update(data)
|
||||
self.log.debug(f"Added source ranges: {pformat(data)}")
|
||||
|
||||
def _collect_retimed_ranges(self, instance, otio_clip):
|
||||
"""Handle retimed clip frame ranges."""
|
||||
retimed_attributes = get_media_range_with_retimes(otio_clip, 0, 0)
|
||||
self.log.debug(f"Retimed attributes: {retimed_attributes}")
|
||||
|
||||
frame_start = instance.data["frameStart"]
|
||||
media_in = int(retimed_attributes["mediaIn"])
|
||||
media_out = int(retimed_attributes["mediaOut"])
|
||||
frame_end = frame_start + (media_out - media_in)
|
||||
|
||||
data = {
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"sourceStart": media_in,
|
||||
"sourceEnd": media_out,
|
||||
"sourceStartH": media_in - int(retimed_attributes["handleStart"]),
|
||||
"sourceEndH": media_out + int(retimed_attributes["handleEnd"]),
|
||||
}
|
||||
|
||||
instance.data.update(data)
|
||||
self.log.debug(f"Updated retimed values: {data}")
|
||||
|
|
|
|||
|
|
@ -36,6 +36,16 @@ class CollectOtioReview(pyblish.api.InstancePlugin):
|
|||
# optionally get `reviewTrack`
|
||||
review_track_name = instance.data.get("reviewTrack")
|
||||
|
||||
# [clip_media] setting:
|
||||
# Extract current clip source range as reviewable.
|
||||
# Flag review content from otio_clip.
|
||||
if not review_track_name and "review" in instance.data["families"]:
|
||||
otio_review_clips = [otio_clip]
|
||||
|
||||
# skip if no review track available
|
||||
elif not review_track_name:
|
||||
return
|
||||
|
||||
# generate range in parent
|
||||
otio_tl_range = otio_clip.range_in_parent()
|
||||
|
||||
|
|
@ -43,12 +53,14 @@ class CollectOtioReview(pyblish.api.InstancePlugin):
|
|||
clip_frame_end = int(
|
||||
otio_tl_range.start_time.value + otio_tl_range.duration.value)
|
||||
|
||||
# skip if no review track available
|
||||
if not review_track_name:
|
||||
return
|
||||
|
||||
# loop all tracks and match with name in `reviewTrack`
|
||||
for track in otio_timeline.tracks:
|
||||
|
||||
# No review track defined, skip the loop
|
||||
if review_track_name is None:
|
||||
break
|
||||
|
||||
# Not current review track, skip it.
|
||||
if review_track_name != track.name:
|
||||
continue
|
||||
|
||||
|
|
@ -95,9 +107,42 @@ class CollectOtioReview(pyblish.api.InstancePlugin):
|
|||
instance.data["label"] = label + " (review)"
|
||||
instance.data["families"] += ["review", "ftrack"]
|
||||
instance.data["otioReviewClips"] = otio_review_clips
|
||||
|
||||
self.log.info(
|
||||
"Creating review track: {}".format(otio_review_clips))
|
||||
|
||||
# get colorspace from metadata if available
|
||||
# get metadata from first clip with media reference
|
||||
r_otio_cl = next(
|
||||
(
|
||||
clip
|
||||
for clip in otio_review_clips
|
||||
if (
|
||||
isinstance(clip, otio.schema.Clip)
|
||||
and clip.media_reference
|
||||
)
|
||||
),
|
||||
None
|
||||
)
|
||||
if r_otio_cl is not None:
|
||||
media_ref = r_otio_cl.media_reference
|
||||
media_metadata = media_ref.metadata
|
||||
|
||||
# TODO: we might need some alternative method since
|
||||
# native OTIO exports do not support ayon metadata
|
||||
review_colorspace = media_metadata.get(
|
||||
"ayon.source.colorspace"
|
||||
)
|
||||
if review_colorspace is None:
|
||||
# Backwards compatibility for older scenes
|
||||
review_colorspace = media_metadata.get(
|
||||
"openpype.source.colourtransform"
|
||||
)
|
||||
if review_colorspace:
|
||||
instance.data["reviewColorspace"] = review_colorspace
|
||||
self.log.info(
|
||||
"Review colorspace: {}".format(review_colorspace))
|
||||
|
||||
self.log.debug(
|
||||
"_ instance.data: {}".format(pformat(instance.data)))
|
||||
self.log.debug(
|
||||
|
|
|
|||
|
|
@ -6,16 +6,21 @@ Provides:
|
|||
instance -> otioReviewClips
|
||||
"""
|
||||
import os
|
||||
import math
|
||||
|
||||
import clique
|
||||
import pyblish.api
|
||||
|
||||
from ayon_core.pipeline import publish
|
||||
from ayon_core.pipeline.publish import (
|
||||
get_publish_template_name
|
||||
)
|
||||
|
||||
|
||||
class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
||||
class CollectOtioSubsetResources(
|
||||
pyblish.api.InstancePlugin,
|
||||
publish.ColormanagedPyblishPluginMixin
|
||||
):
|
||||
"""Get Resources for a product version"""
|
||||
|
||||
label = "Collect OTIO Subset Resources"
|
||||
|
|
@@ -65,9 +70,17 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
        self.log.debug(
            ">> retimed_attributes: {}".format(retimed_attributes))

        # break down into variables
        media_in = int(retimed_attributes["mediaIn"])
        media_out = int(retimed_attributes["mediaOut"])
        # break down into variables as rounded frame numbers
        #
        #   0             1               2              3             4
        #   |-------------|---------------|--------------|-------------|
        #         |_______________media range_______________|
        #        0.6                                       3.2
        #
        # As rounded frames, media_in = 0 and media_out = 4
        media_in = math.floor(retimed_attributes["mediaIn"])
        media_out = math.ceil(retimed_attributes["mediaOut"])

        handle_start = int(retimed_attributes["handleStart"])
        handle_end = int(retimed_attributes["handleEnd"])
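# Tiny check (not part of the diff) of the rounding rule in the diagram above,
# using the same example values 0.6 and 3.2:
import math

retimed_attributes = {"mediaIn": 0.6, "mediaOut": 3.2}
media_in = math.floor(retimed_attributes["mediaIn"])   # 0
media_out = math.ceil(retimed_attributes["mediaOut"])  # 4
# The rounded range fully covers the retimed media range, never truncates it.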
|
||||
|
|
@ -169,9 +182,18 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
|||
path, trimmed_media_range_h, metadata)
|
||||
self.staging_dir, collection = collection_data
|
||||
|
||||
self.log.debug(collection)
|
||||
repre = self._create_representation(
|
||||
frame_start, frame_end, collection=collection)
|
||||
if len(collection.indexes) > 1:
|
||||
self.log.debug(collection)
|
||||
repre = self._create_representation(
|
||||
frame_start, frame_end, collection=collection)
|
||||
else:
|
||||
filename = tuple(collection)[0]
|
||||
self.log.debug(filename)
|
||||
|
||||
# TODO: discuss this, it erases frame number.
|
||||
repre = self._create_representation(
|
||||
frame_start, frame_end, file=filename)
|
||||
|
||||
|
||||
else:
|
||||
_trim = False
|
||||
|
|
@ -187,12 +209,18 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
|||
repre = self._create_representation(
|
||||
frame_start, frame_end, file=filename, trim=_trim)
|
||||
|
||||
|
||||
instance.data["originalDirname"] = self.staging_dir
|
||||
|
||||
# add representation to instance data
|
||||
if repre:
|
||||
# add representation to instance data
|
||||
colorspace = instance.data.get("colorspace")
|
||||
# add colorspace data to representation
|
||||
self.set_representation_colorspace(
|
||||
repre, instance.context, colorspace)
|
||||
|
||||
instance.data["representations"].append(repre)
|
||||
self.log.debug(">>>>>>>> {}".format(repre))
|
||||
|
||||
|
||||
self.log.debug(instance.data)
|
||||
|
||||
|
|
@ -213,7 +241,8 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
|||
representation_data = {
|
||||
"frameStart": start,
|
||||
"frameEnd": end,
|
||||
"stagingDir": self.staging_dir
|
||||
"stagingDir": self.staging_dir,
|
||||
"tags": [],
|
||||
}
|
||||
|
||||
if kwargs.get("collection"):
|
||||
|
|
@ -239,8 +268,10 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
|||
"frameEnd": end,
|
||||
})
|
||||
|
||||
if kwargs.get("trim") is True:
|
||||
representation_data["tags"] = ["trim"]
|
||||
for tag_name in ("trim", "delete", "review"):
|
||||
if kwargs.get(tag_name) is True:
|
||||
representation_data["tags"].append(tag_name)
|
||||
|
||||
return representation_data
|
||||
|
||||
def get_template_name(self, instance):
|
||||
|
|
|
|||
|
|
@ -93,8 +93,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
|
|||
|
||||
# now we can just add instances from json file and we are done
|
||||
any_staging_dir_persistent = False
|
||||
for instance_data in data.get("instances"):
|
||||
|
||||
for instance_data in data["instances"]:
|
||||
self.log.debug(" - processing instance for {}".format(
|
||||
instance_data.get("productName")))
|
||||
instance = self._context.create_instance(
|
||||
|
|
@ -105,7 +104,11 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
|
|||
instance.data.update(instance_data)
|
||||
|
||||
# stash render job id for later validation
|
||||
instance.data["render_job_id"] = data.get("job").get("_id")
|
||||
instance.data["publishJobMetadata"] = data
|
||||
# TODO remove 'render_job_id' here and rather use
|
||||
# 'publishJobMetadata' where is needed.
|
||||
# - this is deadline specific
|
||||
instance.data["render_job_id"] = data.get("job", {}).get("_id")
|
||||
staging_dir_persistent = instance.data.get(
|
||||
"stagingDir_persistent", False
|
||||
)
|
||||
|
|
|
|||
|
|
@ -66,7 +66,8 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
|
|||
"yeticacheUE",
|
||||
"tycache",
|
||||
"usd",
|
||||
"oxrig"
|
||||
"oxrig",
|
||||
"sbsar",
|
||||
]
|
||||
|
||||
def process(self, instance):
|
||||
|
|
|
|||
|
|
@ -14,23 +14,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
|
|||
order = pyblish.api.CollectorOrder
|
||||
label = 'Collect Scene Version'
|
||||
# configurable in Settings
|
||||
hosts = [
|
||||
"aftereffects",
|
||||
"blender",
|
||||
"celaction",
|
||||
"fusion",
|
||||
"harmony",
|
||||
"hiero",
|
||||
"houdini",
|
||||
"maya",
|
||||
"max",
|
||||
"nuke",
|
||||
"photoshop",
|
||||
"resolve",
|
||||
"tvpaint",
|
||||
"motionbuilder",
|
||||
"substancepainter"
|
||||
]
|
||||
hosts = ["*"]
|
||||
|
||||
# in some cases of headless publishing (for example webpublisher using PS)
|
||||
# you want to ignore version from name and let integrate use next version
|
||||
|
|
|
|||
|
|
@ -9,11 +9,13 @@ import clique
|
|||
import pyblish.api
|
||||
|
||||
from ayon_core import resources, AYON_CORE_ROOT
|
||||
from ayon_core.pipeline import publish
|
||||
from ayon_core.pipeline import (
|
||||
publish,
|
||||
get_temp_dir
|
||||
)
|
||||
from ayon_core.lib import (
|
||||
run_ayon_launcher_process,
|
||||
|
||||
get_transcode_temp_directory,
|
||||
convert_input_paths_for_ffmpeg,
|
||||
should_convert_for_ffmpeg
|
||||
)
|
||||
|
|
@ -250,7 +252,10 @@ class ExtractBurnin(publish.Extractor):
|
|||
# - change staging dir of source representation
|
||||
# - must be set back after output definitions processing
|
||||
if do_convert:
|
||||
new_staging_dir = get_transcode_temp_directory()
|
||||
new_staging_dir = get_temp_dir(
|
||||
project_name=instance.context.data["projectName"],
|
||||
use_local_temp=True,
|
||||
)
|
||||
repre["stagingDir"] = new_staging_dir
|
||||
|
||||
convert_input_paths_for_ffmpeg(
|
||||
|
|
|
|||
|
|
@ -3,16 +3,16 @@ import copy
|
|||
import clique
|
||||
import pyblish.api
|
||||
|
||||
from ayon_core.pipeline import publish
|
||||
from ayon_core.pipeline import (
|
||||
publish,
|
||||
get_temp_dir
|
||||
)
|
||||
from ayon_core.lib import (
|
||||
|
||||
is_oiio_supported,
|
||||
)
|
||||
|
||||
from ayon_core.lib.transcoding import (
|
||||
UnknownRGBAChannelsError,
|
||||
convert_colorspace,
|
||||
get_transcode_temp_directory,
|
||||
)
|
||||
|
||||
from ayon_core.lib.profiles_filtering import filter_profiles
|
||||
|
|
@ -117,7 +117,10 @@ class ExtractOIIOTranscode(publish.Extractor):
|
|||
new_repre = copy.deepcopy(repre)
|
||||
|
||||
original_staging_dir = new_repre["stagingDir"]
|
||||
new_staging_dir = get_transcode_temp_directory()
|
||||
new_staging_dir = get_temp_dir(
|
||||
project_name=instance.context.data["projectName"],
|
||||
use_local_temp=True,
|
||||
)
|
||||
new_repre["stagingDir"] = new_staging_dir
|
||||
|
||||
output_extension = output_def["extension"]
|
||||
|
|
@ -128,6 +131,7 @@ class ExtractOIIOTranscode(publish.Extractor):
|
|||
output_extension)
|
||||
|
||||
transcoding_type = output_def["transcoding_type"]
|
||||
|
||||
target_colorspace = view = display = None
|
||||
# NOTE: we use colorspace_data as the fallback values for
|
||||
# the target colorspace.
|
||||
|
|
@ -159,8 +163,12 @@ class ExtractOIIOTranscode(publish.Extractor):
|
|||
additional_command_args = (output_def["oiiotool_args"]
|
||||
["additional_command_args"])
|
||||
|
||||
files_to_convert = self._translate_to_sequence(
|
||||
files_to_convert)
|
||||
self.log.debug("Files to convert: {}".format(files_to_convert))
|
||||
unknown_rgba_channels = False
|
||||
for file_name in files_to_convert:
|
||||
self.log.debug("Transcoding file: `{}`".format(file_name))
|
||||
input_path = os.path.join(original_staging_dir,
|
||||
file_name)
|
||||
output_path = self._get_output_file_path(input_path,
|
||||
|
|
@@ -281,7 +289,7 @@ class ExtractOIIOTranscode(publish.Extractor):
            (list) of [file.1001-1010#.exr] or [fileA.exr, fileB.exr]
        """
        pattern = [clique.PATTERNS["frames"]]
        collections, remainder = clique.assemble(
        collections, _ = clique.assemble(
            files_to_convert, patterns=pattern,
            assume_padded_when_ambiguous=True)

@@ -292,6 +300,9 @@ class ExtractOIIOTranscode(publish.Extractor):

        collection = collections[0]
        frames = list(collection.indexes)
        if collection.holes():
            return files_to_convert

        frame_str = "{}-{}#".format(frames[0], frames[-1])
        file_name = "{}{}{}".format(collection.head, frame_str,
                                    collection.tail)
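# Usage sketch (not part of the diff) of what _translate_to_sequence produces;
# the file names are made up. clique collapses a contiguous, padded sequence
# into a single "head.start-end#.tail" token, while gapped or unrelated files
# are returned unchanged by the plugin.
import clique

files = ["render.1001.exr", "render.1002.exr", "render.1003.exr"]
collections, _ = clique.assemble(
    files,
    patterns=[clique.PATTERNS["frames"]],
    assume_padded_when_ambiguous=True,
)
collection = collections[0]
frames = list(collection.indexes)
if not collection.holes():
    frame_str = "{}-{}#".format(frames[0], frames[-1])
    print("{}{}{}".format(collection.head, frame_str, collection.tail))
    # -> "render.1001-1003#.exr"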
|
|
|
|||
|
|
@ -37,6 +37,9 @@ class ExtractColorspaceData(publish.Extractor,
|
|||
# get colorspace settings
|
||||
context = instance.context
|
||||
|
||||
# colorspace name could be kept in instance.data
|
||||
colorspace = instance.data.get("colorspace")
|
||||
|
||||
# loop representations
|
||||
for representation in representations:
|
||||
# skip if colorspaceData is already at representation
|
||||
|
|
@ -44,5 +47,4 @@ class ExtractColorspaceData(publish.Extractor,
|
|||
continue
|
||||
|
||||
self.set_representation_colorspace(
|
||||
representation, context
|
||||
)
|
||||
representation, context, colorspace)
|
||||
|
|
|
|||
|
|
@ -22,7 +22,6 @@ class ExtractHierarchyToAYON(pyblish.api.ContextPlugin):
|
|||
|
||||
order = pyblish.api.ExtractorOrder - 0.01
|
||||
label = "Extract Hierarchy To AYON"
|
||||
families = ["clip", "shot"]
|
||||
|
||||
def process(self, context):
|
||||
if not context.data.get("hierarchyContext"):
|
||||
|
|
|
|||
|
|
@ -71,20 +71,18 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
|
|||
name = inst.data["folderPath"]
|
||||
|
||||
recycling_file = [f for f in created_files if name in f]
|
||||
|
||||
# frameranges
|
||||
timeline_in_h = inst.data["clipInH"]
|
||||
timeline_out_h = inst.data["clipOutH"]
|
||||
fps = inst.data["fps"]
|
||||
|
||||
# create duration
|
||||
duration = (timeline_out_h - timeline_in_h) + 1
|
||||
audio_clip = inst.data["otioClip"]
|
||||
audio_range = audio_clip.range_in_parent()
|
||||
duration = audio_range.duration.to_frames()
|
||||
|
||||
# ffmpeg generate new file only if doesn't exists already
|
||||
if not recycling_file:
|
||||
# convert to seconds
|
||||
start_sec = float(timeline_in_h / fps)
|
||||
duration_sec = float(duration / fps)
|
||||
parent_track = audio_clip.parent()
|
||||
parent_track_start = parent_track.range_in_parent().start_time
|
||||
relative_start_time = (
|
||||
audio_range.start_time - parent_track_start)
|
||||
start_sec = relative_start_time.to_seconds()
|
||||
duration_sec = audio_range.duration.to_seconds()
|
||||
|
||||
# temp audio file
|
||||
audio_fpath = self.create_temp_file(name)
|
||||
|
|
@ -163,34 +161,36 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
|
|||
|
||||
output = []
|
||||
# go trough all audio tracks
|
||||
for otio_track in otio_timeline.tracks:
|
||||
if "Audio" not in otio_track.kind:
|
||||
continue
|
||||
for otio_track in otio_timeline.audio_tracks():
|
||||
self.log.debug("_" * 50)
|
||||
playhead = 0
|
||||
for otio_clip in otio_track:
|
||||
self.log.debug(otio_clip)
|
||||
if isinstance(otio_clip, otio.schema.Gap):
|
||||
playhead += otio_clip.source_range.duration.value
|
||||
elif isinstance(otio_clip, otio.schema.Clip):
|
||||
start = otio_clip.source_range.start_time.value
|
||||
duration = otio_clip.source_range.duration.value
|
||||
fps = otio_clip.source_range.start_time.rate
|
||||
if (isinstance(otio_clip, otio.schema.Clip) and
|
||||
not otio_clip.media_reference.is_missing_reference):
|
||||
media_av_start = otio_clip.available_range().start_time
|
||||
clip_start = otio_clip.source_range.start_time
|
||||
fps = clip_start.rate
|
||||
conformed_av_start = media_av_start.rescaled_to(fps)
|
||||
# ffmpeg ignores embedded tc
|
||||
start = clip_start - conformed_av_start
|
||||
duration = otio_clip.source_range.duration
|
||||
media_path = otio_clip.media_reference.target_url
|
||||
input = {
|
||||
"mediaPath": media_path,
|
||||
"delayFrame": playhead,
|
||||
"startFrame": start,
|
||||
"durationFrame": duration,
|
||||
"startFrame": start.to_frames(),
|
||||
"durationFrame": duration.to_frames(),
|
||||
"delayMilSec": int(float(playhead / fps) * 1000),
|
||||
"startSec": float(start / fps),
|
||||
"durationSec": float(duration / fps),
|
||||
"fps": fps
|
||||
"startSec": start.to_seconds(),
|
||||
"durationSec": duration.to_seconds(),
|
||||
"fps": float(fps)
|
||||
}
|
||||
if input not in output:
|
||||
output.append(input)
|
||||
self.log.debug("__ input: {}".format(input))
|
||||
playhead += otio_clip.source_range.duration.value
|
||||
|
||||
playhead += otio_clip.source_range.duration.value
|
||||
|
||||
return output
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,10 @@ from ayon_core.lib import (
|
|||
from ayon_core.pipeline import publish
|
||||
|
||||
|
||||
class ExtractOTIOReview(publish.Extractor):
|
||||
class ExtractOTIOReview(
|
||||
publish.Extractor,
|
||||
publish.ColormanagedPyblishPluginMixin
|
||||
):
|
||||
"""
|
||||
Extract OTIO timeline into one concatenated image sequence file.
|
||||
|
||||
|
|
@ -68,17 +71,24 @@ class ExtractOTIOReview(publish.Extractor):
|
|||
# TODO: convert resulting image sequence to mp4
|
||||
|
||||
# get otio clip and other time info from instance clip
|
||||
otio_review_clips = instance.data.get("otioReviewClips")
|
||||
|
||||
if otio_review_clips is None:
|
||||
self.log.info(f"Instance `{instance}` has no otioReviewClips")
|
||||
return
|
||||
|
||||
# TODO: what if handles are different in `versionData`?
|
||||
handle_start = instance.data["handleStart"]
|
||||
handle_end = instance.data["handleEnd"]
|
||||
otio_review_clips = instance.data["otioReviewClips"]
|
||||
|
||||
# add plugin wide attributes
|
||||
self.representation_files = []
|
||||
self.used_frames = []
|
||||
self.workfile_start = int(instance.data.get(
|
||||
"workfileFrameStart", 1001)) - handle_start
|
||||
self.padding = len(str(self.workfile_start))
|
||||
# NOTE: padding has to be converted from
|
||||
# end frame since start could be lower then 1000
|
||||
self.padding = len(str(instance.data.get("frameEnd", 1001)))
|
||||
self.used_frames.append(self.workfile_start)
|
||||
self.to_width = instance.data.get(
|
||||
"resolutionWidth") or self.to_width
|
||||
|
|
@ -86,8 +96,10 @@ class ExtractOTIOReview(publish.Extractor):
|
|||
"resolutionHeight") or self.to_height
|
||||
|
||||
# skip instance if no reviewable data available
|
||||
if (not isinstance(otio_review_clips[0], otio.schema.Clip)) \
|
||||
and (len(otio_review_clips) == 1):
|
||||
if (
|
||||
not isinstance(otio_review_clips[0], otio.schema.Clip)
|
||||
and len(otio_review_clips) == 1
|
||||
):
|
||||
self.log.warning(
|
||||
"Instance `{}` has nothing to process".format(instance))
|
||||
return
|
||||
|
|
@ -119,26 +131,32 @@ class ExtractOTIOReview(publish.Extractor):
|
|||
res_data[key] = value
|
||||
break
|
||||
|
||||
self.to_width, self.to_height = res_data["width"], res_data["height"]
|
||||
self.log.debug("> self.to_width x self.to_height: {} x {}".format(
|
||||
self.to_width, self.to_height
|
||||
))
|
||||
self.to_width, self.to_height = (
|
||||
res_data["width"], res_data["height"]
|
||||
)
|
||||
self.log.debug(
|
||||
"> self.to_width x self.to_height:"
|
||||
f" {self.to_width} x {self.to_height}"
|
||||
)
|
||||
|
||||
available_range = r_otio_cl.available_range()
|
||||
available_range_start_frame = (
|
||||
available_range.start_time.to_frames()
|
||||
)
|
||||
processing_range = None
|
||||
self.actual_fps = available_range.duration.rate
|
||||
start = src_range.start_time.rescaled_to(self.actual_fps)
|
||||
duration = src_range.duration.rescaled_to(self.actual_fps)
|
||||
|
||||
# Temporary.
|
||||
# Some AYON custom OTIO exporter were implemented with relative
|
||||
# source range for image sequence. Following code maintain
|
||||
# backward-compatibility by adjusting available range
|
||||
# Some AYON custom OTIO exporter were implemented with
|
||||
# relative source range for image sequence. Following code
|
||||
# maintain backward-compatibility by adjusting available range
|
||||
# while we are updating those.
|
||||
if (
|
||||
is_clip_from_media_sequence(r_otio_cl)
|
||||
and available_range.start_time.to_frames() == media_ref.start_frame
|
||||
and src_range.start_time.to_frames() < media_ref.start_frame
|
||||
and available_range_start_frame == media_ref.start_frame
|
||||
and start.to_frames() < media_ref.start_frame
|
||||
):
|
||||
available_range = otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(0, rate=self.actual_fps),
|
||||
|
|
@ -168,7 +186,7 @@ class ExtractOTIOReview(publish.Extractor):
|
|||
start -= clip_handle_start
|
||||
duration += clip_handle_start
|
||||
elif len(otio_review_clips) > 1 \
|
||||
and (index == len(otio_review_clips) - 1):
|
||||
and (index == len(otio_review_clips) - 1):
|
||||
# more clips | last clip reframing with handle
|
||||
duration += clip_handle_end
|
||||
elif len(otio_review_clips) == 1:
|
||||
|
|
@ -190,13 +208,9 @@ class ExtractOTIOReview(publish.Extractor):
|
|||
# File sequence way
|
||||
if is_sequence:
|
||||
# Remap processing range to input file sequence.
|
||||
processing_range_as_frames = (
|
||||
processing_range.start_time.to_frames(),
|
||||
processing_range.end_time_inclusive().to_frames()
|
||||
)
|
||||
first, last = remap_range_on_file_sequence(
|
||||
r_otio_cl,
|
||||
processing_range_as_frames,
|
||||
processing_range,
|
||||
)
|
||||
input_fps = processing_range.start_time.rate
|
||||
|
||||
|
|
@ -236,7 +250,8 @@ class ExtractOTIOReview(publish.Extractor):
|
|||
# Extraction via FFmpeg.
|
||||
else:
|
||||
path = media_ref.target_url
|
||||
# Set extract range from 0 (FFmpeg ignores embedded timecode).
|
||||
# Set extract range from 0 (FFmpeg ignores
|
||||
# embedded timecode).
|
||||
extract_range = otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(
|
||||
(
|
||||
|
|
@ -263,8 +278,15 @@ class ExtractOTIOReview(publish.Extractor):
|
|||
|
||||
# creating and registering representation
|
||||
representation = self._create_representation(start, duration)
|
||||
|
||||
# add colorspace data to representation
|
||||
if colorspace := instance.data.get("reviewColorspace"):
|
||||
self.set_representation_colorspace(
|
||||
representation, instance.context, colorspace
|
||||
)
|
||||
|
||||
instance.data["representations"].append(representation)
|
||||
self.log.info("Adding representation: {}".format(representation))
|
||||
self.log.debug("Adding representation: {}".format(representation))
|
||||
|
||||
def _create_representation(self, start, duration):
|
||||
"""
|
||||
|
|
@ -298,6 +320,9 @@ class ExtractOTIOReview(publish.Extractor):
|
|||
end = max(collection.indexes)
|
||||
|
||||
files = [f for f in collection]
|
||||
# single frame sequence
|
||||
if len(files) == 1:
|
||||
files = files[0]
|
||||
ext = collection.format("{tail}")
|
||||
representation_data.update({
|
||||
"name": ext[1:],
|
||||
|
|
@ -397,7 +422,8 @@ class ExtractOTIOReview(publish.Extractor):
|
|||
to defined image sequence format.
|
||||
|
||||
Args:
|
||||
sequence (list): input dir path string, collection object, fps in list
|
||||
sequence (list): input dir path string, collection object,
|
||||
fps in list.
|
||||
video (list)[optional]: video_path string, otio_range in list
|
||||
gap (int)[optional]: gap duration
|
||||
end_offset (int)[optional]: offset gap frame start in frames
|
||||
|
|
|
|||
|
|
@ -22,8 +22,8 @@ from ayon_core.lib.transcoding import (
|
|||
should_convert_for_ffmpeg,
|
||||
get_review_layer_name,
|
||||
convert_input_paths_for_ffmpeg,
|
||||
get_transcode_temp_directory,
|
||||
)
|
||||
from ayon_core.pipeline import get_temp_dir
|
||||
from ayon_core.pipeline.publish import (
|
||||
KnownPublishError,
|
||||
get_publish_instance_label,
|
||||
|
|
@ -310,7 +310,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
# - change staging dir of source representation
|
||||
# - must be set back after output definitions processing
|
||||
if do_convert:
|
||||
new_staging_dir = get_transcode_temp_directory()
|
||||
new_staging_dir = get_temp_dir(
|
||||
project_name=instance.context.data["projectName"],
|
||||
use_local_temp=True,
|
||||
)
|
||||
repre["stagingDir"] = new_staging_dir
|
||||
|
||||
convert_input_paths_for_ffmpeg(
|
||||
|
|
|
|||
|
|
@ -35,9 +35,11 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
|
|||
"resolve",
|
||||
"traypublisher",
|
||||
"substancepainter",
|
||||
"substancedesigner",
|
||||
"nuke",
|
||||
"aftereffects",
|
||||
"unreal"
|
||||
"unreal",
|
||||
"houdini"
|
||||
]
|
||||
enabled = False
|
||||
|
||||
|
|
@ -341,8 +343,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
|
|||
# to be published locally
|
||||
continue
|
||||
|
||||
valid = "review" in tags or "thumb-nuke" in tags
|
||||
if not valid:
|
||||
if "review" not in tags:
|
||||
continue
|
||||
|
||||
if not repre.get("files"):
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
from operator import attrgetter
|
||||
import dataclasses
|
||||
import os
|
||||
from typing import Dict
|
||||
from typing import Any, Dict, List
|
||||
|
||||
import pyblish.api
|
||||
try:
|
||||
|
|
@ -14,7 +14,8 @@ from ayon_core.lib import (
|
|||
BoolDef,
|
||||
UISeparatorDef,
|
||||
UILabelDef,
|
||||
EnumDef
|
||||
EnumDef,
|
||||
filter_profiles
|
||||
)
|
||||
try:
|
||||
from ayon_core.pipeline.usdlib import (
|
||||
|
|
@ -281,6 +282,9 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
|||
"fx": 500,
|
||||
"lighting": 600,
|
||||
}
|
||||
# Default profiles to set certain instance attribute defaults based on
|
||||
# profiles in settings
|
||||
profiles: List[Dict[str, Any]] = []
|
||||
|
||||
@classmethod
|
||||
def apply_settings(cls, project_settings):
|
||||
|
|
@ -298,6 +302,8 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
|||
if contribution_layers:
|
||||
cls.contribution_layers = contribution_layers
|
||||
|
||||
cls.profiles = plugin_settings.get("profiles", [])
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
attr_values = self.get_attr_values_from_data(instance.data)
|
||||
|
|
@ -463,6 +469,29 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
|||
if not cls.instance_matches_plugin_families(instance):
|
||||
return []
|
||||
|
||||
# Set default target layer based on product type
|
||||
current_context_task_type = create_context.get_current_task_type()
|
||||
profile = filter_profiles(cls.profiles, {
|
||||
"product_types": instance.data["productType"],
|
||||
"task_types": current_context_task_type
|
||||
})
|
||||
if not profile:
|
||||
profile = {}
|
||||
|
||||
# Define defaults
|
||||
default_enabled = profile.get("contribution_enabled", True)
|
||||
default_contribution_layer = profile.get(
|
||||
"contribution_layer", None)
|
||||
default_apply_as_variant = profile.get(
|
||||
"contribution_apply_as_variant", False)
|
||||
default_target_product = profile.get(
|
||||
"contribution_target_product", "usdAsset")
|
||||
default_init_as = (
|
||||
"asset"
|
||||
if profile.get("contribution_target_product") == "usdAsset"
|
||||
else "shot")
|
||||
init_as_visible = False
|
||||
|
||||
# Attributes logic
|
||||
publish_attributes = instance["publish_attributes"].get(
|
||||
cls.__name__, {})
|
||||
|
|
@ -485,7 +514,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
|||
"In both cases the USD data itself is free to have "
|
||||
"references and sublayers of its own."
|
||||
),
|
||||
default=True),
|
||||
default=default_enabled),
|
||||
TextDef("contribution_target_product",
|
||||
label="Target product",
|
||||
tooltip=(
|
||||
|
|
@ -495,7 +524,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
|||
"the contribution itself will be added to the "
|
||||
"department layer."
|
||||
),
|
||||
default="usdAsset",
|
||||
default=default_target_product,
|
||||
visible=visible),
|
||||
EnumDef("contribution_target_product_init",
|
||||
label="Initialize as",
|
||||
|
|
@ -507,8 +536,8 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
|||
"setting will do nothing."
|
||||
),
|
||||
items=["asset", "shot"],
|
||||
default="asset",
|
||||
visible=visible),
|
||||
default=default_init_as,
|
||||
visible=visible and init_as_visible),
|
||||
|
||||
# Asset layer, e.g. model.usd, look.usd, rig.usd
|
||||
EnumDef("contribution_layer",
|
||||
|
|
@ -520,7 +549,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
|||
"the list) will contribute as a stronger opinion."
|
||||
),
|
||||
items=list(cls.contribution_layers.keys()),
|
||||
default="model",
|
||||
default=default_contribution_layer,
|
||||
visible=visible),
|
||||
BoolDef("contribution_apply_as_variant",
|
||||
label="Add as variant",
|
||||
|
|
@ -532,7 +561,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
|||
"appended to as a sublayer to the department layer "
|
||||
"instead."
|
||||
),
|
||||
default=True,
|
||||
default=default_apply_as_variant,
|
||||
visible=visible),
|
||||
TextDef("contribution_variant_set_name",
|
||||
label="Variant Set Name",
|
||||
|
|
@ -588,31 +617,6 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
|||
instance.set_publish_plugin_attr_defs(cls.__name__, new_attrs)
|
||||
|
||||
|
||||
class CollectUSDLayerContributionsHoudiniLook(CollectUSDLayerContributions):
|
||||
"""
|
||||
This is solely here to expose the attribute definitions for the
|
||||
Houdini "look" family.
|
||||
"""
|
||||
# TODO: Improve how this is built for the look family
|
||||
hosts = ["houdini"]
|
||||
families = ["look"]
|
||||
label = CollectUSDLayerContributions.label + " (Look)"
|
||||
|
||||
@classmethod
|
||||
def get_attr_defs_for_instance(cls, create_context, instance):
|
||||
# Filtering of instance, if needed, can be customized
|
||||
if not cls.instance_matches_plugin_families(instance):
|
||||
return []
|
||||
|
||||
defs = super().get_attr_defs_for_instance(create_context, instance)
|
||||
|
||||
# Update default for department layer to look
|
||||
layer_def = next(d for d in defs if d.key == "contribution_layer")
|
||||
layer_def.default = "look"
|
||||
|
||||
return defs
|
||||
|
||||
|
||||
class ValidateUSDDependencies(pyblish.api.InstancePlugin):
|
||||
families = ["usdLayer"]
|
||||
|
||||
|
|
|
|||
|
|
@ -706,7 +706,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
|
|||
# In case source are published in place we need to
|
||||
# skip renumbering
|
||||
repre_frame_start = repre.get("frameStart")
|
||||
if repre_frame_start is not None:
|
||||
explicit_frames = instance.data.get("hasExplicitFrames", False)
|
||||
if not explicit_frames and repre_frame_start is not None:
|
||||
index_frame_start = int(repre_frame_start)
|
||||
# Shift destination sequence to the start frame
|
||||
destination_indexes = [
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ class ValidateCurrentSaveFile(pyblish.api.ContextPlugin):
|
|||
label = "Validate File Saved"
|
||||
order = pyblish.api.ValidatorOrder - 0.1
|
||||
hosts = ["fusion", "houdini", "max", "maya", "nuke", "substancepainter",
|
||||
"cinema4d"]
|
||||
"cinema4d", "silhouette"]
|
||||
actions = [SaveByVersionUpAction, ShowWorkfilesAction]
|
||||
|
||||
def process(self, context):
|
||||
|
|
|
|||
|
|
@ -11,8 +11,8 @@ class ValidateProductUniqueness(pyblish.api.ContextPlugin):
|
|||
"""Validate all product names are unique.
|
||||
|
||||
This only validates whether the instances currently set to publish from
|
||||
the workfile overlap one another for the folder + product they are publishing
|
||||
to.
|
||||
the workfile overlap one another for the folder + product they are
|
||||
publishing to.
|
||||
|
||||
This does not perform any check against existing publishes in the database
|
||||
since it is allowed to publish into existing products resulting in
|
||||
|
|
@ -72,8 +72,10 @@ class ValidateProductUniqueness(pyblish.api.ContextPlugin):
|
|||
# All is ok
|
||||
return
|
||||
|
||||
msg = ("Instance product names {} are not unique. ".format(non_unique) +
|
||||
"Please remove or rename duplicates.")
|
||||
msg = (
|
||||
f"Instance product names {non_unique} are not unique."
|
||||
" Please remove or rename duplicates."
|
||||
)
|
||||
formatting_data = {
|
||||
"non_unique": ",".join(non_unique)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -79,7 +79,8 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
|
|||
- Datatypes explanation:
|
||||
<color> string format must be supported by FFmpeg.
|
||||
Examples: "#000000", "0x000000", "black"
|
||||
<font> must be accesible by ffmpeg = name of registered Font in system or path to font file.
|
||||
<font> must be accesible by ffmpeg = name of registered Font in system
|
||||
or path to font file.
|
||||
Examples: "Arial", "C:/Windows/Fonts/arial.ttf"
|
||||
|
||||
- Possible keys:
|
||||
|
|
@ -87,17 +88,21 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
|
|||
"bg_opacity" - Opacity of background (box around text) - <float, Range:0-1>
|
||||
"bg_color" - Background color - <color>
|
||||
"bg_padding" - Background padding in pixels - <int>
|
||||
"x_offset" - offsets burnin vertically by entered pixels from border - <int>
|
||||
"y_offset" - offsets burnin horizontally by entered pixels from border - <int>
|
||||
"x_offset" - offsets burnin vertically by entered pixels
|
||||
from border - <int>
|
||||
"y_offset" - offsets burnin horizontally by entered pixels
|
||||
from border - <int>
|
||||
- x_offset & y_offset should be set at least to same value as bg_padding!!
|
||||
"font" - Font Family for text - <font>
|
||||
"font_size" - Font size in pixels - <int>
|
||||
"font_color" - Color of text - <color>
|
||||
"frame_offset" - Default start frame - <int>
|
||||
- required IF start frame is not set when using frames or timecode burnins
|
||||
- required IF start frame is not set when using frames
|
||||
or timecode burnins
|
||||
|
||||
On initializing class can be set General options through "options_init" arg.
|
||||
General can be overridden when adding burnin
|
||||
On initializing class can be set General options through
|
||||
"options_init" arg.
|
||||
General options can be overridden when adding burnin.
|
||||
|
||||
'''
|
||||
TOP_CENTERED = ffmpeg_burnins.TOP_CENTERED
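
The docstring above enumerates the burnin option keys and their datatypes. A minimal sketch of an "options_init" dict that uses only the keys documented there (all values are example choices, not defaults from this codebase):

options_init = {
    "bg_opacity": 0.5,        # <float, Range:0-1>
    "bg_color": "#000000",    # any <color> FFmpeg understands
    "bg_padding": 5,          # pixels
    "x_offset": 5,            # keep at least equal to bg_padding
    "y_offset": 5,
    "font": "Arial",          # registered font name or path to a font file
    "font_size": 42,
    "font_color": "#ffffff",
    "frame_offset": 1001,     # needed for frame/timecode burnins without a start frame
}
# Per the docstring, these general options can then be overridden
# when adding an individual burnin.
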
|
||||
|
|
|
|||
|
|
@ -190,6 +190,7 @@ def get_current_project_settings():
|
|||
project_name = os.environ.get("AYON_PROJECT_NAME")
|
||||
if not project_name:
|
||||
raise ValueError(
|
||||
"Missing context project in environemt variable `AYON_PROJECT_NAME`."
|
||||
"Missing context project in environment"
|
||||
" variable `AYON_PROJECT_NAME`."
|
||||
)
|
||||
return get_project_settings(project_name)
|
||||
|
|
|
|||
|
|
@ -23,6 +23,9 @@ Enabled vs Disabled logic in most of stylesheets
|
|||
font-family: "Noto Sans";
|
||||
font-weight: 450;
|
||||
outline: none;
|
||||
|
||||
/* Define icon size to fix size issues for most of DCCs */
|
||||
icon-size: 16px;
|
||||
}
|
||||
|
||||
QWidget {
|
||||
|
|
@ -44,10 +47,6 @@ QLabel {
|
|||
background: transparent;
|
||||
}
|
||||
|
||||
QLabel[overriden="1"] {
|
||||
color: {color:font-overridden};
|
||||
}
|
||||
|
||||
/* Inputs */
|
||||
QAbstractSpinBox, QLineEdit, QPlainTextEdit, QTextEdit {
|
||||
border: 1px solid {color:border};
|
||||
|
|
@ -1172,6 +1171,8 @@ ValidationArtistMessage QLabel {
|
|||
|
||||
#PublishLogMessage {
|
||||
font-family: "Noto Sans Mono";
|
||||
border: none;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
#PublishInstanceLogsLabel {
|
||||
|
|
@ -1589,6 +1590,10 @@ CreateNextPageOverlay {
|
|||
}
|
||||
|
||||
/* Attribute Definition widgets */
|
||||
AttributeDefinitionsLabel[overridden="1"] {
|
||||
color: {color:font-overridden};
|
||||
}
|
||||
|
||||
AttributeDefinitionsWidget QAbstractSpinBox, QLineEdit, QPlainTextEdit, QTextEdit {
|
||||
padding: 1px;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
from .widgets import (
|
||||
create_widget_for_attr_def,
|
||||
AttributeDefinitionsWidget,
|
||||
AttributeDefinitionsLabel,
|
||||
)
|
||||
|
||||
from .dialog import (
|
||||
|
|
@ -11,6 +12,7 @@ from .dialog import (
|
|||
__all__ = (
|
||||
"create_widget_for_attr_def",
|
||||
"AttributeDefinitionsWidget",
|
||||
"AttributeDefinitionsLabel",
|
||||
|
||||
"AttributeDefinitionsDialog",
|
||||
)
|
||||
|
|
|
|||
1
client/ayon_core/tools/attribute_defs/_constants.py
Normal file
|
|
@ -0,0 +1 @@
|
|||
REVERT_TO_DEFAULT_LABEL = "Revert to default"
|
||||
|
|
@ -17,6 +17,8 @@ from ayon_core.tools.utils import (
|
|||
PixmapLabel
|
||||
)
|
||||
|
||||
from ._constants import REVERT_TO_DEFAULT_LABEL
|
||||
|
||||
ITEM_ID_ROLE = QtCore.Qt.UserRole + 1
|
||||
ITEM_LABEL_ROLE = QtCore.Qt.UserRole + 2
|
||||
ITEM_ICON_ROLE = QtCore.Qt.UserRole + 3
|
||||
|
|
@ -252,7 +254,7 @@ class FilesModel(QtGui.QStandardItemModel):
|
|||
"""Make sure that removed items are removed from items mapping.
|
||||
|
||||
Connected with '_on_insert'. When user drag item and drop it to same
|
||||
view the item is actually removed and creted again but it happens in
|
||||
view the item is actually removed and created again but it happens in
|
||||
inner calls of Qt.
|
||||
"""
|
||||
|
||||
|
|
@ -598,7 +600,7 @@ class FilesView(QtWidgets.QListView):
|
|||
"""View showing instances and their groups."""
|
||||
|
||||
remove_requested = QtCore.Signal()
|
||||
context_menu_requested = QtCore.Signal(QtCore.QPoint)
|
||||
context_menu_requested = QtCore.Signal(QtCore.QPoint, bool)
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(FilesView, self).__init__(*args, **kwargs)
|
||||
|
|
@ -690,9 +692,8 @@ class FilesView(QtWidgets.QListView):
|
|||
|
||||
def _on_context_menu_request(self, pos):
|
||||
index = self.indexAt(pos)
|
||||
if index.isValid():
|
||||
point = self.viewport().mapToGlobal(pos)
|
||||
self.context_menu_requested.emit(point)
|
||||
point = self.viewport().mapToGlobal(pos)
|
||||
self.context_menu_requested.emit(point, index.isValid())
|
||||
|
||||
def _on_selection_change(self):
|
||||
self._remove_btn.setEnabled(self.has_selected_item_ids())
|
||||
|
|
@ -721,27 +722,34 @@ class FilesView(QtWidgets.QListView):
|
|||
|
||||
class FilesWidget(QtWidgets.QFrame):
|
||||
value_changed = QtCore.Signal()
|
||||
revert_requested = QtCore.Signal()
|
||||
|
||||
def __init__(self, single_item, allow_sequences, extensions_label, parent):
|
||||
super(FilesWidget, self).__init__(parent)
|
||||
super().__init__(parent)
|
||||
self.setAcceptDrops(True)
|
||||
|
||||
wrapper_widget = QtWidgets.QWidget(self)
|
||||
|
||||
empty_widget = DropEmpty(
|
||||
single_item, allow_sequences, extensions_label, self
|
||||
single_item, allow_sequences, extensions_label, wrapper_widget
|
||||
)
|
||||
|
||||
files_model = FilesModel(single_item, allow_sequences)
|
||||
files_proxy_model = FilesProxyModel()
|
||||
files_proxy_model.setSourceModel(files_model)
|
||||
files_view = FilesView(self)
|
||||
files_view = FilesView(wrapper_widget)
|
||||
files_view.setModel(files_proxy_model)
|
||||
|
||||
layout = QtWidgets.QStackedLayout(self)
|
||||
layout.setContentsMargins(0, 0, 0, 0)
|
||||
layout.setStackingMode(QtWidgets.QStackedLayout.StackAll)
|
||||
layout.addWidget(empty_widget)
|
||||
layout.addWidget(files_view)
|
||||
layout.setCurrentWidget(empty_widget)
|
||||
wrapper_layout = QtWidgets.QStackedLayout(wrapper_widget)
|
||||
wrapper_layout.setContentsMargins(0, 0, 0, 0)
|
||||
wrapper_layout.setStackingMode(QtWidgets.QStackedLayout.StackAll)
|
||||
wrapper_layout.addWidget(empty_widget)
|
||||
wrapper_layout.addWidget(files_view)
|
||||
wrapper_layout.setCurrentWidget(empty_widget)
|
||||
|
||||
main_layout = QtWidgets.QHBoxLayout(self)
|
||||
main_layout.setContentsMargins(0, 0, 0, 0)
|
||||
main_layout.addWidget(wrapper_widget, 1)
|
||||
|
||||
files_proxy_model.rowsInserted.connect(self._on_rows_inserted)
|
||||
files_proxy_model.rowsRemoved.connect(self._on_rows_removed)
|
||||
|
|
@ -761,7 +769,11 @@ class FilesWidget(QtWidgets.QFrame):
|
|||
|
||||
self._widgets_by_id = {}
|
||||
|
||||
self._layout = layout
|
||||
self._wrapper_widget = wrapper_widget
|
||||
self._wrapper_layout = wrapper_layout
|
||||
|
||||
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
|
||||
self.customContextMenuRequested.connect(self._on_context_menu)
|
||||
|
||||
def _set_multivalue(self, multivalue):
|
||||
if self._multivalue is multivalue:
|
||||
|
|
@ -770,7 +782,7 @@ class FilesWidget(QtWidgets.QFrame):
|
|||
self._files_view.set_multivalue(multivalue)
|
||||
self._files_model.set_multivalue(multivalue)
|
||||
self._files_proxy_model.set_multivalue(multivalue)
|
||||
self.setEnabled(not multivalue)
|
||||
self._wrapper_widget.setEnabled(not multivalue)
|
||||
|
||||
def set_value(self, value, multivalue):
|
||||
self._in_set_value = True
|
||||
|
|
@ -829,7 +841,7 @@ class FilesWidget(QtWidgets.QFrame):
|
|||
self._multivalue
|
||||
)
|
||||
widget.context_menu_requested.connect(
|
||||
self._on_context_menu_requested
|
||||
self._on_item_context_menu_request
|
||||
)
|
||||
self._files_view.setIndexWidget(index, widget)
|
||||
self._files_proxy_model.setData(
|
||||
|
|
@ -847,7 +859,7 @@ class FilesWidget(QtWidgets.QFrame):
|
|||
for row in range(self._files_proxy_model.rowCount()):
|
||||
index = self._files_proxy_model.index(row, 0)
|
||||
item_id = index.data(ITEM_ID_ROLE)
|
||||
available_item_ids.add(index.data(ITEM_ID_ROLE))
|
||||
available_item_ids.add(item_id)
|
||||
|
||||
widget_ids = set(self._widgets_by_id.keys())
|
||||
for item_id in available_item_ids:
|
||||
|
|
@ -888,22 +900,31 @@ class FilesWidget(QtWidgets.QFrame):
|
|||
if items_to_delete:
|
||||
self._remove_item_by_ids(items_to_delete)
|
||||
|
||||
def _on_context_menu_requested(self, pos):
|
||||
if self._multivalue:
|
||||
return
|
||||
def _on_context_menu(self, pos):
|
||||
self._on_context_menu_requested(pos, False)
|
||||
|
||||
def _on_context_menu_requested(self, pos, valid_index):
|
||||
menu = QtWidgets.QMenu(self._files_view)
|
||||
if valid_index and not self._multivalue:
|
||||
if self._files_view.has_selected_sequence():
|
||||
split_action = QtWidgets.QAction("Split sequence", menu)
|
||||
split_action.triggered.connect(self._on_split_request)
|
||||
menu.addAction(split_action)
|
||||
|
||||
if self._files_view.has_selected_sequence():
|
||||
split_action = QtWidgets.QAction("Split sequence", menu)
|
||||
split_action.triggered.connect(self._on_split_request)
|
||||
menu.addAction(split_action)
|
||||
remove_action = QtWidgets.QAction("Remove", menu)
|
||||
remove_action.triggered.connect(self._on_remove_requested)
|
||||
menu.addAction(remove_action)
|
||||
|
||||
remove_action = QtWidgets.QAction("Remove", menu)
|
||||
remove_action.triggered.connect(self._on_remove_requested)
|
||||
menu.addAction(remove_action)
|
||||
if not valid_index:
|
||||
revert_action = QtWidgets.QAction(REVERT_TO_DEFAULT_LABEL, menu)
|
||||
revert_action.triggered.connect(self.revert_requested)
|
||||
menu.addAction(revert_action)
|
||||
|
||||
menu.popup(pos)
|
||||
if menu.actions():
|
||||
menu.popup(pos)
|
||||
|
||||
def _on_item_context_menu_request(self, pos):
|
||||
self._on_context_menu_requested(pos, True)
|
||||
|
||||
def dragEnterEvent(self, event):
|
||||
if self._multivalue:
|
||||
|
|
@ -1011,5 +1032,5 @@ class FilesWidget(QtWidgets.QFrame):
|
|||
current_widget = self._files_view
|
||||
else:
|
||||
current_widget = self._empty_widget
|
||||
self._layout.setCurrentWidget(current_widget)
|
||||
self._wrapper_layout.setCurrentWidget(current_widget)
|
||||
self._files_view.update_remove_btn_visibility()
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
import copy
|
||||
import typing
|
||||
from typing import Optional
|
||||
|
||||
from qtpy import QtWidgets, QtCore
|
||||
from qtpy import QtWidgets, QtCore, QtGui
|
||||
|
||||
from ayon_core.lib.attribute_definitions import (
|
||||
AbstractAttrDef,
|
||||
|
|
@ -20,14 +22,27 @@ from ayon_core.tools.utils import (
|
|||
FocusSpinBox,
|
||||
FocusDoubleSpinBox,
|
||||
MultiSelectionComboBox,
|
||||
PlaceholderLineEdit,
|
||||
PlaceholderPlainTextEdit,
|
||||
set_style_property,
|
||||
)
|
||||
from ayon_core.tools.utils import NiceCheckbox
|
||||
|
||||
from ._constants import REVERT_TO_DEFAULT_LABEL
|
||||
from .files_widget import FilesWidget
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from typing import Union
|
||||
|
||||
def create_widget_for_attr_def(attr_def, parent=None):
|
||||
widget = _create_widget_for_attr_def(attr_def, parent)
|
||||
|
||||
def create_widget_for_attr_def(
|
||||
attr_def: AbstractAttrDef,
|
||||
parent: Optional[QtWidgets.QWidget] = None,
|
||||
handle_revert_to_default: Optional[bool] = True,
|
||||
):
|
||||
widget = _create_widget_for_attr_def(
|
||||
attr_def, parent, handle_revert_to_default
|
||||
)
|
||||
if not attr_def.visible:
|
||||
widget.setVisible(False)
|
||||
|
||||
|
|
@ -36,42 +51,96 @@ def create_widget_for_attr_def(attr_def, parent=None):
|
|||
return widget
|
||||
|
||||
|
||||
def _create_widget_for_attr_def(attr_def, parent=None):
|
||||
def _create_widget_for_attr_def(
|
||||
attr_def: AbstractAttrDef,
|
||||
parent: "Union[QtWidgets.QWidget, None]",
|
||||
handle_revert_to_default: bool,
|
||||
):
|
||||
if not isinstance(attr_def, AbstractAttrDef):
|
||||
raise TypeError("Unexpected type \"{}\" expected \"{}\"".format(
|
||||
str(type(attr_def)), AbstractAttrDef
|
||||
))
|
||||
|
||||
cls = None
|
||||
if isinstance(attr_def, NumberDef):
|
||||
return NumberAttrWidget(attr_def, parent)
|
||||
cls = NumberAttrWidget
|
||||
|
||||
if isinstance(attr_def, TextDef):
|
||||
return TextAttrWidget(attr_def, parent)
|
||||
elif isinstance(attr_def, TextDef):
|
||||
cls = TextAttrWidget
|
||||
|
||||
if isinstance(attr_def, EnumDef):
|
||||
return EnumAttrWidget(attr_def, parent)
|
||||
elif isinstance(attr_def, EnumDef):
|
||||
cls = EnumAttrWidget
|
||||
|
||||
if isinstance(attr_def, BoolDef):
|
||||
return BoolAttrWidget(attr_def, parent)
|
||||
elif isinstance(attr_def, BoolDef):
|
||||
cls = BoolAttrWidget
|
||||
|
||||
if isinstance(attr_def, UnknownDef):
|
||||
return UnknownAttrWidget(attr_def, parent)
|
||||
elif isinstance(attr_def, UnknownDef):
|
||||
cls = UnknownAttrWidget
|
||||
|
||||
if isinstance(attr_def, HiddenDef):
|
||||
return HiddenAttrWidget(attr_def, parent)
|
||||
elif isinstance(attr_def, HiddenDef):
|
||||
cls = HiddenAttrWidget
|
||||
|
||||
if isinstance(attr_def, FileDef):
|
||||
return FileAttrWidget(attr_def, parent)
|
||||
elif isinstance(attr_def, FileDef):
|
||||
cls = FileAttrWidget
|
||||
|
||||
if isinstance(attr_def, UISeparatorDef):
|
||||
return SeparatorAttrWidget(attr_def, parent)
|
||||
elif isinstance(attr_def, UISeparatorDef):
|
||||
cls = SeparatorAttrWidget
|
||||
|
||||
if isinstance(attr_def, UILabelDef):
|
||||
return LabelAttrWidget(attr_def, parent)
|
||||
elif isinstance(attr_def, UILabelDef):
|
||||
cls = LabelAttrWidget
|
||||
|
||||
raise ValueError("Unknown attribute definition \"{}\"".format(
|
||||
str(type(attr_def))
|
||||
))
|
||||
if cls is None:
|
||||
raise ValueError("Unknown attribute definition \"{}\"".format(
|
||||
str(type(attr_def))
|
||||
))
|
||||
|
||||
return cls(attr_def, parent, handle_revert_to_default)
|
||||
|
||||
|
||||
class AttributeDefinitionsLabel(QtWidgets.QLabel):
|
||||
"""Label related to value attribute definition.
|
||||
|
||||
Label is used to show attribute definition label and to show if value
|
||||
is overridden.
|
||||
|
||||
Label can be right-clicked to revert value to default.
|
||||
"""
|
||||
revert_to_default_requested = QtCore.Signal(str)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
attr_id: str,
|
||||
label: str,
|
||||
parent: QtWidgets.QWidget,
|
||||
):
|
||||
super().__init__(label, parent)
|
||||
|
||||
self._attr_id = attr_id
|
||||
self._overridden = False
|
||||
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
|
||||
|
||||
self.customContextMenuRequested.connect(self._on_context_menu)
|
||||
|
||||
def set_overridden(self, overridden: bool):
|
||||
if self._overridden == overridden:
|
||||
return
|
||||
self._overridden = overridden
|
||||
set_style_property(
|
||||
self,
|
||||
"overridden",
|
||||
"1" if overridden else ""
|
||||
)
|
||||
|
||||
def _on_context_menu(self, point: QtCore.QPoint):
|
||||
menu = QtWidgets.QMenu(self)
|
||||
action = QtWidgets.QAction(menu)
|
||||
action.setText(REVERT_TO_DEFAULT_LABEL)
|
||||
action.triggered.connect(self._request_revert_to_default)
|
||||
menu.addAction(action)
|
||||
menu.exec_(self.mapToGlobal(point))
|
||||
|
||||
def _request_revert_to_default(self):
|
||||
self.revert_to_default_requested.emit(self._attr_id)
|
||||
|
||||
|
||||
class AttributeDefinitionsWidget(QtWidgets.QWidget):
|
||||
|
|
@ -83,16 +152,18 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
|
|||
"""
|
||||
|
||||
def __init__(self, attr_defs=None, parent=None):
|
||||
super(AttributeDefinitionsWidget, self).__init__(parent)
|
||||
super().__init__(parent)
|
||||
|
||||
self._widgets = []
|
||||
self._widgets_by_id = {}
|
||||
self._labels_by_id = {}
|
||||
self._current_keys = set()
|
||||
|
||||
self.set_attr_defs(attr_defs)
|
||||
|
||||
def clear_attr_defs(self):
|
||||
"""Remove all existing widgets and reset layout if needed."""
|
||||
self._widgets = []
|
||||
self._widgets_by_id = {}
|
||||
self._labels_by_id = {}
|
||||
self._current_keys = set()
|
||||
|
||||
layout = self.layout()
|
||||
|
|
@ -133,7 +204,7 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
|
|||
|
||||
self._current_keys.add(attr_def.key)
|
||||
widget = create_widget_for_attr_def(attr_def, self)
|
||||
self._widgets.append(widget)
|
||||
self._widgets_by_id[attr_def.id] = widget
|
||||
|
||||
if not attr_def.visible:
|
||||
continue
|
||||
|
|
@ -145,7 +216,13 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
|
|||
col_num = 2 - expand_cols
|
||||
|
||||
if attr_def.is_value_def and attr_def.label:
|
||||
label_widget = QtWidgets.QLabel(attr_def.label, self)
|
||||
label_widget = AttributeDefinitionsLabel(
|
||||
attr_def.id, attr_def.label, self
|
||||
)
|
||||
label_widget.revert_to_default_requested.connect(
|
||||
self._on_revert_request
|
||||
)
|
||||
self._labels_by_id[attr_def.id] = label_widget
|
||||
tooltip = attr_def.tooltip
|
||||
if tooltip:
|
||||
label_widget.setToolTip(tooltip)
|
||||
|
|
@ -160,6 +237,9 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
|
|||
if not attr_def.is_label_horizontal:
|
||||
row += 1
|
||||
|
||||
if attr_def.is_value_def:
|
||||
widget.value_changed.connect(self._on_value_change)
|
||||
|
||||
layout.addWidget(
|
||||
widget, row, col_num, 1, expand_cols
|
||||
)
|
||||
|
|
@ -168,7 +248,7 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
|
|||
def set_value(self, value):
|
||||
new_value = copy.deepcopy(value)
|
||||
unused_keys = set(new_value.keys())
|
||||
for widget in self._widgets:
|
||||
for widget in self._widgets_by_id.values():
|
||||
attr_def = widget.attr_def
|
||||
if attr_def.key not in new_value:
|
||||
continue
|
||||
|
|
@ -181,22 +261,42 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
|
|||
|
||||
def current_value(self):
|
||||
output = {}
|
||||
for widget in self._widgets:
|
||||
for widget in self._widgets_by_id.values():
|
||||
attr_def = widget.attr_def
|
||||
if not isinstance(attr_def, UIDef):
|
||||
output[attr_def.key] = widget.current_value()
|
||||
|
||||
return output
|
||||
|
||||
def _on_revert_request(self, attr_id):
|
||||
widget = self._widgets_by_id.get(attr_id)
|
||||
if widget is not None:
|
||||
widget.set_value(widget.attr_def.default)
|
||||
|
||||
def _on_value_change(self, value, attr_id):
|
||||
widget = self._widgets_by_id.get(attr_id)
|
||||
if widget is None:
|
||||
return
|
||||
label = self._labels_by_id.get(attr_id)
|
||||
if label is not None:
|
||||
label.set_overridden(value != widget.attr_def.default)
|
||||
|
||||
|
||||
class _BaseAttrDefWidget(QtWidgets.QWidget):
|
||||
# Type 'object' may not work with older PySide versions
|
||||
value_changed = QtCore.Signal(object, str)
|
||||
revert_to_default_requested = QtCore.Signal(str)
|
||||
|
||||
def __init__(self, attr_def, parent):
|
||||
super(_BaseAttrDefWidget, self).__init__(parent)
|
||||
def __init__(
|
||||
self,
|
||||
attr_def: AbstractAttrDef,
|
||||
parent: "Union[QtWidgets.QWidget, None]",
|
||||
handle_revert_to_default: Optional[bool] = True,
|
||||
):
|
||||
super().__init__(parent)
|
||||
|
||||
self.attr_def = attr_def
|
||||
self.attr_def: AbstractAttrDef = attr_def
|
||||
self._handle_revert_to_default: bool = handle_revert_to_default
|
||||
|
||||
main_layout = QtWidgets.QHBoxLayout(self)
|
||||
main_layout.setContentsMargins(0, 0, 0, 0)
|
||||
|
|
@ -205,6 +305,15 @@ class _BaseAttrDefWidget(QtWidgets.QWidget):
|
|||
|
||||
self._ui_init()
|
||||
|
||||
def revert_to_default_value(self):
|
||||
if not self.attr_def.is_value_def:
|
||||
return
|
||||
|
||||
if self._handle_revert_to_default:
|
||||
self.set_value(self.attr_def.default)
|
||||
else:
|
||||
self.revert_to_default_requested.emit(self.attr_def.id)
|
||||
|
||||
def _ui_init(self):
|
||||
raise NotImplementedError(
|
||||
"Method '_ui_init' is not implemented. {}".format(
|
||||
|
|
@ -255,7 +364,7 @@ class ClickableLineEdit(QtWidgets.QLineEdit):
|
|||
clicked = QtCore.Signal()
|
||||
|
||||
def __init__(self, text, parent):
|
||||
super(ClickableLineEdit, self).__init__(parent)
|
||||
super().__init__(parent)
|
||||
self.setText(text)
|
||||
self.setReadOnly(True)
|
||||
|
||||
|
|
@ -264,7 +373,7 @@ class ClickableLineEdit(QtWidgets.QLineEdit):
|
|||
def mousePressEvent(self, event):
|
||||
if event.button() == QtCore.Qt.LeftButton:
|
||||
self._mouse_pressed = True
|
||||
super(ClickableLineEdit, self).mousePressEvent(event)
|
||||
super().mousePressEvent(event)
|
||||
|
||||
def mouseReleaseEvent(self, event):
|
||||
if self._mouse_pressed:
|
||||
|
|
@ -272,7 +381,7 @@ class ClickableLineEdit(QtWidgets.QLineEdit):
|
|||
if self.rect().contains(event.pos()):
|
||||
self.clicked.emit()
|
||||
|
||||
super(ClickableLineEdit, self).mouseReleaseEvent(event)
|
||||
super().mouseReleaseEvent(event)
|
||||
|
||||
|
||||
class NumberAttrWidget(_BaseAttrDefWidget):
|
||||
|
|
@ -284,6 +393,9 @@ class NumberAttrWidget(_BaseAttrDefWidget):
|
|||
else:
|
||||
input_widget = FocusSpinBox(self)
|
||||
|
||||
# Override context menu event to add revert to default action
|
||||
input_widget.contextMenuEvent = self._input_widget_context_event
|
||||
|
||||
if self.attr_def.tooltip:
|
||||
input_widget.setToolTip(self.attr_def.tooltip)
|
||||
|
||||
|
|
@ -321,6 +433,16 @@ class NumberAttrWidget(_BaseAttrDefWidget):
|
|||
self._set_multiselection_visible(True)
|
||||
return False
|
||||
|
||||
def _input_widget_context_event(self, event):
|
||||
line_edit = self._input_widget.lineEdit()
|
||||
menu = line_edit.createStandardContextMenu()
|
||||
menu.setAttribute(QtCore.Qt.WA_DeleteOnClose)
|
||||
action = QtWidgets.QAction(menu)
|
||||
action.setText(REVERT_TO_DEFAULT_LABEL)
|
||||
action.triggered.connect(self.revert_to_default_value)
|
||||
menu.addAction(action)
|
||||
menu.popup(event.globalPos())
|
||||
|
||||
def current_value(self):
|
||||
return self._input_widget.value()
|
||||
|
||||
|
|
@ -382,9 +504,12 @@ class TextAttrWidget(_BaseAttrDefWidget):
|
|||
|
||||
self.multiline = self.attr_def.multiline
|
||||
if self.multiline:
|
||||
input_widget = QtWidgets.QPlainTextEdit(self)
|
||||
input_widget = PlaceholderPlainTextEdit(self)
|
||||
else:
|
||||
input_widget = QtWidgets.QLineEdit(self)
|
||||
input_widget = PlaceholderLineEdit(self)
|
||||
|
||||
# Override context menu event to add revert to default action
|
||||
input_widget.contextMenuEvent = self._input_widget_context_event
|
||||
|
||||
if (
|
||||
self.attr_def.placeholder
|
||||
|
|
@ -407,6 +532,15 @@ class TextAttrWidget(_BaseAttrDefWidget):
|
|||
|
||||
self.main_layout.addWidget(input_widget, 0)
|
||||
|
||||
def _input_widget_context_event(self, event):
|
||||
menu = self._input_widget.createStandardContextMenu()
|
||||
menu.setAttribute(QtCore.Qt.WA_DeleteOnClose)
|
||||
action = QtWidgets.QAction(menu)
|
||||
action.setText(REVERT_TO_DEFAULT_LABEL)
|
||||
action.triggered.connect(self.revert_to_default_value)
|
||||
menu.addAction(action)
|
||||
menu.popup(event.globalPos())
|
||||
|
||||
def _on_value_change(self):
|
||||
if self.multiline:
|
||||
new_value = self._input_widget.toPlainText()
|
||||
|
|
@ -459,6 +593,20 @@ class BoolAttrWidget(_BaseAttrDefWidget):
|
|||
self.main_layout.addWidget(input_widget, 0)
|
||||
self.main_layout.addStretch(1)
|
||||
|
||||
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
|
||||
self.customContextMenuRequested.connect(self._on_context_menu)
|
||||
|
||||
def _on_context_menu(self, pos):
|
||||
self._menu = QtWidgets.QMenu(self)
|
||||
|
||||
action = QtWidgets.QAction(self._menu)
|
||||
action.setText(REVERT_TO_DEFAULT_LABEL)
|
||||
action.triggered.connect(self.revert_to_default_value)
|
||||
self._menu.addAction(action)
|
||||
|
||||
global_pos = self.mapToGlobal(pos)
|
||||
self._menu.exec_(global_pos)
|
||||
|
||||
def _on_value_change(self):
|
||||
new_value = self._input_widget.isChecked()
|
||||
self.value_changed.emit(new_value, self.attr_def.id)
|
||||
|
|
@ -487,7 +635,7 @@ class BoolAttrWidget(_BaseAttrDefWidget):
|
|||
class EnumAttrWidget(_BaseAttrDefWidget):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self._multivalue = False
|
||||
super(EnumAttrWidget, self).__init__(*args, **kwargs)
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
@property
|
||||
def multiselection(self):
|
||||
|
|
@ -495,7 +643,9 @@ class EnumAttrWidget(_BaseAttrDefWidget):
|
|||
|
||||
def _ui_init(self):
|
||||
if self.multiselection:
|
||||
input_widget = MultiSelectionComboBox(self)
|
||||
input_widget = MultiSelectionComboBox(
|
||||
self, placeholder=self.attr_def.placeholder
|
||||
)
|
||||
|
||||
else:
|
||||
input_widget = CustomTextComboBox(self)
|
||||
|
|
@ -509,6 +659,9 @@ class EnumAttrWidget(_BaseAttrDefWidget):
|
|||
for item in self.attr_def.items:
|
||||
input_widget.addItem(item["label"], item["value"])
|
||||
|
||||
if not self.attr_def.items:
|
||||
self._add_empty_item(input_widget)
|
||||
|
||||
idx = input_widget.findData(self.attr_def.default)
|
||||
if idx >= 0:
|
||||
input_widget.setCurrentIndex(idx)
|
||||
|
|
@ -522,6 +675,34 @@ class EnumAttrWidget(_BaseAttrDefWidget):
|
|||
|
||||
self.main_layout.addWidget(input_widget, 0)
|
||||
|
||||
input_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
|
||||
input_widget.customContextMenuRequested.connect(self._on_context_menu)
|
||||
|
||||
def _add_empty_item(self, input_widget):
|
||||
model = input_widget.model()
|
||||
if not isinstance(model, QtGui.QStandardItemModel):
|
||||
return
|
||||
|
||||
root_item = model.invisibleRootItem()
|
||||
|
||||
empty_item = QtGui.QStandardItem()
|
||||
empty_item.setData("< No items to select >", QtCore.Qt.DisplayRole)
|
||||
empty_item.setData("", QtCore.Qt.UserRole)
|
||||
empty_item.setFlags(QtCore.Qt.NoItemFlags)
|
||||
|
||||
root_item.appendRow(empty_item)
|
||||
|
||||
def _on_context_menu(self, pos):
|
||||
menu = QtWidgets.QMenu(self)
|
||||
|
||||
action = QtWidgets.QAction(menu)
|
||||
action.setText(REVERT_TO_DEFAULT_LABEL)
|
||||
action.triggered.connect(self.revert_to_default_value)
|
||||
menu.addAction(action)
|
||||
|
||||
global_pos = self.mapToGlobal(pos)
|
||||
menu.exec_(global_pos)
|
||||
|
||||
def _on_value_change(self):
|
||||
new_value = self.current_value()
|
||||
if self._multivalue:
|
||||
|
|
@ -614,7 +795,7 @@ class HiddenAttrWidget(_BaseAttrDefWidget):
|
|||
def setVisible(self, visible):
|
||||
if visible:
|
||||
visible = False
|
||||
super(HiddenAttrWidget, self).setVisible(visible)
|
||||
super().setVisible(visible)
|
||||
|
||||
def current_value(self):
|
||||
if self._multivalue:
|
||||
|
|
@ -650,10 +831,25 @@ class FileAttrWidget(_BaseAttrDefWidget):
|
|||
|
||||
self.main_layout.addWidget(input_widget, 0)
|
||||
|
||||
input_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
|
||||
input_widget.customContextMenuRequested.connect(self._on_context_menu)
|
||||
input_widget.revert_requested.connect(self.revert_to_default_value)
|
||||
|
||||
def _on_value_change(self):
|
||||
new_value = self.current_value()
|
||||
self.value_changed.emit(new_value, self.attr_def.id)
|
||||
|
||||
def _on_context_menu(self, pos):
|
||||
menu = QtWidgets.QMenu(self)
|
||||
|
||||
action = QtWidgets.QAction(menu)
|
||||
action.setText(REVERT_TO_DEFAULT_LABEL)
|
||||
action.triggered.connect(self.revert_to_default_value)
|
||||
menu.addAction(action)
|
||||
|
||||
global_pos = self.mapToGlobal(pos)
|
||||
menu.exec_(global_pos)
|
||||
|
||||
def current_value(self):
|
||||
return self._input_widget.current_value()
|
||||
|
||||
|
|
|
|||
|
|
@ -1,12 +1,18 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
import collections
|
||||
import contextlib
|
||||
import typing
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
import ayon_api
|
||||
|
||||
from ayon_core.lib import NestedCacheItem
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from typing import Union
|
||||
|
||||
HIERARCHY_MODEL_SENDER = "hierarchy.model"
|
||||
|
||||
|
||||
|
|
@ -82,19 +88,26 @@ class TaskItem:
|
|||
Args:
|
||||
task_id (str): Task id.
|
||||
name (str): Name of task.
|
||||
name (Union[str, None]): Task label.
|
||||
task_type (str): Type of task.
|
||||
parent_id (str): Parent folder id.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, task_id, name, task_type, parent_id
|
||||
self,
|
||||
task_id: str,
|
||||
name: str,
|
||||
label: Union[str, None],
|
||||
task_type: str,
|
||||
parent_id: str,
|
||||
):
|
||||
self.task_id = task_id
|
||||
self.name = name
|
||||
self.label = label
|
||||
self.task_type = task_type
|
||||
self.parent_id = parent_id
|
||||
|
||||
self._label = None
|
||||
self._full_label = None
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
|
|
@ -107,16 +120,17 @@ class TaskItem:
|
|||
return self.task_id
|
||||
|
||||
@property
|
||||
def label(self):
|
||||
def full_label(self):
|
||||
"""Label of task item for UI.
|
||||
|
||||
Returns:
|
||||
str: Label of task item.
|
||||
"""
|
||||
|
||||
if self._label is None:
|
||||
self._label = "{} ({})".format(self.name, self.task_type)
|
||||
return self._label
|
||||
if self._full_label is None:
|
||||
label = self.label or self.name
|
||||
self._full_label = f"{label} ({self.task_type})"
|
||||
return self._full_label
|
||||
|
||||
def to_data(self):
|
||||
"""Converts task item to data.
|
||||
|
|
@ -128,6 +142,7 @@ class TaskItem:
|
|||
return {
|
||||
"task_id": self.task_id,
|
||||
"name": self.name,
|
||||
"label": self.label,
|
||||
"parent_id": self.parent_id,
|
||||
"task_type": self.task_type,
|
||||
}
|
||||
|
|
@ -159,6 +174,7 @@ def _get_task_items_from_tasks(tasks):
|
|||
output.append(TaskItem(
|
||||
task["id"],
|
||||
task["name"],
|
||||
task["label"],
|
||||
task["type"],
|
||||
folder_id
|
||||
))
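
The TaskItem changes above add a stored "label" next to "name" and move the old computed UI string to "full_label". A small sketch of the resulting behaviour (all values invented for illustration):

# Argument order follows the updated constructor above.
task_item = TaskItem(
    "0123456789abcdef",   # task_id
    "modeling",           # name
    None,                 # label (entity without an explicit label)
    "Modeling",           # task_type
    "fedcba9876543210",   # parent folder id
)
print(task_item.full_label)           # -> "modeling (Modeling)" (falls back to name)
print(task_item.to_data()["label"])   # -> None
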
|
||||
|
|
@ -368,7 +384,7 @@ class HierarchyModel(object):
|
|||
sender (Union[str, None]): Who requested the task item.
|
||||
|
||||
Returns:
|
||||
Union[TaskItem, None]: Task item found by name and folder id.
|
||||
Optional[TaskItem]: Task item found by name and folder id.
|
||||
|
||||
"""
|
||||
for task_item in self.get_task_items(project_name, folder_id, sender):
|
||||
|
|
|
|||
8
client/ayon_core/tools/console_interpreter/__init__.py
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
from .abstract import AbstractInterpreterController
|
||||
from .control import InterpreterController
|
||||
|
||||
|
||||
__all__ = (
|
||||
"AbstractInterpreterController",
|
||||
"InterpreterController",
|
||||
)
|
||||
33
client/ayon_core/tools/console_interpreter/abstract.py
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
|
||||
@dataclass
|
||||
class TabItem:
|
||||
name: str
|
||||
code: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class InterpreterConfig:
|
||||
width: Optional[int]
|
||||
height: Optional[int]
|
||||
splitter_sizes: List[int] = field(default_factory=list)
|
||||
tabs: List[TabItem] = field(default_factory=list)
|
||||
|
||||
|
||||
class AbstractInterpreterController(ABC):
|
||||
@abstractmethod
|
||||
def get_config(self) -> InterpreterConfig:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def save_config(
|
||||
self,
|
||||
width: int,
|
||||
height: int,
|
||||
splitter_sizes: List[int],
|
||||
tabs: List[Dict[str, str]],
|
||||
):
|
||||
pass
|
||||
63
client/ayon_core/tools/console_interpreter/control.py
Normal file
|
|
@ -0,0 +1,63 @@
|
|||
from typing import List, Dict
|
||||
|
||||
from ayon_core.lib import JSONSettingRegistry
|
||||
from ayon_core.lib.local_settings import get_launcher_local_dir
|
||||
|
||||
from .abstract import (
|
||||
AbstractInterpreterController,
|
||||
TabItem,
|
||||
InterpreterConfig,
|
||||
)
|
||||
|
||||
|
||||
class InterpreterController(AbstractInterpreterController):
|
||||
def __init__(self):
|
||||
self._registry = JSONSettingRegistry(
|
||||
"python_interpreter_tool",
|
||||
get_launcher_local_dir(),
|
||||
)
|
||||
|
||||
def get_config(self):
|
||||
width = None
|
||||
height = None
|
||||
splitter_sizes = []
|
||||
tabs = []
|
||||
try:
|
||||
width = self._registry.get_item("width")
|
||||
height = self._registry.get_item("height")
|
||||
|
||||
except (ValueError, KeyError):
|
||||
pass
|
||||
|
||||
try:
|
||||
splitter_sizes = self._registry.get_item("splitter_sizes")
|
||||
except (ValueError, KeyError):
|
||||
pass
|
||||
|
||||
try:
|
||||
tab_defs = self._registry.get_item("tabs") or []
|
||||
for tab_def in tab_defs:
|
||||
tab_name = tab_def.get("name")
|
||||
if not tab_name:
|
||||
continue
|
||||
code = tab_def.get("code") or ""
|
||||
tabs.append(TabItem(tab_name, code))
|
||||
|
||||
except (ValueError, KeyError):
|
||||
pass
|
||||
|
||||
return InterpreterConfig(
|
||||
width, height, splitter_sizes, tabs
|
||||
)
|
||||
|
||||
def save_config(
|
||||
self,
|
||||
width: int,
|
||||
height: int,
|
||||
splitter_sizes: List[int],
|
||||
tabs: List[Dict[str, str]],
|
||||
):
|
||||
self._registry.set_item("width", width)
|
||||
self._registry.set_item("height", height)
|
||||
self._registry.set_item("splitter_sizes", splitter_sizes)
|
||||
self._registry.set_item("tabs", tabs)
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
from .window import (
|
||||
ConsoleInterpreterWindow
|
||||
)
|
||||
|
||||
|
||||
__all__ = (
|
||||
"ConsoleInterpreterWindow",
|
||||
)
|
||||
42
client/ayon_core/tools/console_interpreter/ui/utils.py
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
import os
|
||||
import sys
|
||||
import collections
|
||||
|
||||
|
||||
class StdOEWrap:
|
||||
def __init__(self):
|
||||
self._origin_stdout_write = None
|
||||
self._origin_stderr_write = None
|
||||
self._listening = False
|
||||
self.lines = collections.deque()
|
||||
|
||||
if not sys.stdout:
|
||||
sys.stdout = open(os.devnull, "w")
|
||||
|
||||
if not sys.stderr:
|
||||
sys.stderr = open(os.devnull, "w")
|
||||
|
||||
if self._origin_stdout_write is None:
|
||||
self._origin_stdout_write = sys.stdout.write
|
||||
|
||||
if self._origin_stderr_write is None:
|
||||
self._origin_stderr_write = sys.stderr.write
|
||||
|
||||
self._listening = True
|
||||
sys.stdout.write = self._stdout_listener
|
||||
sys.stderr.write = self._stderr_listener
|
||||
|
||||
def stop_listen(self):
|
||||
self._listening = False
|
||||
|
||||
def _stdout_listener(self, text):
|
||||
if self._listening:
|
||||
self.lines.append(text)
|
||||
if self._origin_stdout_write is not None:
|
||||
self._origin_stdout_write(text)
|
||||
|
||||
def _stderr_listener(self, text):
|
||||
if self._listening:
|
||||
self.lines.append(text)
|
||||
if self._origin_stderr_write is not None:
|
||||
self._origin_stderr_write(text)
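
StdOEWrap above mirrors every stdout/stderr write into an in-memory deque while still forwarding it to the original stream. A tiny usage sketch:

import sys

wrapper = StdOEWrap()

print("hello from stdout")                 # still reaches the terminal
print("and from stderr", file=sys.stderr)

buffered = list(wrapper.lines)   # text written above (print adds separate "\n" entries)
wrapper.stop_listen()            # keep forwarding, stop buffering
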
|
||||
251
client/ayon_core/tools/console_interpreter/ui/widgets.py
Normal file
|
|
@ -0,0 +1,251 @@
|
|||
from code import InteractiveInterpreter
|
||||
|
||||
from qtpy import QtCore, QtWidgets, QtGui
|
||||
|
||||
|
||||
class PythonCodeEditor(QtWidgets.QPlainTextEdit):
|
||||
execute_requested = QtCore.Signal()
|
||||
|
||||
def __init__(self, parent):
|
||||
super().__init__(parent)
|
||||
|
||||
self.setObjectName("PythonCodeEditor")
|
||||
|
||||
self._indent = 4
|
||||
|
||||
def _tab_shift_right(self):
|
||||
cursor = self.textCursor()
|
||||
selected_text = cursor.selectedText()
|
||||
if not selected_text:
|
||||
cursor.insertText(" " * self._indent)
|
||||
return
|
||||
|
||||
sel_start = cursor.selectionStart()
|
||||
sel_end = cursor.selectionEnd()
|
||||
cursor.setPosition(sel_end)
|
||||
end_line = cursor.blockNumber()
|
||||
cursor.setPosition(sel_start)
|
||||
while True:
|
||||
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
|
||||
text = cursor.block().text()
|
||||
spaces = len(text) - len(text.lstrip(" "))
|
||||
new_spaces = spaces % self._indent
|
||||
if not new_spaces:
|
||||
new_spaces = self._indent
|
||||
|
||||
cursor.insertText(" " * new_spaces)
|
||||
if cursor.blockNumber() == end_line:
|
||||
break
|
||||
|
||||
cursor.movePosition(QtGui.QTextCursor.NextBlock)
|
||||
|
||||
def _tab_shift_left(self):
|
||||
tmp_cursor = self.textCursor()
|
||||
sel_start = tmp_cursor.selectionStart()
|
||||
sel_end = tmp_cursor.selectionEnd()
|
||||
|
||||
cursor = QtGui.QTextCursor(self.document())
|
||||
cursor.setPosition(sel_end)
|
||||
end_line = cursor.blockNumber()
|
||||
cursor.setPosition(sel_start)
|
||||
while True:
|
||||
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
|
||||
text = cursor.block().text()
|
||||
spaces = len(text) - len(text.lstrip(" "))
|
||||
if spaces:
|
||||
spaces_to_remove = (spaces % self._indent) or self._indent
|
||||
if spaces_to_remove > spaces:
|
||||
spaces_to_remove = spaces
|
||||
|
||||
cursor.setPosition(
|
||||
cursor.position() + spaces_to_remove,
|
||||
QtGui.QTextCursor.KeepAnchor
|
||||
)
|
||||
cursor.removeSelectedText()
|
||||
|
||||
if cursor.blockNumber() == end_line:
|
||||
break
|
||||
|
||||
cursor.movePosition(QtGui.QTextCursor.NextBlock)
|
||||
|
||||
def keyPressEvent(self, event):
|
||||
if event.key() == QtCore.Qt.Key_Backtab:
|
||||
self._tab_shift_left()
|
||||
event.accept()
|
||||
return
|
||||
|
||||
if event.key() == QtCore.Qt.Key_Tab:
|
||||
if event.modifiers() == QtCore.Qt.NoModifier:
|
||||
self._tab_shift_right()
|
||||
event.accept()
|
||||
return
|
||||
|
||||
if (
|
||||
event.key() == QtCore.Qt.Key_Return
|
||||
and event.modifiers() == QtCore.Qt.ControlModifier
|
||||
):
|
||||
self.execute_requested.emit()
|
||||
event.accept()
|
||||
return
|
||||
|
||||
super().keyPressEvent(event)
|
||||
|
||||
|
||||
class PythonTabWidget(QtWidgets.QWidget):
|
||||
add_tab_requested = QtCore.Signal()
|
||||
before_execute = QtCore.Signal(str)
|
||||
|
||||
def __init__(self, parent):
|
||||
super().__init__(parent)
|
||||
|
||||
code_input = PythonCodeEditor(self)
|
||||
|
||||
self.setFocusProxy(code_input)
|
||||
|
||||
add_tab_btn = QtWidgets.QPushButton("Add tab...", self)
|
||||
add_tab_btn.setDefault(False)
|
||||
add_tab_btn.setToolTip("Add new tab")
|
||||
|
||||
execute_btn = QtWidgets.QPushButton("Execute", self)
|
||||
execute_btn.setDefault(False)
|
||||
execute_btn.setToolTip("Execute command (Ctrl + Enter)")
|
||||
|
||||
btns_layout = QtWidgets.QHBoxLayout()
|
||||
btns_layout.setContentsMargins(0, 0, 0, 0)
|
||||
btns_layout.addWidget(add_tab_btn)
|
||||
btns_layout.addStretch(1)
|
||||
btns_layout.addWidget(execute_btn)
|
||||
|
||||
layout = QtWidgets.QVBoxLayout(self)
|
||||
layout.setContentsMargins(0, 0, 0, 0)
|
||||
layout.addWidget(code_input, 1)
|
||||
layout.addLayout(btns_layout, 0)
|
||||
|
||||
add_tab_btn.clicked.connect(self._on_add_tab_clicked)
|
||||
execute_btn.clicked.connect(self._on_execute_clicked)
|
||||
code_input.execute_requested.connect(self.execute)
|
||||
|
||||
self._code_input = code_input
|
||||
self._interpreter = InteractiveInterpreter()
|
||||
|
||||
def _on_add_tab_clicked(self):
|
||||
self.add_tab_requested.emit()
|
||||
|
||||
def _on_execute_clicked(self):
|
||||
self.execute()
|
||||
|
||||
def get_code(self):
|
||||
return self._code_input.toPlainText()
|
||||
|
||||
def set_code(self, code_text):
|
||||
self._code_input.setPlainText(code_text)
|
||||
|
||||
def execute(self):
|
||||
code_text = self._code_input.toPlainText()
|
||||
self.before_execute.emit(code_text)
|
||||
self._interpreter.runcode(code_text)
|
||||
|
||||
|
||||
class TabNameDialog(QtWidgets.QDialog):
|
||||
default_width = 330
|
||||
default_height = 85
|
||||
|
||||
def __init__(self, parent):
|
||||
super().__init__(parent)
|
||||
|
||||
self.setWindowTitle("Enter tab name")
|
||||
|
||||
name_label = QtWidgets.QLabel("Tab name:", self)
|
||||
name_input = QtWidgets.QLineEdit(self)
|
||||
|
||||
inputs_layout = QtWidgets.QHBoxLayout()
|
||||
inputs_layout.addWidget(name_label)
|
||||
inputs_layout.addWidget(name_input)
|
||||
|
||||
ok_btn = QtWidgets.QPushButton("Ok", self)
|
||||
cancel_btn = QtWidgets.QPushButton("Cancel", self)
|
||||
btns_layout = QtWidgets.QHBoxLayout()
|
||||
btns_layout.addStretch(1)
|
||||
btns_layout.addWidget(ok_btn)
|
||||
btns_layout.addWidget(cancel_btn)
|
||||
|
||||
layout = QtWidgets.QVBoxLayout(self)
|
||||
layout.addLayout(inputs_layout)
|
||||
layout.addStretch(1)
|
||||
layout.addLayout(btns_layout)
|
||||
|
||||
ok_btn.clicked.connect(self._on_ok_clicked)
|
||||
cancel_btn.clicked.connect(self._on_cancel_clicked)
|
||||
|
||||
self._name_input = name_input
|
||||
self._ok_btn = ok_btn
|
||||
self._cancel_btn = cancel_btn
|
||||
|
||||
self._result = None
|
||||
|
||||
self.resize(self.default_width, self.default_height)
|
||||
|
||||
def set_tab_name(self, name):
|
||||
self._name_input.setText(name)
|
||||
|
||||
def result(self):
|
||||
return self._result
|
||||
|
||||
def showEvent(self, event):
|
||||
super().showEvent(event)
|
||||
btns_width = max(
|
||||
self._ok_btn.width(),
|
||||
self._cancel_btn.width()
|
||||
)
|
||||
|
||||
self._ok_btn.setMinimumWidth(btns_width)
|
||||
self._cancel_btn.setMinimumWidth(btns_width)
|
||||
|
||||
def _on_ok_clicked(self):
|
||||
self._result = self._name_input.text()
|
||||
self.accept()
|
||||
|
||||
def _on_cancel_clicked(self):
|
||||
self._result = None
|
||||
self.reject()
|
||||
|
||||
|
||||
class OutputTextWidget(QtWidgets.QTextEdit):
|
||||
v_max_offset = 4
|
||||
|
||||
def vertical_scroll_at_max(self):
|
||||
v_scroll = self.verticalScrollBar()
|
||||
return v_scroll.value() > v_scroll.maximum() - self.v_max_offset
|
||||
|
||||
def scroll_to_bottom(self):
|
||||
v_scroll = self.verticalScrollBar()
|
||||
return v_scroll.setValue(v_scroll.maximum())
|
||||
|
||||
|
||||
class EnhancedTabBar(QtWidgets.QTabBar):
|
||||
double_clicked = QtCore.Signal(QtCore.QPoint)
|
||||
right_clicked = QtCore.Signal(QtCore.QPoint)
|
||||
mid_clicked = QtCore.Signal(QtCore.QPoint)
|
||||
|
||||
def __init__(self, parent):
|
||||
super().__init__(parent)
|
||||
|
||||
self.setDrawBase(False)
|
||||
|
||||
def mouseDoubleClickEvent(self, event):
|
||||
self.double_clicked.emit(event.globalPos())
|
||||
event.accept()
|
||||
|
||||
def mouseReleaseEvent(self, event):
|
||||
if event.button() == QtCore.Qt.RightButton:
|
||||
self.right_clicked.emit(event.globalPos())
|
||||
event.accept()
|
||||
return
|
||||
|
||||
elif event.button() == QtCore.Qt.MidButton:
|
||||
self.mid_clicked.emit(event.globalPos())
|
||||
event.accept()
|
||||
|
||||
else:
|
||||
super().mouseReleaseEvent(event)
|
||||
|
||||
324
client/ayon_core/tools/console_interpreter/ui/window.py
Normal file
|
|
@ -0,0 +1,324 @@
|
|||
import re
|
||||
from typing import Optional
|
||||
|
||||
from qtpy import QtWidgets, QtGui, QtCore
|
||||
|
||||
from ayon_core import resources
|
||||
from ayon_core.style import load_stylesheet
|
||||
from ayon_core.tools.console_interpreter import (
|
||||
AbstractInterpreterController,
|
||||
InterpreterController,
|
||||
)
|
||||
|
||||
from .utils import StdOEWrap
|
||||
from .widgets import (
|
||||
PythonTabWidget,
|
||||
OutputTextWidget,
|
||||
EnhancedTabBar,
|
||||
TabNameDialog,
|
||||
)
|
||||
|
||||
ANSI_ESCAPE = re.compile(
|
||||
r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]"
|
||||
)
|
||||
AYON_ART = r"""
|
||||
|
||||
▄██▄
|
||||
▄███▄ ▀██▄ ▀██▀ ▄██▀ ▄██▀▀▀██▄ ▀███▄ █▄
|
||||
▄▄ ▀██▄ ▀██▄ ▄██▀ ██▀ ▀██▄ ▄ ▀██▄ ███
|
||||
▄██▀ ██▄ ▀ ▄▄ ▀ ██ ▄██ ███ ▀██▄ ███
|
||||
▄██▀ ▀██▄ ██ ▀██▄ ▄██▀ ███ ▀██ ▀█▀
|
||||
▄██▀ ▀██▄ ▀█ ▀██▄▄▄▄██▀ █▀ ▀██▄
|
||||
|
||||
· · - =[ by YNPUT ]:[ http://ayon.ynput.io ]= - · ·
|
||||
|
||||
"""
|
||||
|
||||
|
||||
class ConsoleInterpreterWindow(QtWidgets.QWidget):
|
||||
default_width = 1000
|
||||
default_height = 600
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
controller: Optional[AbstractInterpreterController] = None,
|
||||
parent: Optional[QtWidgets.QWidget] = None,
|
||||
):
|
||||
super().__init__(parent)
|
||||
|
||||
self.setWindowTitle("AYON Console")
|
||||
self.setWindowIcon(QtGui.QIcon(resources.get_ayon_icon_filepath()))
|
||||
|
||||
if controller is None:
|
||||
controller = InterpreterController()
|
||||
|
||||
output_widget = OutputTextWidget(self)
|
||||
output_widget.setObjectName("PythonInterpreterOutput")
|
||||
output_widget.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
|
||||
output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction)
|
||||
|
||||
tab_widget = QtWidgets.QTabWidget(self)
|
||||
tab_bar = EnhancedTabBar(tab_widget)
|
||||
tab_widget.setTabBar(tab_bar)
|
||||
tab_widget.setTabsClosable(False)
|
||||
tab_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
|
||||
|
||||
widgets_splitter = QtWidgets.QSplitter(self)
|
||||
widgets_splitter.setOrientation(QtCore.Qt.Vertical)
|
||||
widgets_splitter.addWidget(output_widget)
|
||||
widgets_splitter.addWidget(tab_widget)
|
||||
widgets_splitter.setStretchFactor(0, 1)
|
||||
widgets_splitter.setStretchFactor(1, 1)
|
||||
height = int(self.default_height / 2)
|
||||
widgets_splitter.setSizes([height, self.default_height - height])
|
||||
|
||||
layout = QtWidgets.QVBoxLayout(self)
|
||||
layout.addWidget(widgets_splitter)
|
||||
|
||||
line_check_timer = QtCore.QTimer()
|
||||
line_check_timer.setInterval(200)
|
||||
|
||||
line_check_timer.timeout.connect(self._on_timer_timeout)
|
||||
tab_bar.right_clicked.connect(self._on_tab_right_click)
|
||||
tab_bar.double_clicked.connect(self._on_tab_double_click)
|
||||
tab_bar.mid_clicked.connect(self._on_tab_mid_click)
|
||||
tab_widget.tabCloseRequested.connect(self._on_tab_close_req)
|
||||
|
||||
self._tabs = []
|
||||
|
||||
self._stdout_err_wrapper = StdOEWrap()
|
||||
|
||||
self._widgets_splitter = widgets_splitter
|
||||
self._output_widget = output_widget
|
||||
self._tab_widget = tab_widget
|
||||
self._line_check_timer = line_check_timer
|
||||
|
||||
self._append_lines([AYON_ART])
|
||||
|
||||
self._first_show = True
|
||||
self._controller = controller
|
||||
|
||||
def showEvent(self, event):
|
||||
self._line_check_timer.start()
|
||||
super().showEvent(event)
|
||||
# First show setup
|
||||
if self._first_show:
|
||||
self._first_show = False
|
||||
self._on_first_show()
|
||||
|
||||
if self._tab_widget.count() < 1:
|
||||
self.add_tab("Python")
|
||||
|
||||
self._output_widget.scroll_to_bottom()
|
||||
|
||||
def closeEvent(self, event):
|
||||
self._save_registry()
|
||||
super().closeEvent(event)
|
||||
self._line_check_timer.stop()
|
||||
|
||||
def add_tab(self, tab_name, index=None):
|
||||
widget = PythonTabWidget(self)
|
||||
widget.before_execute.connect(self._on_before_execute)
|
||||
widget.add_tab_requested.connect(self._on_add_requested)
|
||||
if index is None:
|
||||
if self._tab_widget.count() > 0:
|
||||
index = self._tab_widget.currentIndex() + 1
|
||||
else:
|
||||
index = 0
|
||||
|
||||
self._tabs.append(widget)
|
||||
self._tab_widget.insertTab(index, widget, tab_name)
|
||||
self._tab_widget.setCurrentIndex(index)
|
||||
|
||||
if self._tab_widget.count() > 1:
|
||||
self._tab_widget.setTabsClosable(True)
|
||||
widget.setFocus()
|
||||
return widget
|
||||
|
||||
def _on_first_show(self):
|
||||
config = self._controller.get_config()
|
||||
width = config.width
|
||||
height = config.height
|
||||
if width is None or width < 200:
|
||||
width = self.default_width
|
||||
if height is None or height < 200:
|
||||
height = self.default_height
|
||||
|
||||
for tab_item in config.tabs:
|
||||
widget = self.add_tab(tab_item.name)
|
||||
widget.set_code(tab_item.code)
|
||||
|
||||
self.resize(width, height)
|
||||
# Change stylesheet
|
||||
self.setStyleSheet(load_stylesheet())
|
||||
# Check if splitter sizes are set
|
||||
splitters_count = len(self._widgets_splitter.sizes())
|
||||
if len(config.splitter_sizes) == splitters_count:
self._widgets_splitter.setSizes(config.splitter_sizes)

def _save_registry(self):
tabs = []
for tab_idx in range(self._tab_widget.count()):
widget = self._tab_widget.widget(tab_idx)
tabs.append({
"name": self._tab_widget.tabText(tab_idx),
"code": widget.get_code()
})

self._controller.save_config(
self.width(),
self.height(),
self._widgets_splitter.sizes(),
tabs
)

def _on_tab_right_click(self, global_point):
point = self._tab_widget.mapFromGlobal(global_point)
tab_bar = self._tab_widget.tabBar()
tab_idx = tab_bar.tabAt(point)
last_index = tab_bar.count() - 1
if tab_idx < 0 or tab_idx > last_index:
return

menu = QtWidgets.QMenu(self._tab_widget)

add_tab_action = QtWidgets.QAction("Add tab...", menu)
add_tab_action.setToolTip("Add new tab")

rename_tab_action = QtWidgets.QAction("Rename...", menu)
rename_tab_action.setToolTip("Rename tab")

duplicate_tab_action = QtWidgets.QAction("Duplicate...", menu)
duplicate_tab_action.setToolTip("Duplicate code to new tab")

close_tab_action = QtWidgets.QAction("Close", menu)
close_tab_action.setToolTip("Close tab and lose content")
close_tab_action.setEnabled(self._tab_widget.tabsClosable())

menu.addAction(add_tab_action)
menu.addAction(rename_tab_action)
menu.addAction(duplicate_tab_action)
menu.addAction(close_tab_action)

result = menu.exec_(global_point)
if result is None:
return

if result is rename_tab_action:
self._rename_tab_req(tab_idx)

elif result is add_tab_action:
self._on_add_requested()

elif result is duplicate_tab_action:
self._duplicate_requested(tab_idx)

elif result is close_tab_action:
self._on_tab_close_req(tab_idx)

def _rename_tab_req(self, tab_idx):
dialog = TabNameDialog(self)
dialog.set_tab_name(self._tab_widget.tabText(tab_idx))
dialog.exec_()
tab_name = dialog.result()
if tab_name:
self._tab_widget.setTabText(tab_idx, tab_name)

def _duplicate_requested(self, tab_idx=None):
if tab_idx is None:
tab_idx = self._tab_widget.currentIndex()

src_widget = self._tab_widget.widget(tab_idx)
dst_widget = self._add_tab()
if dst_widget is None:
return
dst_widget.set_code(src_widget.get_code())

def _on_tab_mid_click(self, global_point):
point = self._tab_widget.mapFromGlobal(global_point)
tab_bar = self._tab_widget.tabBar()
tab_idx = tab_bar.tabAt(point)
last_index = tab_bar.count() - 1
if tab_idx < 0 or tab_idx > last_index:
return

self._on_tab_close_req(tab_idx)

def _on_tab_double_click(self, global_point):
point = self._tab_widget.mapFromGlobal(global_point)
tab_bar = self._tab_widget.tabBar()
tab_idx = tab_bar.tabAt(point)
last_index = tab_bar.count() - 1
if tab_idx < 0 or tab_idx > last_index:
return

self._rename_tab_req(tab_idx)

def _on_tab_close_req(self, tab_index):
if self._tab_widget.count() == 1:
return

widget = self._tab_widget.widget(tab_index)
if widget in self._tabs:
self._tabs.remove(widget)
self._tab_widget.removeTab(tab_index)

if self._tab_widget.count() == 1:
self._tab_widget.setTabsClosable(False)

def _append_lines(self, lines):
at_max = self._output_widget.vertical_scroll_at_max()
tmp_cursor = QtGui.QTextCursor(self._output_widget.document())
tmp_cursor.movePosition(QtGui.QTextCursor.End)
for line in lines:
tmp_cursor.insertText(line)

if at_max:
self._output_widget.scroll_to_bottom()

def _on_timer_timeout(self):
if self._stdout_err_wrapper.lines:
lines = []
while self._stdout_err_wrapper.lines:
line = self._stdout_err_wrapper.lines.popleft()
lines.append(ANSI_ESCAPE.sub("", line))
self._append_lines(lines)

def _on_add_requested(self):
self._add_tab()

def _add_tab(self):
dialog = TabNameDialog(self)
dialog.exec_()
tab_name = dialog.result()
if tab_name:
return self.add_tab(tab_name)

return None

def _on_before_execute(self, code_text):
at_max = self._output_widget.vertical_scroll_at_max()
document = self._output_widget.document()
tmp_cursor = QtGui.QTextCursor(document)
tmp_cursor.movePosition(QtGui.QTextCursor.End)
tmp_cursor.insertText("{}\nExecuting command:\n".format(20 * "-"))

code_block_format = QtGui.QTextFrameFormat()
code_block_format.setBackground(QtGui.QColor(27, 27, 27))
code_block_format.setPadding(4)

tmp_cursor.insertFrame(code_block_format)
char_format = tmp_cursor.charFormat()
char_format.setForeground(
QtGui.QBrush(QtGui.QColor(114, 224, 198))
)
tmp_cursor.setCharFormat(char_format)
tmp_cursor.insertText(code_text)

# Create new cursor
tmp_cursor = QtGui.QTextCursor(document)
tmp_cursor.movePosition(QtGui.QTextCursor.End)
tmp_cursor.insertText("{}\n".format(20 * "-"))

if at_max:
self._output_widget.scroll_to_bottom()
@ -104,7 +104,7 @@ class ProductNameValidator(RegularExpressionValidatorClass):

def validate(self, text, pos):
results = super(ProductNameValidator, self).validate(text, pos)
if results[0] == self.Invalid:
if results[0] == RegularExpressionValidatorClass.Invalid:
self.invalid.emit(self.invalid_chars(text))
return results

@ -217,7 +217,9 @@ class ProductTypeDescriptionWidget(QtWidgets.QWidget):

product_type_label = QtWidgets.QLabel(self)
product_type_label.setObjectName("CreatorProductTypeLabel")
product_type_label.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft)
product_type_label.setAlignment(
QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft
)

help_label = QtWidgets.QLabel(self)
help_label.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
@ -7,6 +7,7 @@ from ayon_core.pipeline.actions import (
discover_launcher_actions,
LauncherAction,
LauncherActionSelection,
register_launcher_action_path,
)
from ayon_core.pipeline.workfile import should_use_last_workfile_on_launch

@ -21,9 +22,9 @@ except ImportError:

Application action based on 'ApplicationManager' system.

Handling of applications in launcher is not ideal and should be completely
redone from scratch. This is just a temporary solution to keep backwards
compatibility with AYON launcher.
Handling of applications in launcher is not ideal and should be
completely redone from scratch. This is just a temporary solution
to keep backwards compatibility with AYON launcher.

Todos:
Move handling of errors to frontend.

@ -459,6 +460,14 @@ class ActionsModel:

def _get_discovered_action_classes(self):
if self._discovered_actions is None:
# NOTE We don't need to register the paths, but that would
# require to change discovery logic and deprecate all functions
# related to registering and discovering launcher actions.
addons_manager = self._get_addons_manager()
actions_paths = addons_manager.collect_launcher_action_paths()
for path in actions_paths:
if path and os.path.exists(path):
register_launcher_action_path(path)
self._discovered_actions = (
discover_launcher_actions()
+ self._get_applications_action_classes()
@ -265,7 +265,7 @@ class ActionDelegate(QtWidgets.QStyledItemDelegate):

if index.data(FORCE_NOT_OPEN_WORKFILE_ROLE):
rect = QtCore.QRectF(
option.rect.x(), option.rect.height(), 5, 5)
option.rect.x(), option.rect.y() + option.rect.height(), 5, 5)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtGui.QColor(200, 0, 0))
painter.drawEllipse(rect)

@ -202,8 +202,9 @@ class LauncherWindow(QtWidgets.QWidget):
self._go_to_hierarchy_page(project_name)

def _on_projects_refresh(self):
# There is nothing to do, we're on projects page
# Refresh only actions on projects page
if self._is_on_projects_page:
self._actions_widget.refresh()
return

# No projects were found -> go back to projects page
@ -108,6 +108,7 @@ class VersionItem:
version (int): Version. Can be negative when is hero version.
is_hero (bool): Is hero version.
product_id (str): Product id.
task_id (Union[str, None]): Task id.
thumbnail_id (Union[str, None]): Thumbnail id.
published_time (Union[str, None]): Published time in format
'%Y%m%dT%H%M%SZ'.

@ -127,6 +128,7 @@ class VersionItem:
version,
is_hero,
product_id,
task_id,
thumbnail_id,
published_time,
author,

@ -140,6 +142,7 @@ class VersionItem:
):
self.version_id = version_id
self.product_id = product_id
self.task_id = task_id
self.thumbnail_id = thumbnail_id
self.version = version
self.is_hero = is_hero

@ -161,6 +164,7 @@ class VersionItem:
and self.version == other.version
and self.version_id == other.version_id
and self.product_id == other.product_id
and self.task_id == other.task_id
)

def __ne__(self, other):

@ -198,6 +202,7 @@ class VersionItem:
return {
"version_id": self.version_id,
"product_id": self.product_id,
"task_id": self.task_id,
"thumbnail_id": self.thumbnail_id,
"version": self.version,
"is_hero": self.is_hero,

@ -536,6 +541,55 @@ class FrontendLoaderController(_BaseLoaderController):
"""
pass

@abstractmethod
def get_task_items(self, project_name, folder_ids, sender=None):
"""Task items for folder ids.

Args:
project_name (str): Project name.
folder_ids (Iterable[str]): Folder ids.
sender (Optional[str]): Sender who requested the items.

Returns:
list[TaskItem]: List of task items.

"""
pass

@abstractmethod
def get_task_type_items(self, project_name, sender=None):
"""Task type items for a project.

This function may trigger events with topics
'projects.task_types.refresh.started' and
'projects.task_types.refresh.finished' which will contain 'sender'
value in data.
That may help to avoid re-refresh of items in UI elements.

Args:
project_name (str): Project name.
sender (str): Who requested task type items.

Returns:
list[TaskTypeItem]: Task type information.

"""
pass

@abstractmethod
def get_folder_labels(self, project_name, folder_ids):
"""Get folder labels for folder ids.

Args:
project_name (str): Project name.
folder_ids (Iterable[str]): Folder ids.

Returns:
dict[str, Optional[str]]: Folder labels by folder id.

"""
pass

@abstractmethod
def get_project_status_items(self, project_name, sender=None):
"""Items for all projects available on server.

@ -717,8 +771,30 @@ class FrontendLoaderController(_BaseLoaderController):

Returns:
list[str]: Selected folder ids.
"""

"""
pass

@abstractmethod
def get_selected_task_ids(self):
"""Get selected task ids.

The information is based on last selection from UI.

Returns:
list[str]: Selected folder ids.

"""
pass

@abstractmethod
def set_selected_tasks(self, task_ids):
"""Set selected tasks.

Args:
task_ids (Iterable[str]): Selected task ids.

"""
pass

@abstractmethod

@ -729,8 +805,8 @@ class FrontendLoaderController(_BaseLoaderController):

Returns:
list[str]: Selected version ids.
"""

"""
pass

@abstractmethod
@ -198,6 +198,31 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
def get_folder_items(self, project_name, sender=None):
return self._hierarchy_model.get_folder_items(project_name, sender)

def get_task_items(self, project_name, folder_ids, sender=None):
output = []
for folder_id in folder_ids:
output.extend(self._hierarchy_model.get_task_items(
project_name, folder_id, sender
))
return output

def get_task_type_items(self, project_name, sender=None):
return self._projects_model.get_task_type_items(
project_name, sender
)

def get_folder_labels(self, project_name, folder_ids):
folder_items_by_id = self._hierarchy_model.get_folder_items_by_id(
project_name, folder_ids
)
output = {}
for folder_id, folder_item in folder_items_by_id.items():
label = None
if folder_item is not None:
label = folder_item.label
output[folder_id] = label
return output

def get_product_items(self, project_name, folder_ids, sender=None):
return self._products_model.get_product_items(
project_name, folder_ids, sender)

@ -299,6 +324,12 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
def set_selected_folders(self, folder_ids):
self._selection_model.set_selected_folders(folder_ids)

def get_selected_task_ids(self):
return self._selection_model.get_selected_task_ids()

def set_selected_tasks(self, task_ids):
self._selection_model.set_selected_tasks(task_ids)

def get_selected_version_ids(self):
return self._selection_model.get_selected_version_ids()

@ -372,17 +403,17 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):

repre_ids = set()
for container in containers:
repre_id = container.get("representation")
# Ignore invalid representation ids.
# - invalid representation ids may be available if e.g. is
# opened scene from OpenPype whe 'ObjectId' was used instead
# of 'uuid'.
# NOTE: Server call would crash if there is any invalid id.
# That would cause crash we won't get any information.
try:
repre_id = container.get("representation")
# Ignore invalid representation ids.
# - invalid representation ids may be available if e.g. is
# opened scene from OpenPype whe 'ObjectId' was used
# instead of 'uuid'.
# NOTE: Server call would crash if there is any invalid id.
# That would cause crash we won't get any information.
uuid.UUID(repre_id)
repre_ids.add(repre_id)
except ValueError:
except (ValueError, TypeError, AttributeError):
pass

product_ids = self._products_model.get_product_ids_by_repre_ids(
@ -55,6 +55,7 @@ def version_item_from_entity(version):
version=version_num,
is_hero=is_hero,
product_id=version["productId"],
task_id=version["taskId"],
thumbnail_id=version["thumbnailId"],
published_time=published_time,
author=author,

@ -14,6 +14,7 @@ class SelectionModel(object):

self._project_name = None
self._folder_ids = set()
self._task_ids = set()
self._version_ids = set()
self._representation_ids = set()

@ -48,6 +49,23 @@ class SelectionModel(object):
self.event_source
)

def get_selected_task_ids(self):
return self._task_ids

def set_selected_tasks(self, task_ids):
if task_ids == self._task_ids:
return

self._task_ids = task_ids
self._controller.emit_event(
"selection.tasks.changed",
{
"project_name": self._project_name,
"task_ids": task_ids,
},
self.event_source
)

def get_selected_version_ids(self):
return self._version_ids
@ -1,7 +1,10 @@
from __future__ import annotations
import typing
from typing import List, Tuple, Optional, Iterable, Any

from qtpy import QtWidgets, QtCore, QtGui

from ayon_core.tools.utils import get_qt_icon
from ayon_core.tools.utils.lib import (
checkstate_int_to_enum,
checkstate_enum_to_int,

@ -11,14 +14,269 @@ from ayon_core.tools.utils.constants import (
UNCHECKED_INT,
ITEM_IS_USER_TRISTATE,
)
if typing.TYPE_CHECKING:
from ayon_core.tools.loader.abstract import FrontendLoaderController

VALUE_ITEM_TYPE = 0
STANDARD_ITEM_TYPE = 1
SEPARATOR_ITEM_TYPE = 2

VALUE_ITEM_SUBTYPE = 0
SELECT_ALL_SUBTYPE = 1
DESELECT_ALL_SUBTYPE = 2
SWAP_STATE_SUBTYPE = 3


class BaseQtModel(QtGui.QStandardItemModel):
_empty_icon = None

def __init__(
self,
item_type_role: int,
item_subtype_role: int,
empty_values_label: str,
controller: FrontendLoaderController,
):
self._item_type_role = item_type_role
self._item_subtype_role = item_subtype_role
self._empty_values_label = empty_values_label
self._controller = controller

self._last_project = None

self._select_project_item = None
self._empty_values_item = None

self._select_all_item = None
self._deselect_all_item = None
self._swap_states_item = None

super().__init__()

self.refresh(None)

def _get_standard_items(self) -> list[QtGui.QStandardItem]:
raise NotImplementedError(
"'_get_standard_items' is not implemented"
f" for {self.__class__}"
)

def _clear_standard_items(self):
raise NotImplementedError(
"'_clear_standard_items' is not implemented"
f" for {self.__class__}"
)

def _prepare_new_value_items(
self, project_name: str, project_changed: bool
) -> tuple[
list[QtGui.QStandardItem], list[QtGui.QStandardItem]
]:
raise NotImplementedError(
"'_prepare_new_value_items' is not implemented"
f" for {self.__class__}"
)

def refresh(self, project_name: Optional[str]):
# New project was selected
project_changed = False
if project_name != self._last_project:
self._last_project = project_name
project_changed = True

if project_name is None:
self._add_select_project_item()
return

value_items, items_to_remove = self._prepare_new_value_items(
project_name, project_changed
)
if not value_items:
self._add_empty_values_item()
return

self._remove_empty_items()

root_item = self.invisibleRootItem()
for row_idx, value_item in enumerate(value_items):
if value_item.row() == row_idx:
continue
if value_item.row() >= 0:
root_item.takeRow(value_item.row())
root_item.insertRow(row_idx, value_item)

for item in items_to_remove:
root_item.removeRow(item.row())

self._add_selection_items()

def setData(self, index, value, role):
if role == QtCore.Qt.CheckStateRole and index.isValid():
item_subtype = index.data(self._item_subtype_role)
if item_subtype == SELECT_ALL_SUBTYPE:
for item in self._get_standard_items():
item.setCheckState(QtCore.Qt.Checked)
return True
if item_subtype == DESELECT_ALL_SUBTYPE:
for item in self._get_standard_items():
item.setCheckState(QtCore.Qt.Unchecked)
return True
if item_subtype == SWAP_STATE_SUBTYPE:
for item in self._get_standard_items():
current_state = item.checkState()
item.setCheckState(
QtCore.Qt.Checked
if current_state == QtCore.Qt.Unchecked
else QtCore.Qt.Unchecked
)
return True
return super().setData(index, value, role)

@classmethod
def _get_empty_icon(cls):
if cls._empty_icon is None:
pix = QtGui.QPixmap(1, 1)
pix.fill(QtCore.Qt.transparent)
cls._empty_icon = QtGui.QIcon(pix)
return cls._empty_icon

def _init_default_items(self):
if self._empty_values_item is not None:
return

empty_values_item = QtGui.QStandardItem(self._empty_values_label)
select_project_item = QtGui.QStandardItem("Select project...")

select_all_item = QtGui.QStandardItem("Select all")
deselect_all_item = QtGui.QStandardItem("Deselect all")
swap_states_item = QtGui.QStandardItem("Swap")

for item in (
empty_values_item,
select_project_item,
select_all_item,
deselect_all_item,
swap_states_item,
):
item.setData(STANDARD_ITEM_TYPE, self._item_type_role)

select_all_item.setIcon(get_qt_icon({
"type": "material-symbols",
"name": "done_all",
"color": "white"
}))
deselect_all_item.setIcon(get_qt_icon({
"type": "material-symbols",
"name": "remove_done",
"color": "white"
}))
swap_states_item.setIcon(get_qt_icon({
"type": "material-symbols",
"name": "swap_horiz",
"color": "white"
}))

for item in (
empty_values_item,
select_project_item,
):
item.setFlags(QtCore.Qt.NoItemFlags)

for item, item_type in (
(select_all_item, SELECT_ALL_SUBTYPE),
(deselect_all_item, DESELECT_ALL_SUBTYPE),
(swap_states_item, SWAP_STATE_SUBTYPE),
):
item.setData(item_type, self._item_subtype_role)

for item in (
select_all_item,
deselect_all_item,
swap_states_item,
):
item.setFlags(
QtCore.Qt.ItemIsEnabled
| QtCore.Qt.ItemIsSelectable
| QtCore.Qt.ItemIsUserCheckable
)

self._empty_values_item = empty_values_item
self._select_project_item = select_project_item

self._select_all_item = select_all_item
self._deselect_all_item = deselect_all_item
self._swap_states_item = swap_states_item

def _get_empty_values_item(self):
self._init_default_items()
return self._empty_values_item

def _get_select_project_item(self):
self._init_default_items()
return self._select_project_item

def _get_empty_items(self):
self._init_default_items()
return [
self._empty_values_item,
self._select_project_item,
]

def _get_selection_items(self):
self._init_default_items()
return [
self._select_all_item,
self._deselect_all_item,
self._swap_states_item,
]

def _get_default_items(self):
return self._get_empty_items() + self._get_selection_items()

def _add_select_project_item(self):
item = self._get_select_project_item()
if item.row() < 0:
self._remove_items()
root_item = self.invisibleRootItem()
root_item.appendRow(item)

def _add_empty_values_item(self):
item = self._get_empty_values_item()
if item.row() < 0:
self._remove_items()
root_item = self.invisibleRootItem()
root_item.appendRow(item)

def _add_selection_items(self):
root_item = self.invisibleRootItem()
items = self._get_selection_items()
for item in self._get_selection_items():
row = item.row()
if row >= 0:
root_item.takeRow(row)
root_item.appendRows(items)

def _remove_items(self):
root_item = self.invisibleRootItem()
for item in self._get_default_items():
if item.row() < 0:
continue
root_item.takeRow(item.row())

root_item.removeRows(0, root_item.rowCount())
self._clear_standard_items()

def _remove_empty_items(self):
root_item = self.invisibleRootItem()
for item in self._get_empty_items():
if item.row() < 0:
continue
root_item.takeRow(item.row())


class CustomPaintDelegate(QtWidgets.QStyledItemDelegate):
"""Delegate showing status name and short name."""
_empty_icon = None
_checked_value = checkstate_enum_to_int(QtCore.Qt.Checked)
_checked_bg_color = QtGui.QColor("#2C3B4C")

@ -38,6 +296,14 @@ class CustomPaintDelegate(QtWidgets.QStyledItemDelegate):
self._icon_role = icon_role
self._item_type_role = item_type_role

@classmethod
def _get_empty_icon(cls):
if cls._empty_icon is None:
pix = QtGui.QPixmap(1, 1)
pix.fill(QtCore.Qt.transparent)
cls._empty_icon = QtGui.QIcon(pix)
return cls._empty_icon

def paint(self, painter, option, index):
item_type = None
if self._item_type_role is not None:

@ -70,6 +336,9 @@ class CustomPaintDelegate(QtWidgets.QStyledItemDelegate):
if option.state & QtWidgets.QStyle.State_Open:
state = QtGui.QIcon.On
icon = self._get_index_icon(index)
if icon is None or icon.isNull():
icon = self._get_empty_icon()

option.features |= QtWidgets.QStyleOptionViewItem.HasDecoration

# Disable visible check indicator

@ -241,6 +510,10 @@ class CustomPaintMultiselectComboBox(QtWidgets.QComboBox):
QtCore.Qt.Key_Home,
QtCore.Qt.Key_End,
}
_top_bottom_margins = 1
_top_bottom_padding = 2
_left_right_padding = 3
_item_bg_color = QtGui.QColor("#31424e")

def __init__(
self,

@ -433,14 +706,14 @@ class CustomPaintMultiselectComboBox(QtWidgets.QComboBox):

idxs = self._get_checked_idx()
# draw the icon and text
draw_text = True
draw_items = False
combotext = None
if self._custom_text is not None:
combotext = self._custom_text
elif not idxs:
combotext = self._placeholder_text
else:
draw_text = False
draw_items = True

content_field_rect = self.style().subControlRect(
QtWidgets.QStyle.CC_ComboBox,

@ -448,7 +721,9 @@ class CustomPaintMultiselectComboBox(QtWidgets.QComboBox):
QtWidgets.QStyle.SC_ComboBoxEditField
).adjusted(1, 0, -1, 0)

if draw_text:
if draw_items:
self._paint_items(painter, idxs, content_field_rect)
else:
color = option.palette.color(QtGui.QPalette.Text)
color.setAlpha(67)
pen = painter.pen()

@ -459,15 +734,12 @@ class CustomPaintMultiselectComboBox(QtWidgets.QComboBox):
QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter,
combotext
)
else:
self._paint_items(painter, idxs, content_field_rect)

painter.end()

def _paint_items(self, painter, indexes, content_rect):
origin_rect = QtCore.QRect(content_rect)

metrics = self.fontMetrics()
model = self.model()
available_width = content_rect.width()
total_used_width = 0

@ -482,31 +754,80 @@ class CustomPaintMultiselectComboBox(QtWidgets.QComboBox):
continue

icon = index.data(self._icon_role)
# TODO handle this case
if icon is None or icon.isNull():
continue
text = index.data(self._text_role)
valid_icon = icon is not None and not icon.isNull()
if valid_icon:
sizes = icon.availableSizes()
if sizes:
valid_icon = any(size.width() > 1 for size in sizes)

icon_rect = QtCore.QRect(content_rect)
diff = icon_rect.height() - metrics.height()
if diff < 0:
diff = 0
top_offset = diff // 2
bottom_offset = diff - top_offset
icon_rect.adjust(0, top_offset, 0, -bottom_offset)
icon_rect.setWidth(metrics.height())
icon.paint(
painter,
icon_rect,
QtCore.Qt.AlignCenter,
QtGui.QIcon.Normal,
QtGui.QIcon.On
)
content_rect.setLeft(icon_rect.right() + spacing)
if total_used_width > 0:
total_used_width += spacing
total_used_width += icon_rect.width()
if total_used_width > available_width:
break
if valid_icon:
metrics = self.fontMetrics()
icon_rect = QtCore.QRect(content_rect)
diff = icon_rect.height() - metrics.height()
if diff < 0:
diff = 0
top_offset = diff // 2
bottom_offset = diff - top_offset
icon_rect.adjust(0, top_offset, 0, -bottom_offset)
used_width = metrics.height()
if total_used_width > 0:
total_used_width += spacing
total_used_width += used_width
if total_used_width > available_width:
break

icon_rect.setWidth(used_width)
icon.paint(
painter,
icon_rect,
QtCore.Qt.AlignCenter,
QtGui.QIcon.Normal,
QtGui.QIcon.On
)
content_rect.setLeft(icon_rect.right() + spacing)

elif text:
bg_height = (
content_rect.height()
- (2 * self._top_bottom_margins)
)
font_height = bg_height - (2 * self._top_bottom_padding)

bg_top_y = content_rect.y() + self._top_bottom_margins

font = self.font()
font.setPixelSize(font_height)
metrics = QtGui.QFontMetrics(font)
painter.setFont(font)

label_rect = metrics.boundingRect(text)

bg_width = label_rect.width() + (2 * self._left_right_padding)
if total_used_width > 0:
total_used_width += spacing
total_used_width += bg_width
if total_used_width > available_width:
break

bg_rect = QtCore.QRectF(label_rect)
bg_rect.moveTop(bg_top_y)
bg_rect.moveLeft(content_rect.left())
bg_rect.setWidth(bg_width)
bg_rect.setHeight(bg_height)

label_rect.moveTop(bg_top_y)
label_rect.moveLeft(
content_rect.left() + self._left_right_padding
)

path = QtGui.QPainterPath()
path.addRoundedRect(bg_rect, 5, 5)

painter.fillPath(path, self._item_bg_color)
painter.drawText(label_rect, QtCore.Qt.AlignCenter, text)

content_rect.setLeft(bg_rect.right() + spacing)

painter.restore()

@ -517,7 +838,11 @@ class CustomPaintMultiselectComboBox(QtWidgets.QComboBox):
def setItemCheckState(self, index, state):
self.setItemData(index, state, QtCore.Qt.CheckStateRole)

def set_value(self, values: Optional[Iterable[Any]], role: Optional[int] = None):
def set_value(
self,
values: Optional[Iterable[Any]],
role: Optional[int] = None,
):
if role is None:
role = self._value_role
169
client/ayon_core/tools/loader/ui/product_types_combo.py
Normal file

@ -0,0 +1,169 @@
from qtpy import QtGui, QtCore

from ._multicombobox import (
CustomPaintMultiselectComboBox,
BaseQtModel,
)

STATUS_ITEM_TYPE = 0
SELECT_ALL_TYPE = 1
DESELECT_ALL_TYPE = 2
SWAP_STATE_TYPE = 3

PRODUCT_TYPE_ROLE = QtCore.Qt.UserRole + 1
ITEM_TYPE_ROLE = QtCore.Qt.UserRole + 2
ITEM_SUBTYPE_ROLE = QtCore.Qt.UserRole + 3


class ProductTypesQtModel(BaseQtModel):
refreshed = QtCore.Signal()

def __init__(self, controller):
self._reset_filters_on_refresh = True
self._refreshing = False
self._bulk_change = False
self._items_by_name = {}

super().__init__(
item_type_role=ITEM_TYPE_ROLE,
item_subtype_role=ITEM_SUBTYPE_ROLE,
empty_values_label="No product types...",
controller=controller,
)

def is_refreshing(self):
return self._refreshing

def refresh(self, project_name):
self._refreshing = True
super().refresh(project_name)

self._reset_filters_on_refresh = False
self._refreshing = False
self.refreshed.emit()

def reset_product_types_filter_on_refresh(self):
self._reset_filters_on_refresh = True

def _get_standard_items(self) -> list[QtGui.QStandardItem]:
return list(self._items_by_name.values())

def _clear_standard_items(self):
self._items_by_name.clear()

def _prepare_new_value_items(self, project_name: str, _: bool) -> tuple[
list[QtGui.QStandardItem], list[QtGui.QStandardItem]
]:
product_type_items = self._controller.get_product_type_items(
project_name)
self._last_project = project_name

names_to_remove = set(self._items_by_name.keys())
items = []
items_filter_required = {}
for product_type_item in product_type_items:
name = product_type_item.name
names_to_remove.discard(name)
item = self._items_by_name.get(name)
# Apply filter to new items or if filters reset is requested
filter_required = self._reset_filters_on_refresh
if item is None:
filter_required = True
item = QtGui.QStandardItem(name)
item.setData(name, PRODUCT_TYPE_ROLE)
item.setEditable(False)
item.setCheckable(True)
self._items_by_name[name] = item

items.append(item)

if filter_required:
items_filter_required[name] = item

if items_filter_required:
product_types_filter = self._controller.get_product_types_filter()
for product_type, item in items_filter_required.items():
matching = (
int(product_type in product_types_filter.product_types)
+ int(product_types_filter.is_allow_list)
)
item.setCheckState(
QtCore.Qt.Checked
if matching % 2 == 0
else QtCore.Qt.Unchecked
)

items_to_remove = []
for name in names_to_remove:
items_to_remove.append(
self._items_by_name.pop(name)
)

# Uncheck all if all are checked (same result)
if all(
item.checkState() == QtCore.Qt.Checked
for item in items
):
for item in items:
item.setCheckState(QtCore.Qt.Unchecked)

return items, items_to_remove


class ProductTypesCombobox(CustomPaintMultiselectComboBox):
def __init__(self, controller, parent):
self._controller = controller
model = ProductTypesQtModel(controller)
super().__init__(
PRODUCT_TYPE_ROLE,
PRODUCT_TYPE_ROLE,
QtCore.Qt.ForegroundRole,
QtCore.Qt.DecorationRole,
item_type_role=ITEM_TYPE_ROLE,
model=model,
parent=parent
)

model.refreshed.connect(self._on_model_refresh)

self.set_placeholder_text("Product types filter...")
self._model = model
self._last_project_name = None
self._fully_disabled_filter = False

controller.register_event_callback(
"selection.project.changed",
self._on_project_change
)
controller.register_event_callback(
"projects.refresh.finished",
self._on_projects_refresh
)
self.setToolTip("Product types filter")
self.value_changed.connect(
self._on_product_type_filter_change
)

def reset_product_types_filter_on_refresh(self):
self._model.reset_product_types_filter_on_refresh()

def _on_model_refresh(self):
self.value_changed.emit()

def _on_product_type_filter_change(self):
lines = ["Product types filter"]
for item in self.get_value_info():
status_name, enabled = item
lines.append(f"{'✔' if enabled else '☐'} {status_name}")

self.setToolTip("\n".join(lines))

def _on_project_change(self, event):
project_name = event["project_name"]
self._last_project_name = project_name
self._model.refresh(project_name)

def _on_projects_refresh(self):
if self._last_project_name:
self._model.refresh(self._last_project_name)
self._on_product_type_filter_change()
@ -1,256 +0,0 @@
from qtpy import QtWidgets, QtGui, QtCore

from ayon_core.tools.utils import get_qt_icon

PRODUCT_TYPE_ROLE = QtCore.Qt.UserRole + 1


class ProductTypesQtModel(QtGui.QStandardItemModel):
refreshed = QtCore.Signal()
filter_changed = QtCore.Signal()

def __init__(self, controller):
super(ProductTypesQtModel, self).__init__()
self._controller = controller

self._reset_filters_on_refresh = True
self._refreshing = False
self._bulk_change = False
self._last_project = None
self._items_by_name = {}

controller.register_event_callback(
"controller.reset.finished",
self._on_controller_reset_finish,
)

def is_refreshing(self):
return self._refreshing

def get_filter_info(self):
"""Product types filtering info.

Returns:
dict[str, bool]: Filtering value by product type name. False value
means to hide product type.
"""

return {
name: item.checkState() == QtCore.Qt.Checked
for name, item in self._items_by_name.items()
}

def refresh(self, project_name):
self._refreshing = True
product_type_items = self._controller.get_product_type_items(
project_name)
self._last_project = project_name

items_to_remove = set(self._items_by_name.keys())
new_items = []
items_filter_required = {}
for product_type_item in product_type_items:
name = product_type_item.name
items_to_remove.discard(name)
item = self._items_by_name.get(name)
# Apply filter to new items or if filters reset is requested
filter_required = self._reset_filters_on_refresh
if item is None:
filter_required = True
item = QtGui.QStandardItem(name)
item.setData(name, PRODUCT_TYPE_ROLE)
item.setEditable(False)
item.setCheckable(True)
new_items.append(item)
self._items_by_name[name] = item

if filter_required:
items_filter_required[name] = item

icon = get_qt_icon(product_type_item.icon)
item.setData(icon, QtCore.Qt.DecorationRole)

if items_filter_required:
product_types_filter = self._controller.get_product_types_filter()
for product_type, item in items_filter_required.items():
matching = (
int(product_type in product_types_filter.product_types)
+ int(product_types_filter.is_allow_list)
)
state = (
QtCore.Qt.Checked
if matching % 2 == 0
else QtCore.Qt.Unchecked
)
item.setCheckState(state)

root_item = self.invisibleRootItem()
if new_items:
root_item.appendRows(new_items)

for name in items_to_remove:
item = self._items_by_name.pop(name)
root_item.removeRow(item.row())

self._reset_filters_on_refresh = False
self._refreshing = False
self.refreshed.emit()

def reset_product_types_filter_on_refresh(self):
self._reset_filters_on_refresh = True

def setData(self, index, value, role=None):
checkstate_changed = False
if role is None:
role = QtCore.Qt.EditRole
elif role == QtCore.Qt.CheckStateRole:
checkstate_changed = True
output = super(ProductTypesQtModel, self).setData(index, value, role)
if checkstate_changed and not self._bulk_change:
self.filter_changed.emit()
return output

def change_state_for_all(self, checked):
if self._items_by_name:
self.change_states(checked, self._items_by_name.keys())

def change_states(self, checked, product_types):
product_types = set(product_types)
if not product_types:
return

if checked is None:
state = None
elif checked:
state = QtCore.Qt.Checked
else:
state = QtCore.Qt.Unchecked

self._bulk_change = True

changed = False
for product_type in product_types:
item = self._items_by_name.get(product_type)
if item is None:
continue
new_state = state
item_checkstate = item.checkState()
if new_state is None:
if item_checkstate == QtCore.Qt.Checked:
new_state = QtCore.Qt.Unchecked
else:
new_state = QtCore.Qt.Checked
elif item_checkstate == new_state:
continue
changed = True
item.setCheckState(new_state)

self._bulk_change = False

if changed:
self.filter_changed.emit()

def _on_controller_reset_finish(self):
self.refresh(self._last_project)


class ProductTypesView(QtWidgets.QListView):
filter_changed = QtCore.Signal()

def __init__(self, controller, parent):
super(ProductTypesView, self).__init__(parent)

self.setSelectionMode(
QtWidgets.QAbstractItemView.ExtendedSelection
)
self.setAlternatingRowColors(True)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)

product_types_model = ProductTypesQtModel(controller)
product_types_proxy_model = QtCore.QSortFilterProxyModel()
product_types_proxy_model.setSourceModel(product_types_model)

self.setModel(product_types_proxy_model)

product_types_model.refreshed.connect(self._on_refresh_finished)
product_types_model.filter_changed.connect(self._on_filter_change)
self.customContextMenuRequested.connect(self._on_context_menu)

controller.register_event_callback(
"selection.project.changed",
self._on_project_change
)

self._controller = controller
self._refresh_product_types_filter = False

self._product_types_model = product_types_model
self._product_types_proxy_model = product_types_proxy_model

def get_filter_info(self):
return self._product_types_model.get_filter_info()

def reset_product_types_filter_on_refresh(self):
self._product_types_model.reset_product_types_filter_on_refresh()

def _on_project_change(self, event):
project_name = event["project_name"]
self._product_types_model.refresh(project_name)

def _on_refresh_finished(self):
# Apply product types filter on first show
self.filter_changed.emit()

def _on_filter_change(self):
if not self._product_types_model.is_refreshing():
self.filter_changed.emit()

def _change_selection_state(self, checkstate):
selection_model = self.selectionModel()
product_types = {
index.data(PRODUCT_TYPE_ROLE)
for index in selection_model.selectedIndexes()
}
product_types.discard(None)
self._product_types_model.change_states(checkstate, product_types)

def _on_enable_all(self):
self._product_types_model.change_state_for_all(True)

def _on_disable_all(self):
self._product_types_model.change_state_for_all(False)

def _on_context_menu(self, pos):
menu = QtWidgets.QMenu(self)

# Add enable all action
action_check_all = QtWidgets.QAction(menu)
action_check_all.setText("Enable All")
action_check_all.triggered.connect(self._on_enable_all)
# Add disable all action
action_uncheck_all = QtWidgets.QAction(menu)
action_uncheck_all.setText("Disable All")
action_uncheck_all.triggered.connect(self._on_disable_all)

menu.addAction(action_check_all)
menu.addAction(action_uncheck_all)

# Get mouse position
global_pos = self.viewport().mapToGlobal(pos)
menu.exec_(global_pos)

def event(self, event):
if event.type() == QtCore.QEvent.KeyPress:
if event.key() == QtCore.Qt.Key_Space:
self._change_selection_state(None)
return True

if event.key() == QtCore.Qt.Key_Backspace:
self._change_selection_state(False)
return True

if event.key() == QtCore.Qt.Key_Return:
self._change_selection_state(True)
return True

return super(ProductTypesView, self).event(event)
@ -19,6 +19,7 @@ from .products_model import (
)

STATUS_NAME_ROLE = QtCore.Qt.UserRole + 1
TASK_ID_ROLE = QtCore.Qt.UserRole + 2


class VersionsModel(QtGui.QStandardItemModel):

@ -48,6 +49,7 @@ class VersionsModel(QtGui.QStandardItemModel):
item.setData(version_id, QtCore.Qt.UserRole)
self._items_by_id[version_id] = item
item.setData(version_item.status, STATUS_NAME_ROLE)
item.setData(version_item.task_id, TASK_ID_ROLE)

if item.row() != idx:
root_item.insertRow(idx, item)

@ -57,17 +59,30 @@ class VersionsFilterModel(QtCore.QSortFilterProxyModel):
def __init__(self):
super().__init__()
self._status_filter = None
self._task_ids_filter = None

def filterAcceptsRow(self, row, parent):
if self._status_filter is None:
return True
if self._status_filter is not None:
if not self._status_filter:
return False

if not self._status_filter:
return False
index = self.sourceModel().index(row, 0, parent)
status = index.data(STATUS_NAME_ROLE)
if status not in self._status_filter:
return False

index = self.sourceModel().index(row, 0, parent)
status = index.data(STATUS_NAME_ROLE)
return status in self._status_filter
if self._task_ids_filter:
index = self.sourceModel().index(row, 0, parent)
task_id = index.data(TASK_ID_ROLE)
if task_id not in self._task_ids_filter:
return False
return True

def set_tasks_filter(self, task_ids):
if self._task_ids_filter == task_ids:
return
self._task_ids_filter = task_ids
self.invalidateFilter()

def set_statuses_filter(self, status_names):
if self._status_filter == status_names:

@ -101,6 +116,13 @@ class VersionComboBox(QtWidgets.QComboBox):
def get_product_id(self):
return self._product_id

def set_tasks_filter(self, task_ids):
self._proxy_model.set_tasks_filter(task_ids)
if self.count() == 0:
return
if self.currentIndex() != 0:
self.setCurrentIndex(0)

def set_statuses_filter(self, status_names):
self._proxy_model.set_statuses_filter(status_names)
if self.count() == 0:

@ -149,6 +171,7 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate):
super().__init__(*args, **kwargs)

self._editor_by_id: Dict[str, VersionComboBox] = {}
self._task_ids_filter = None
self._statuses_filter = None

def displayText(self, value, locale):

@ -156,6 +179,11 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate):
return "N/A"
return format_version(value)

def set_tasks_filter(self, task_ids):
self._task_ids_filter = set(task_ids)
for widget in self._editor_by_id.values():
widget.set_tasks_filter(task_ids)

def set_statuses_filter(self, status_names):
self._statuses_filter = set(status_names)
for widget in self._editor_by_id.values():

@ -222,6 +250,7 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate):

editor = VersionComboBox(product_id, parent)
editor.setProperty("itemId", item_id)
editor.setFocusPolicy(QtCore.Qt.NoFocus)

editor.value_changed.connect(self._on_editor_change)
editor.destroyed.connect(self._on_destroy)

@ -238,6 +267,7 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate):
version_id = index.data(VERSION_ID_ROLE)

editor.update_versions(versions, version_id)
editor.set_tasks_filter(self._task_ids_filter)
editor.set_statuses_filter(self._statuses_filter)

def setModelData(self, editor, model, index):
@ -12,34 +12,35 @@ GROUP_TYPE_ROLE = QtCore.Qt.UserRole + 1
MERGED_COLOR_ROLE = QtCore.Qt.UserRole + 2
FOLDER_LABEL_ROLE = QtCore.Qt.UserRole + 3
FOLDER_ID_ROLE = QtCore.Qt.UserRole + 4
PRODUCT_ID_ROLE = QtCore.Qt.UserRole + 5
PRODUCT_NAME_ROLE = QtCore.Qt.UserRole + 6
PRODUCT_TYPE_ROLE = QtCore.Qt.UserRole + 7
PRODUCT_TYPE_ICON_ROLE = QtCore.Qt.UserRole + 8
PRODUCT_IN_SCENE_ROLE = QtCore.Qt.UserRole + 9
VERSION_ID_ROLE = QtCore.Qt.UserRole + 10
VERSION_HERO_ROLE = QtCore.Qt.UserRole + 11
VERSION_NAME_ROLE = QtCore.Qt.UserRole + 12
VERSION_NAME_EDIT_ROLE = QtCore.Qt.UserRole + 13
VERSION_PUBLISH_TIME_ROLE = QtCore.Qt.UserRole + 14
VERSION_STATUS_NAME_ROLE = QtCore.Qt.UserRole + 15
VERSION_STATUS_SHORT_ROLE = QtCore.Qt.UserRole + 16
VERSION_STATUS_COLOR_ROLE = QtCore.Qt.UserRole + 17
VERSION_STATUS_ICON_ROLE = QtCore.Qt.UserRole + 18
VERSION_AUTHOR_ROLE = QtCore.Qt.UserRole + 19
VERSION_FRAME_RANGE_ROLE = QtCore.Qt.UserRole + 20
VERSION_DURATION_ROLE = QtCore.Qt.UserRole + 21
VERSION_HANDLES_ROLE = QtCore.Qt.UserRole + 22
VERSION_STEP_ROLE = QtCore.Qt.UserRole + 23
VERSION_AVAILABLE_ROLE = QtCore.Qt.UserRole + 24
VERSION_THUMBNAIL_ID_ROLE = QtCore.Qt.UserRole + 25
ACTIVE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 26
REMOTE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 27
REPRESENTATIONS_COUNT_ROLE = QtCore.Qt.UserRole + 28
SYNC_ACTIVE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 29
SYNC_REMOTE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 30
TASK_ID_ROLE = QtCore.Qt.UserRole + 5
PRODUCT_ID_ROLE = QtCore.Qt.UserRole + 6
PRODUCT_NAME_ROLE = QtCore.Qt.UserRole + 7
PRODUCT_TYPE_ROLE = QtCore.Qt.UserRole + 8
PRODUCT_TYPE_ICON_ROLE = QtCore.Qt.UserRole + 9
PRODUCT_IN_SCENE_ROLE = QtCore.Qt.UserRole + 10
VERSION_ID_ROLE = QtCore.Qt.UserRole + 11
VERSION_HERO_ROLE = QtCore.Qt.UserRole + 12
VERSION_NAME_ROLE = QtCore.Qt.UserRole + 13
VERSION_NAME_EDIT_ROLE = QtCore.Qt.UserRole + 14
VERSION_PUBLISH_TIME_ROLE = QtCore.Qt.UserRole + 15
VERSION_STATUS_NAME_ROLE = QtCore.Qt.UserRole + 16
VERSION_STATUS_SHORT_ROLE = QtCore.Qt.UserRole + 17
VERSION_STATUS_COLOR_ROLE = QtCore.Qt.UserRole + 18
VERSION_STATUS_ICON_ROLE = QtCore.Qt.UserRole + 19
VERSION_AUTHOR_ROLE = QtCore.Qt.UserRole + 20
VERSION_FRAME_RANGE_ROLE = QtCore.Qt.UserRole + 21
VERSION_DURATION_ROLE = QtCore.Qt.UserRole + 22
VERSION_HANDLES_ROLE = QtCore.Qt.UserRole + 23
VERSION_STEP_ROLE = QtCore.Qt.UserRole + 24
VERSION_AVAILABLE_ROLE = QtCore.Qt.UserRole + 25
VERSION_THUMBNAIL_ID_ROLE = QtCore.Qt.UserRole + 26
ACTIVE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 27
REMOTE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 28
REPRESENTATIONS_COUNT_ROLE = QtCore.Qt.UserRole + 29
SYNC_ACTIVE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 30
SYNC_REMOTE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 31

STATUS_NAME_FILTER_ROLE = QtCore.Qt.UserRole + 31
STATUS_NAME_FILTER_ROLE = QtCore.Qt.UserRole + 32


class ProductsModel(QtGui.QStandardItemModel):

@ -368,6 +369,7 @@ class ProductsModel(QtGui.QStandardItemModel):

"""
model_item.setData(version_item.version_id, VERSION_ID_ROLE)
model_item.setData(version_item.task_id, TASK_ID_ROLE)
model_item.setData(version_item.version, VERSION_NAME_ROLE)
model_item.setData(version_item.is_hero, VERSION_HERO_ROLE)
model_item.setData(

@ -499,8 +501,10 @@ class ProductsModel(QtGui.QStandardItemModel):
version_item.version_id
for version_item in last_version_by_product_id.values()
}
repre_count_by_version_id = self._controller.get_versions_representation_count(
project_name, version_ids
repre_count_by_version_id = (
self._controller.get_versions_representation_count(
project_name, version_ids
)
)
sync_availability_by_version_id = (
self._controller.get_version_sync_availability(
Some files were not shown because too many files have changed in this diff.