Compare commits

No commits in common. "develop" and "1.6.12" have entirely different histories.

64 changed files with 737 additions and 2341 deletions

View file

@ -35,9 +35,6 @@ body:
label: Version
description: What version are you running? Look to AYON Tray
options:
- 1.7.0
- 1.6.13
- 1.6.12
- 1.6.11
- 1.6.10
- 1.6.9

View file

@ -185,14 +185,9 @@ class IPluginPaths(AYONInterface):
"""
return self._get_plugin_paths_by_type("inventory")
def get_loader_action_plugin_paths(
self, host_name: Optional[str]
) -> list[str]:
def get_loader_action_plugin_paths(self) -> list[str]:
"""Receive loader action plugin paths.
Args:
host_name (Optional[str]): Current host name.
Returns:
list[str]: Paths to loader action plugins.

View file

@ -6,6 +6,7 @@ import logging
import code
import traceback
from pathlib import Path
import warnings
import click
@ -89,6 +90,54 @@ def addon(ctx):
pass
@main_cli.command()
@click.pass_context
@click.argument("output_json_path")
@click.option("--project", help="Project name", default=None)
@click.option("--asset", help="Folder path", default=None)
@click.option("--task", help="Task name", default=None)
@click.option("--app", help="Application name", default=None)
@click.option(
"--envgroup", help="Environment group (e.g. \"farm\")", default=None
)
def extractenvironments(
ctx, output_json_path, project, asset, task, app, envgroup
):
"""Extract environment variables for entered context to a json file.
Entered output filepath will be created if it does not exist.
All context options must be passed, otherwise only AYON's global
environments will be extracted.
Context options are "project", "asset", "task", "app"
Deprecated:
This function is deprecated and will be removed in future. Please use
'addon applications extractenvironments ...' instead.
"""
warnings.warn(
(
"Command 'extractenvironments' is deprecated and will be"
" removed in future. Please use"
" 'addon applications extractenvironments ...' instead."
),
DeprecationWarning
)
addons_manager = ctx.obj["addons_manager"]
applications_addon = addons_manager.get_enabled_addon("applications")
if applications_addon is None:
raise RuntimeError(
"Applications addon is not available or enabled."
)
# Please ignore the fact this is using a private method
applications_addon._cli_extract_environments(
output_json_path, project, asset, task, app, envgroup
)
@main_cli.command()
@click.pass_context
@click.argument("path", required=True)

View file

@ -137,7 +137,7 @@ class HostBase(AbstractHost):
def get_current_folder_path(self) -> Optional[str]:
"""
Returns:
Optional[str]: Current folder path.
Optional[str]: Current asset name.
"""
return os.environ.get("AYON_FOLDER_PATH")

View file

@ -1,4 +1,3 @@
from __future__ import annotations
import os
import re
import logging
@ -13,8 +12,6 @@ from typing import Optional
import xml.etree.ElementTree
import clique
from .execute import run_subprocess
from .vendor_bin_utils import (
get_ffmpeg_tool_args,
@ -134,29 +131,16 @@ def get_transcode_temp_directory():
)
def get_oiio_info_for_input(
filepath: str,
*,
subimages: bool = False,
verbose: bool = True,
logger: logging.Logger = None,
):
def get_oiio_info_for_input(filepath, logger=None, subimages=False):
"""Call oiiotool to get information about input and return stdout.
Args:
filepath (str): Path to file.
subimages (bool): include info about subimages in the output.
verbose (bool): get the full metadata about each input image.
logger (logging.Logger): Logger used for logging.
Stdout should contain xml format string.
"""
args = get_oiio_tool_args(
"oiiotool",
"--info",
"-v"
)
if verbose:
args.append("-v")
if subimages:
args.append("-a")
@ -586,10 +570,7 @@ def get_review_layer_name(src_filepath):
return None
# Load info about file from oiio tool
input_info = get_oiio_info_for_input(
src_filepath,
verbose=False,
)
input_info = get_oiio_info_for_input(src_filepath)
if not input_info:
return None
@ -653,37 +634,6 @@ def should_convert_for_ffmpeg(src_filepath):
return False
def _get_attributes_to_erase(
input_info: dict, logger: logging.Logger
) -> list[str]:
"""FFMPEG does not support some attributes in metadata."""
erase_attrs: dict[str, str] = {} # Attr name to reason mapping
for attr_name, attr_value in input_info["attribs"].items():
if not isinstance(attr_value, str):
continue
# Remove attributes that have string value longer than allowed length
# for ffmpeg or when contain prohibited symbols
if len(attr_value) > MAX_FFMPEG_STRING_LEN:
reason = f"has too long value ({len(attr_value)} chars)."
erase_attrs[attr_name] = reason
continue
for char in NOT_ALLOWED_FFMPEG_CHARS:
if char not in attr_value:
continue
reason = f"contains unsupported character \"{char}\"."
erase_attrs[attr_name] = reason
break
for attr_name, reason in erase_attrs.items():
logger.info(
f"Removed attribute \"{attr_name}\" from metadata"
f" because {reason}."
)
return list(erase_attrs.keys())
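A standalone sketch of the filtering rule implemented by '_get_attributes_to_erase'; the length limit and character set below are placeholders, not the module's real MAX_FFMPEG_STRING_LEN and NOT_ALLOWED_FFMPEG_CHARS values:

MAX_LEN = 1024      # assumed limit, stand-in for MAX_FFMPEG_STRING_LEN
BAD_CHARS = ('"',)  # assumed set, stand-in for NOT_ALLOWED_FFMPEG_CHARS

def attributes_to_erase(attribs):
    erase = []
    for name, value in attribs.items():
        if not isinstance(value, str):
            continue
        too_long = len(value) > MAX_LEN
        has_bad_char = any(char in value for char in BAD_CHARS)
        if too_long or has_bad_char:
            erase.append(name)
    return erase

print(attributes_to_erase({"comment": "x" * 2000, "fps": 24.0}))  # ['comment']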
def convert_input_paths_for_ffmpeg(
input_paths,
output_dir,
@ -709,7 +659,7 @@ def convert_input_paths_for_ffmpeg(
Raises:
ValueError: If input filepath has extension not supported by function.
Currently, only ".exr" extension is supported.
Currently is supported only ".exr" extension.
"""
if logger is None:
logger = logging.getLogger(__name__)
@ -734,22 +684,7 @@ def convert_input_paths_for_ffmpeg(
# Collect channels to export
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
# Find which attributes to strip
erase_attributes: list[str] = _get_attributes_to_erase(
input_info, logger=logger
)
# clique.PATTERNS["frames"] supports only `.1001.exr` not `_1001.exr` so
# we use a customized pattern.
pattern = "[_.](?P<index>(?P<padding>0*)\\d+)\\.\\D+\\d?$"
input_collections, input_remainder = clique.assemble(
input_paths,
patterns=[pattern],
assume_padded_when_ambiguous=True,
)
input_items = list(input_collections)
input_items.extend(input_remainder)
for input_item in input_items:
for input_path in input_paths:
# Prepare subprocess arguments
oiio_cmd = get_oiio_tool_args(
"oiiotool",
@ -760,23 +695,8 @@ def convert_input_paths_for_ffmpeg(
if compression:
oiio_cmd.extend(["--compression", compression])
# Convert a sequence of files using a single oiiotool command
# using its sequence syntax
if isinstance(input_item, clique.Collection):
frames = input_item.format("{head}#{tail}").replace(" ", "")
oiio_cmd.extend([
"--framepadding", input_item.padding,
"--frames", frames,
"--parallel-frames"
])
input_item: str = input_item.format("{head}#{tail}")
elif not isinstance(input_item, str):
raise TypeError(
f"Input is not a string or Collection: {input_item}"
)
oiio_cmd.extend([
input_arg, input_item,
input_arg, input_path,
# Tell oiiotool which channels should be put to top stack
# (and output)
"--ch", channels_arg,
@ -784,11 +704,38 @@ def convert_input_paths_for_ffmpeg(
"--subimage", "0"
])
for attr_name in erase_attributes:
for attr_name, attr_value in input_info["attribs"].items():
if not isinstance(attr_value, str):
continue
# Remove attributes that have string value longer than allowed
# length for ffmpeg or when containing prohibited symbols
erase_reason = "Missing reason"
erase_attribute = False
if len(attr_value) > MAX_FFMPEG_STRING_LEN:
erase_reason = "has too long value ({} chars).".format(
len(attr_value)
)
erase_attribute = True
if not erase_attribute:
for char in NOT_ALLOWED_FFMPEG_CHARS:
if char in attr_value:
erase_attribute = True
erase_reason = (
"contains unsupported character \"{}\"."
).format(char)
break
if erase_attribute:
# Set attribute to empty string
logger.info((
"Removed attribute \"{}\" from metadata because {}."
).format(attr_name, erase_reason))
oiio_cmd.extend(["--eraseattrib", attr_name])
# Add last argument - path to output
base_filename = os.path.basename(input_item)
base_filename = os.path.basename(input_path)
output_path = os.path.join(output_dir, base_filename)
oiio_cmd.extend([
"-o", output_path
@ -1189,10 +1136,7 @@ def oiio_color_convert(
target_display=None,
target_view=None,
additional_command_args=None,
frames: Optional[str] = None,
frame_padding: Optional[int] = None,
parallel_frames: bool = False,
logger: Optional[logging.Logger] = None,
logger=None,
):
"""Transcode source file to other with colormanagement.
@ -1204,7 +1148,7 @@ def oiio_color_convert(
input_path (str): Path that should be converted. It is expected that
contains single file or image sequence of same type
(sequence in format 'file.FRAMESTART-FRAMEEND#.ext', see oiio docs,
eg `big.1-3#.tif` or `big.1-3%d.ext` with `frames` argument)
eg `big.1-3#.tif`)
output_path (str): Path to output filename.
(must follow format of 'input_path', eg. single file or
sequence in 'file.FRAMESTART-FRAMEEND#.ext', `output.1-3#.tif`)
@ -1225,13 +1169,6 @@ def oiio_color_convert(
both 'view' and 'display' must be filled (if 'target_colorspace')
additional_command_args (list): arguments for oiiotool (like binary
depth for .dpx)
frames (Optional[str]): Complex frame range to process. This requires
input path and output path to use frame token placeholder like
`#` or `%d`, e.g. file.#.exr
frame_padding (Optional[int]): Frame padding to use for the input and
output when using a sequence filepath.
parallel_frames (bool): If True, process frames in parallel inside
the `oiiotool` process. Only supported in OIIO 2.5.20.0+.
logger (logging.Logger): Logger used for logging.
Raises:
@ -1241,20 +1178,7 @@ def oiio_color_convert(
if logger is None:
logger = logging.getLogger(__name__)
# Get oiioinfo only from first image, otherwise file can't be found
first_input_path = input_path
if frames:
frames: str
first_frame = int(re.split("[ x-]", frames, 1)[0])
first_frame = str(first_frame).zfill(frame_padding or 0)
for token in ["#", "%d"]:
first_input_path = first_input_path.replace(token, first_frame)
input_info = get_oiio_info_for_input(
first_input_path,
verbose=False,
logger=logger,
)
input_info = get_oiio_info_for_input(input_path, logger=logger)
# Collect channels to export
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
@ -1267,22 +1191,6 @@ def oiio_color_convert(
"--colorconfig", config_path
)
if frames:
# If `frames` is specified, then process the input and output
# as if it's a sequence of frames (must contain `%04d` as frame
# token placeholder in filepaths)
oiio_cmd.extend([
"--frames", frames,
])
if frame_padding:
oiio_cmd.extend([
"--framepadding", str(frame_padding),
])
if parallel_frames:
oiio_cmd.append("--parallel-frames")
oiio_cmd.extend([
input_arg, input_path,
# Tell oiiotool which channels should be put to top stack
@ -1326,11 +1234,17 @@ def oiio_color_convert(
if source_view and source_display:
color_convert_args = None
ocio_display_args = None
oiio_cmd.extend([
"--ociodisplay:inverse=1:subimages=0",
source_display,
source_view,
])
if target_colorspace:
# This is a two-step conversion process since there's no direct
# display/view to colorspace command
# This could be a config parameter or determined from OCIO config
# Use temporary role space 'scene_linear'
# Use temporarty role space 'scene_linear'
color_convert_args = ("scene_linear", target_colorspace)
elif source_display != target_display or source_view != target_view:
# Complete display/view pair conversion
@ -1342,15 +1256,6 @@ def oiio_color_convert(
" No color conversion needed."
)
if color_convert_args or ocio_display_args:
# Invert source display/view so that we can go from there to the
# target colorspace or display/view
oiio_cmd.extend([
"--ociodisplay:inverse=1:subimages=0",
source_display,
source_view,
])
if color_convert_args:
# Use colorconvert for colorspace target
oiio_cmd.extend([
@ -1468,11 +1373,7 @@ def get_rescaled_command_arguments(
command_args.extend(["-vf", "{0},{1}".format(scale, pad)])
elif application == "oiiotool":
input_info = get_oiio_info_for_input(
input_path,
verbose=False,
logger=log,
)
input_info = get_oiio_info_for_input(input_path, logger=log)
# Collect channels to export
_, channels_arg = get_oiio_input_and_channel_args(
input_info, alpha_default=1.0)
@ -1563,11 +1464,7 @@ def _get_image_dimensions(application, input_path, log):
# fallback for weird files with width=0, height=0
if (input_width == 0 or input_height == 0) and application == "oiiotool":
# Load info about file from oiio tool
input_info = get_oiio_info_for_input(
input_path,
verbose=False,
logger=log,
)
input_info = get_oiio_info_for_input(input_path, logger=log)
if input_info:
input_width = int(input_info["width"])
input_height = int(input_info["height"])
@ -1616,13 +1513,10 @@ def get_oiio_input_and_channel_args(oiio_input_info, alpha_default=None):
"""Get input and channel arguments for oiiotool.
Args:
oiio_input_info (dict): Information about input from oiio tool.
Should be output of function 'get_oiio_info_for_input' (can be
called with 'verbose=False').
Should be output of function `get_oiio_info_for_input`.
alpha_default (float, optional): Default value for alpha channel.
Returns:
tuple[str, str]: Tuple of input and channel arguments.
"""
channel_names = oiio_input_info["channelnames"]
review_channels = get_convert_rgb_channels(channel_names)
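The develop side above replaces per-file conversion with one oiiotool call per sequence, grouped via clique; a small sketch of how the custom pattern assembles frames (paths are illustrative, printed values depend on clique's padding resolution):

import clique

paths = [
    "/tmp/beauty_1001.exr",
    "/tmp/beauty_1002.exr",
    "/tmp/single.exr",
]
# clique's builtin frames pattern only matches '.1001.exr'; the custom
# pattern from the diff also accepts '_1001.exr'.
pattern = "[_.](?P<index>(?P<padding>0*)\\d+)\\.\\D+\\d?$"
collections, remainder = clique.assemble(
    paths,
    patterns=[pattern],
    assume_padded_when_ambiguous=True,
)
for collection in collections:
    print(collection.format("{head}#{tail}"), collection.padding)
print(remainder)  # unmatched single files are processed individually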

View file

@ -70,7 +70,7 @@ from dataclasses import dataclass
import ayon_api
from ayon_core import AYON_CORE_ROOT
from ayon_core.lib import StrEnum, Logger, is_func_signature_supported
from ayon_core.lib import StrEnum, Logger
from ayon_core.host import AbstractHost
from ayon_core.addon import AddonsManager, IPluginPaths
from ayon_core.settings import get_studio_settings, get_project_settings
@ -752,7 +752,6 @@ class LoaderActionsContext:
def _get_plugins(self) -> dict[str, LoaderActionPlugin]:
if self._plugins is None:
host_name = self.get_host_name()
addons_manager = self.get_addons_manager()
all_paths = [
os.path.join(AYON_CORE_ROOT, "plugins", "loader")
@ -760,24 +759,7 @@ class LoaderActionsContext:
for addon in addons_manager.addons:
if not isinstance(addon, IPluginPaths):
continue
try:
if is_func_signature_supported(
addon.get_loader_action_plugin_paths,
host_name
):
paths = addon.get_loader_action_plugin_paths(
host_name
)
else:
paths = addon.get_loader_action_plugin_paths()
except Exception:
self._log.warning(
"Failed to get plugin paths for addon",
exc_info=True
)
continue
if paths:
all_paths.extend(paths)
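A minimal sketch of the signature-compatibility pattern above, using 'is_func_signature_supported' from ayon_core.lib so older addons without the 'host_name' argument keep working:

from ayon_core.lib import is_func_signature_supported

def get_paths_compat(addon, host_name):
    # Prefer the newer one-argument signature; fall back to the
    # zero-argument signature of older addons.
    if is_func_signature_supported(
        addon.get_loader_action_plugin_paths, host_name
    ):
        return addon.get_loader_action_plugin_paths(host_name)
    return addon.get_loader_action_plugin_paths()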

View file

@ -1,5 +1,4 @@
"""Package to handle compatibility checks for pipeline components."""
import ayon_api
def is_product_base_type_supported() -> bool:
@ -14,7 +13,4 @@ def is_product_base_type_supported() -> bool:
bool: True if product base types are supported, False otherwise.
"""
if not hasattr(ayon_api, "is_product_base_type_supported"):
return False
return ayon_api.is_product_base_type_supported()

View file

@ -15,7 +15,6 @@ from typing import (
Any,
Callable,
)
from warnings import warn
import pyblish.logic
import pyblish.api
@ -753,13 +752,13 @@ class CreateContext:
manual_creators = {}
report = discover_creator_plugins(return_report=True)
self.creator_discover_result = report
for creator_class in report.abstract_plugins:
self.log.debug(
"Skipping abstract Creator '%s'",
str(creator_class)
)
for creator_class in report.plugins:
if inspect.isabstract(creator_class):
self.log.debug(
"Skipping abstract Creator {}".format(str(creator_class))
)
continue
creator_identifier = creator_class.identifier
if creator_identifier in creators:
self.log.warning(
@ -773,17 +772,19 @@ class CreateContext:
creator_class.host_name
and creator_class.host_name != self.host_name
):
self.log.info(
(
'Creator\'s host name "{}"'
' is not supported for current host "{}"'
).format(creator_class.host_name, self.host_name)
)
self.log.info((
"Creator's host name \"{}\""
" is not supported for current host \"{}\""
).format(creator_class.host_name, self.host_name))
continue
# TODO report initialization error
try:
creator = creator_class(project_settings, self, self.headless)
creator = creator_class(
project_settings,
self,
self.headless
)
except Exception:
self.log.error(
f"Failed to initialize plugin: {creator_class}",
@ -791,19 +792,6 @@ class CreateContext:
)
continue
if not creator.product_base_type:
message = (
f"Provided creator {creator!r} doesn't have "
"product base type attribute defined. This will be "
"required in future."
)
warn(
message,
DeprecationWarning,
stacklevel=2
)
self.log.warning(message)
if not creator.enabled:
disabled_creators[creator_identifier] = creator
continue
@ -1301,12 +1289,8 @@ class CreateContext:
"folderPath": folder_entity["path"],
"task": task_entity["name"] if task_entity else None,
"productType": creator.product_type,
# Add product base type if supported. Fallback to product type
"productBaseType": (
creator.product_base_type or creator.product_type),
"variant": variant
}
if active is not None:
if not isinstance(active, bool):
self.log.warning(

View file

@ -1,21 +1,20 @@
"""Creator plugins for the create process."""
from __future__ import annotations
import collections
import copy
# -*- coding: utf-8 -*-
import os
import copy
import collections
from typing import TYPE_CHECKING, Optional, Dict, Any
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Dict, Optional
from ayon_core.lib import Logger, get_version_from_path
from ayon_core.pipeline.plugin_discover import (
deregister_plugin,
deregister_plugin_path,
discover,
register_plugin,
register_plugin_path,
deregister_plugin,
deregister_plugin_path
)
from ayon_core.pipeline.staging_dir import StagingDir, get_staging_dir_info
from ayon_core.pipeline.staging_dir import get_staging_dir_info, StagingDir
from .constants import DEFAULT_VARIANT_VALUE
from .product_name import get_product_name
@ -24,7 +23,6 @@ from .structures import CreatedInstance
if TYPE_CHECKING:
from ayon_core.lib import AbstractAttrDef
# Avoid cyclic imports
from .context import CreateContext, UpdateData # noqa: F401
@ -68,6 +66,7 @@ class ProductConvertorPlugin(ABC):
Returns:
logging.Logger: Logger with name of the plugin.
"""
if self._log is None:
self._log = Logger.get_logger(self.__class__.__name__)
return self._log
@ -83,9 +82,10 @@ class ProductConvertorPlugin(ABC):
Returns:
str: Converted identifier unique for all converters in host.
"""
pass
@abstractmethod
def find_instances(self):
"""Look for legacy instances in the scene.
@ -94,10 +94,14 @@ class ProductConvertorPlugin(ABC):
convert.
"""
pass
@abstractmethod
def convert(self):
"""Conversion code."""
pass
@property
def create_context(self):
"""Quick access to create context.
@ -105,6 +109,7 @@ class ProductConvertorPlugin(ABC):
Returns:
CreateContext: Context which initialized the plugin.
"""
return self._create_context
@property
@ -117,6 +122,7 @@ class ProductConvertorPlugin(ABC):
Raises:
UnavailableSharedData: When called out of collection phase.
"""
return self._create_context.collection_shared_data
def add_convertor_item(self, label):
@ -125,10 +131,12 @@ class ProductConvertorPlugin(ABC):
Args:
label (str): Label of item which will show in UI.
"""
self._create_context.add_convertor_item(self.identifier, label)
def remove_convertor_item(self):
"""Remove legacy item from create context when conversion finished."""
self._create_context.remove_convertor_item(self.identifier)
@ -146,14 +154,7 @@ class BaseCreator(ABC):
project_settings (dict[str, Any]): Project settings.
create_context (CreateContext): Context which initialized creator.
headless (bool): Running in headless mode.
"""
# Attribute 'skip_discovery' is used during discovery phase to skip
# plugins, which can be used to mark base plugins that should not be
# considered as plugins "to use". The discovery logic does NOT use
# the attribute value from parent classes. Each base class has to define
# the attribute again.
skip_discovery = True
# Label shown in UI
label = None
@ -218,6 +219,7 @@ class BaseCreator(ABC):
Returns:
Optional[dict[str, Any]]: Settings values or None.
"""
settings = project_settings.get(category_name)
if not settings:
return None
@ -263,6 +265,7 @@ class BaseCreator(ABC):
Args:
project_settings (dict[str, Any]): Project settings.
"""
settings_category = self.settings_category
if not settings_category:
return
@ -274,17 +277,18 @@ class BaseCreator(ABC):
project_settings, settings_category, settings_name
)
if settings is None:
self.log.debug(f"No settings found for {cls_name}")
self.log.debug("No settings found for {}".format(cls_name))
return
for key, value in settings.items():
# Log out attributes that are not defined on plugin object
# - those may be potential dangerous typos in settings
if not hasattr(self, key):
self.log.debug(
"Applying settings to unknown attribute '%s' on '%s'.",
self.log.debug((
"Applying settings to unknown attribute '{}' on '{}'."
).format(
key, cls_name
)
))
setattr(self, key, value)
def register_callbacks(self):
@ -293,39 +297,23 @@ class BaseCreator(ABC):
Default implementation does nothing. It can be overridden to register
callbacks for creator.
"""
pass
@property
def identifier(self):
"""Identifier of creator (must be unique).
Default implementation returns plugin's product base type,
or falls back to product type if product base type is not set.
Default implementation returns plugin's product type.
"""
identifier = self.product_base_type
if not identifier:
identifier = self.product_type
return identifier
return self.product_type
@property
@abstractmethod
def product_type(self):
"""Family that plugin represents."""
@property
def product_base_type(self) -> Optional[str]:
"""Base product type that plugin represents.
Todo (antirotor): This should be required in future - it
should be made abstract then.
Returns:
Optional[str]: Base product type that plugin represents.
If not set, it is assumed that the creator plugin is obsolete
and does not support product base type.
"""
return None
pass
@property
def project_name(self):
@ -334,6 +322,7 @@ class BaseCreator(ABC):
Returns:
str: Name of a project.
"""
return self.create_context.project_name
@property
@ -343,6 +332,7 @@ class BaseCreator(ABC):
Returns:
Anatomy: Project anatomy object.
"""
return self.create_context.project_anatomy
@property
@ -354,14 +344,13 @@ class BaseCreator(ABC):
Default implementation use attributes in this order:
- 'group_label' -> 'label' -> 'identifier'
Keep in mind that 'identifier' uses 'product_base_type' by default.
Keep in mind that 'identifier' use 'product_type' by default.
Returns:
str: Group label that can be used for grouping of instances in UI.
Group label can be overridden by the instance itself.
Group label can be overridden by instance itself.
"""
if self._cached_group_label is None:
label = self.identifier
if self.group_label:
@ -378,6 +367,7 @@ class BaseCreator(ABC):
Returns:
logging.Logger: Logger with name of the plugin.
"""
if self._log is None:
self._log = Logger.get_logger(self.__class__.__name__)
return self._log
@ -386,8 +376,7 @@ class BaseCreator(ABC):
self,
product_name: str,
data: Dict[str, Any],
product_type: Optional[str] = None,
product_base_type: Optional[str] = None
product_type: Optional[str] = None
) -> CreatedInstance:
"""Create instance and add instance to context.
@ -396,8 +385,6 @@ class BaseCreator(ABC):
data (Dict[str, Any]): Instance data.
product_type (Optional[str]): Product type, object attribute
'product_type' is used if not passed.
product_base_type (Optional[str]): Product base type, object
attribute 'product_base_type' is used if not passed.
Returns:
CreatedInstance: Created instance.
@ -405,16 +392,11 @@ class BaseCreator(ABC):
"""
if product_type is None:
product_type = self.product_type
if not product_base_type and not self.product_base_type:
product_base_type = product_type
instance = CreatedInstance(
product_type=product_type,
product_name=product_name,
data=data,
product_type,
product_name,
data,
creator=self,
product_base_type=product_base_type,
)
self._add_instance_to_context(instance)
return instance
@ -430,6 +412,7 @@ class BaseCreator(ABC):
Args:
instance (CreatedInstance): New created instance.
"""
self.create_context.creator_adds_instance(instance)
def _remove_instance_from_context(self, instance):
@ -442,6 +425,7 @@ class BaseCreator(ABC):
Args:
instance (CreatedInstance): Instance which should be removed.
"""
self.create_context.creator_removed_instance(instance)
@abstractmethod
@ -453,6 +437,8 @@ class BaseCreator(ABC):
implementation
"""
pass
@abstractmethod
def collect_instances(self):
"""Collect existing instances related to this creator plugin.
@ -478,6 +464,8 @@ class BaseCreator(ABC):
```
"""
pass
@abstractmethod
def update_instances(self, update_list):
"""Store changes of existing instances so they can be recollected.
@ -487,6 +475,8 @@ class BaseCreator(ABC):
contain changed instance and it's changes.
"""
pass
@abstractmethod
def remove_instances(self, instances):
"""Method called on instance removal.
@ -499,11 +489,14 @@ class BaseCreator(ABC):
removed.
"""
pass
def get_icon(self):
"""Icon of creator (product type).
Can return path to image file or awesome icon name.
"""
return self.icon
def get_dynamic_data(
@ -519,18 +512,19 @@ class BaseCreator(ABC):
These may be dynamically created based on current context of workfile.
"""
return {}
def get_product_name(
self,
project_name: str,
folder_entity: dict[str, Any],
task_entity: Optional[dict[str, Any]],
variant: str,
host_name: Optional[str] = None,
instance: Optional[CreatedInstance] = None,
project_entity: Optional[dict[str, Any]] = None,
) -> str:
project_name,
folder_entity,
task_entity,
variant,
host_name=None,
instance=None,
project_entity=None,
):
"""Return product name for passed context.
Method is also called on product name update. In that case origin
@ -552,6 +546,11 @@ class BaseCreator(ABC):
if host_name is None:
host_name = self.create_context.host_name
task_name = task_type = None
if task_entity:
task_name = task_entity["name"]
task_type = task_entity["taskType"]
dynamic_data = self.get_dynamic_data(
project_name,
folder_entity,
@ -567,12 +566,11 @@ class BaseCreator(ABC):
return get_product_name(
project_name,
folder_entity=folder_entity,
task_entity=task_entity,
product_base_type=self.product_base_type,
product_type=self.product_type,
host_name=host_name,
variant=variant,
task_name,
task_type,
host_name,
self.product_type,
variant,
dynamic_data=dynamic_data,
project_settings=self.project_settings,
project_entity=project_entity,
@ -585,15 +583,15 @@ class BaseCreator(ABC):
and values are stored to metadata for future usage and for publishing
purposes.
Note:
Convert method should be implemented which should care about
updating keys/values when plugin attributes change.
NOTE:
Convert method should be implemented which should care about updating
keys/values when plugin attributes change.
Returns:
list[AbstractAttrDef]: Attribute definitions that can be tweaked
for created instance.
"""
return self.instance_attr_defs
def get_attr_defs_for_instance(self, instance):
@ -616,10 +614,12 @@ class BaseCreator(ABC):
Raises:
UnavailableSharedData: When called out of collection phase.
"""
return self.create_context.collection_shared_data
def set_instance_thumbnail_path(self, instance_id, thumbnail_path=None):
"""Set path to thumbnail for instance."""
self.create_context.thumbnail_paths_by_instance_id[instance_id] = (
thumbnail_path
)
@ -640,6 +640,7 @@ class BaseCreator(ABC):
Returns:
dict[str, int]: Next versions by instance id.
"""
return get_next_versions_for_instances(
self.create_context.project_name, instances
)
@ -650,7 +651,7 @@ class Creator(BaseCreator):
Creation requires prepared product name and instance data.
"""
skip_discovery = True
# GUI Purposes
# - default_variants may not be used if `get_default_variants`
# is overridden
@ -706,6 +707,7 @@ class Creator(BaseCreator):
int: Order in which the creator is shown (less == earlier). By default
uses Creator's 'order' for processing.
"""
return self.order
@abstractmethod
@ -720,9 +722,11 @@ class Creator(BaseCreator):
pre_create_data(dict): Data based on pre creation attributes.
Those may affect how creator works.
"""
# instance = CreatedInstance(
# self.product_type, product_name, instance_data
# )
pass
def get_description(self):
"""Short description of product type and plugin.
@ -730,6 +734,7 @@ class Creator(BaseCreator):
Returns:
str: Short description of product type.
"""
return self.description
def get_detail_description(self):
@ -740,6 +745,7 @@ class Creator(BaseCreator):
Returns:
str: Detailed description of product type for artist.
"""
return self.detailed_description
def get_default_variants(self):
@ -753,6 +759,7 @@ class Creator(BaseCreator):
Returns:
list[str]: Whisper variants for user input.
"""
return copy.deepcopy(self.default_variants)
def get_default_variant(self, only_explicit=False):
@ -772,6 +779,7 @@ class Creator(BaseCreator):
Returns:
str: Variant value.
"""
if only_explicit or self._default_variant:
return self._default_variant
@ -792,6 +800,7 @@ class Creator(BaseCreator):
Returns:
str: Variant value.
"""
return self.get_default_variant()
def _set_default_variant_wrap(self, variant):
@ -803,6 +812,7 @@ class Creator(BaseCreator):
Args:
variant (str): New default variant value.
"""
self._default_variant = variant
default_variant = property(
@ -939,8 +949,6 @@ class Creator(BaseCreator):
class HiddenCreator(BaseCreator):
skip_discovery = True
@abstractmethod
def create(self, instance_data, source_data):
pass
@ -951,10 +959,10 @@ class AutoCreator(BaseCreator):
Can be used e.g. for `workfile`.
"""
skip_discovery = True
def remove_instances(self, instances):
"""Skip removal."""
pass
def discover_creator_plugins(*args, **kwargs):
@ -1012,6 +1020,7 @@ def cache_and_get_instances(creator, shared_key, list_instances_func):
dict[str, dict[str, Any]]: Cached instances by creator identifier from
result of passed function.
"""
if shared_key not in creator.collection_shared_data:
value = collections.defaultdict(list)
for instance in list_instances_func():
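A hedged sketch of a develop-side creator using the new attributes; the class name, values, and the omitted abstract methods (collect/update/remove_instances) are illustrative:

class ModelCreator(Creator):
    # Discoverable: unlike the base classes above, it does not set
    # skip_discovery = True.
    label = "Model"
    product_type = "model"
    product_base_type = "geometry"  # new; 'identifier' falls back to it

    def create(self, product_name, instance_data, pre_create_data):
        # Keyword-based CreatedInstance creation added on develop.
        return self.create_instance(product_name, instance_data)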

View file

@ -1,38 +1,24 @@
"""Functions for handling product names."""
from __future__ import annotations
import warnings
from functools import wraps
from typing import Any, Optional, Union, overload
from warnings import warn
import ayon_api
from ayon_core.lib import (
StringTemplate,
filter_profiles,
prepare_template_data,
Logger,
is_func_signature_supported,
)
from ayon_core.lib.path_templates import TemplateResult
from ayon_core.settings import get_project_settings
from .constants import DEFAULT_PRODUCT_TEMPLATE
from .exceptions import TaskNotSetError, TemplateFillError
log = Logger.get_logger(__name__)
def get_product_name_template(
project_name: str,
product_type: str,
task_name: Optional[str],
task_type: Optional[str],
host_name: str,
default_template: Optional[str] = None,
project_settings: Optional[dict[str, Any]] = None,
product_base_type: Optional[str] = None
) -> str:
project_name,
product_type,
task_name,
task_type,
host_name,
default_template=None,
project_settings=None
):
"""Get product name template based on passed context.
Args:
@ -40,32 +26,26 @@ def get_product_name_template(
product_type (str): Product type for which the product name is
calculated.
host_name (str): Name of host in which the product name is calculated.
task_name (Optional[str]): Name of task in which context the
product is created.
task_type (Optional[str]): Type of task in which context the
product is created.
default_template (Optional[str]): Default template which is used if
task_name (str): Name of task in which context the product is created.
task_type (str): Type of task in which context the product is created.
default_template (Union[str, None]): Default template which is used if
settings won't find any matching possibility. Constant
'DEFAULT_PRODUCT_TEMPLATE' is used if not defined.
project_settings (Optional[dict[str, Any]]): Prepared settings for
project_settings (Union[Dict[str, Any], None]): Prepared settings for
project. Settings are queried if not passed.
product_base_type (Optional[str]): Base type of product.
Returns:
str: Product name template.
"""
if project_settings is None:
project_settings = get_project_settings(project_name)
tools_settings = project_settings["core"]["tools"]
profiles = tools_settings["creator"]["product_name_profiles"]
filtering_criteria = {
"product_base_types": product_base_type or product_type,
"product_types": product_type,
"host_names": host_name,
"task_names": task_name,
"task_types": task_type,
"task_types": task_type
}
matching_profile = filter_profiles(profiles, filtering_criteria)
template = None
if matching_profile:
@ -89,214 +69,6 @@ def get_product_name_template(
return template
def _get_product_name_old(
project_name: str,
task_name: Optional[str],
task_type: Optional[str],
host_name: str,
product_type: str,
variant: str,
default_template: Optional[str] = None,
dynamic_data: Optional[dict[str, Any]] = None,
project_settings: Optional[dict[str, Any]] = None,
product_type_filter: Optional[str] = None,
project_entity: Optional[dict[str, Any]] = None,
product_base_type: Optional[str] = None,
) -> TemplateResult:
warnings.warn(
"Used deprecated 'task_name' and 'task_type' arguments."
" Please use new signature with 'folder_entity' and 'task_entity'.",
DeprecationWarning,
stacklevel=2
)
if not product_type:
return StringTemplate("").format({})
template = get_product_name_template(
project_name=project_name,
product_type=product_type_filter or product_type,
task_name=task_name,
task_type=task_type,
host_name=host_name,
default_template=default_template,
project_settings=project_settings,
product_base_type=product_base_type,
)
template_low = template.lower()
# Simple check of task name existence for template with {task[name]} in
if not task_name and "{task" in template_low:
raise TaskNotSetError()
task_value = {
"name": task_name,
"type": task_type,
}
if "{task}" in template_low:
task_value = task_name
# NOTE this is message for TDs and Admins -> not really for users
# TODO validate this in settings and not allow it
log.warning(
"Found deprecated task key '{task}' in product name template."
" Please use '{task[name]}' instead."
)
elif "{task[short]}" in template_low:
if project_entity is None:
project_entity = ayon_api.get_project(project_name)
task_types_by_name = {
task["name"]: task for task in
project_entity["taskTypes"]
}
task_short = task_types_by_name.get(task_type, {}).get("shortName")
task_value["short"] = task_short
if not product_base_type and "{product[basetype]}" in template.lower():
warn(
"You have Product base type in product name template, "
"but it is not provided by the creator, please update your "
"creation code to include it. It will be required in "
"the future.",
DeprecationWarning,
stacklevel=2)
fill_pairs: dict[str, Union[str, dict[str, str]]] = {
"variant": variant,
"family": product_type,
"task": task_value,
"product": {
"type": product_type,
"basetype": product_base_type or product_type,
}
}
if dynamic_data:
# Dynamic data may override default values
for key, value in dynamic_data.items():
fill_pairs[key] = value
try:
return StringTemplate.format_strict_template(
template=template,
data=prepare_template_data(fill_pairs)
)
except KeyError as exp:
msg = (
f"Value for {exp} key is missing in template '{template}'."
f" Available values are {fill_pairs}"
)
raise TemplateFillError(msg) from exp
def _backwards_compatibility_product_name(func):
"""Helper to decide which variant of 'get_product_name' to use.
The old version expected 'task_name' and 'task_type' arguments. The new
version expects 'folder_entity' and 'task_entity' arguments instead.
The function is also marked with an attribute 'use_entities' so other
addons can check if the function is using the new signature or the old
one. That should allow addons to adapt to the new signature.
>>> if getattr(get_product_name, "use_entities", None):
>>> # New signature is used
>>> path = get_product_name(project_name, folder_entity, ...)
>>> else:
>>> # Old signature is used
>>> path = get_product_name(project_name, task_name, ...)
"""
# Add attribute to function to identify it as the new function
# so other addons can easily identify it.
# >>> getattr(get_product_name, "use_entities", False)
setattr(func, "use_entities", True)
@wraps(func)
def inner(*args, **kwargs):
# ---
# Decide which variant of the function is used based on
# passed arguments.
# ---
# Entities in key-word arguments mean that the new function is used
if "folder_entity" in kwargs or "task_entity" in kwargs:
return func(*args, **kwargs)
# Using more than 7 positional arguments is not allowed
# in the new function
if len(args) > 7:
return _get_product_name_old(*args, **kwargs)
if len(args) > 1:
arg_2 = args[1]
# The second argument is a string -> task name
if isinstance(arg_2, str):
return _get_product_name_old(*args, **kwargs)
if is_func_signature_supported(func, *args, **kwargs):
return func(*args, **kwargs)
return _get_product_name_old(*args, **kwargs)
return inner
@overload
def get_product_name(
project_name: str,
folder_entity: dict[str, Any],
task_entity: Optional[dict[str, Any]],
product_base_type: str,
product_type: str,
host_name: str,
variant: str,
*,
dynamic_data: Optional[dict[str, Any]] = None,
project_settings: Optional[dict[str, Any]] = None,
project_entity: Optional[dict[str, Any]] = None,
default_template: Optional[str] = None,
product_base_type_filter: Optional[str] = None,
) -> TemplateResult:
"""Calculate product name based on passed context and AYON settings.
Product name templates are defined in `project_settings/global/tools
/creator/product_name_profiles` where are profiles with host name,
product base type, product type, task name and task type filters.
If context does not match any profile then `DEFAULT_PRODUCT_TEMPLATE`
is used as default template.
That's the main reason why so many arguments are required to calculate
product name.
Args:
project_name (str): Project name.
folder_entity (Optional[dict[str, Any]]): Folder entity.
task_entity (Optional[dict[str, Any]]): Task entity.
host_name (str): Host name.
product_base_type (str): Product base type.
product_type (str): Product type.
variant (str): In most of the cases it is user input during creation.
dynamic_data (Optional[dict[str, Any]]): Dynamic data specific for
a creator which creates instance.
project_settings (Optional[dict[str, Any]]): Prepared settings
for project. Settings are queried if not passed.
project_entity (Optional[dict[str, Any]]): Project entity used when
task short name is required by template.
default_template (Optional[str]): Default template if any profile does
not match passed context. Constant 'DEFAULT_PRODUCT_TEMPLATE'
is used if is not passed.
product_base_type_filter (Optional[str]): Use different product base
type for product template filtering. Value of
`product_base_type` is used when not passed.
Returns:
TemplateResult: Product name.
Raises:
TaskNotSetError: If template requires task which is not provided.
TemplateFillError: If filled template contains placeholder key which
is not collected.
"""
@overload
def get_product_name(
project_name,
task_name,
@ -309,25 +81,25 @@ def get_product_name(
project_settings=None,
product_type_filter=None,
project_entity=None,
) -> TemplateResult:
):
"""Calculate product name based on passed context and AYON settings.
Product name templates are defined in `project_settings/global/tools
/creator/product_name_profiles` where are profiles with host name,
product type, task name and task type filters. If context does not match
any profile then `DEFAULT_PRODUCT_TEMPLATE` is used as default template.
Subst name templates are defined in `project_settings/global/tools/creator
/product_name_profiles` where are profiles with host name, product type,
task name and task type filters. If context does not match any profile
then `DEFAULT_PRODUCT_TEMPLATE` is used as default template.
That's the main reason why so many arguments are required to calculate
product name.
Deprecated:
This function is using deprecated signature that does not support
folder entity data to be used.
Todos:
Find better filtering options to avoid requirement of
argument 'family_filter'.
Args:
project_name (str): Project name.
task_name (Optional[str]): Task name.
task_type (Optional[str]): Task type.
task_name (Union[str, None]): Task name.
task_type (Union[str, None]): Task type.
host_name (str): Host name.
product_type (str): Product type.
variant (str): In most of the cases it is user input during creation.
@ -345,63 +117,7 @@ def get_product_name(
task short name is required by template.
Returns:
TemplateResult: Product name.
"""
pass
@_backwards_compatibility_product_name
def get_product_name(
project_name: str,
folder_entity: dict[str, Any],
task_entity: Optional[dict[str, Any]],
product_base_type: str,
product_type: str,
host_name: str,
variant: str,
*,
dynamic_data: Optional[dict[str, Any]] = None,
project_settings: Optional[dict[str, Any]] = None,
project_entity: Optional[dict[str, Any]] = None,
default_template: Optional[str] = None,
product_base_type_filter: Optional[str] = None,
) -> TemplateResult:
"""Calculate product name based on passed context and AYON settings.
Product name templates are defined in `project_settings/global/tools
/creator/product_name_profiles` where are profiles with host name,
product base type, product type, task name and task type filters.
If context does not match any profile then `DEFAULT_PRODUCT_TEMPLATE`
is used as default template.
That's the main reason why so many arguments are required to calculate
product name.
Args:
project_name (str): Project name.
folder_entity (Optional[dict[str, Any]]): Folder entity.
task_entity (Optional[dict[str, Any]]): Task entity.
host_name (str): Host name.
product_base_type (str): Product base type.
product_type (str): Product type.
variant (str): In most of the cases it is user input during creation.
dynamic_data (Optional[dict[str, Any]]): Dynamic data specific for
a creator which creates instance.
project_settings (Optional[dict[str, Any]]): Prepared settings
for project. Settings are queried if not passed.
project_entity (Optional[dict[str, Any]]): Project entity used when
task short name is required by template.
default_template (Optional[str]): Default template if any profile does
not match passed context. Constant 'DEFAULT_PRODUCT_TEMPLATE'
is used if is not passed.
product_base_type_filter (Optional[str]): Use different product base
type for product template filtering. Value of
`product_base_type` is used when not passed.
Returns:
TemplateResult: Product name.
str: Product name.
Raises:
TaskNotSetError: If template requires task which is not provided.
@ -410,68 +126,47 @@ def get_product_name(
"""
if not product_type:
return StringTemplate("").format({})
task_name = task_type = None
if task_entity:
task_name = task_entity["name"]
task_type = task_entity["taskType"]
return ""
template = get_product_name_template(
project_name=project_name,
product_base_type=product_base_type_filter or product_base_type,
product_type=product_type,
task_name=task_name,
task_type=task_type,
host_name=host_name,
project_name,
product_type_filter or product_type,
task_name,
task_type,
host_name,
default_template=default_template,
project_settings=project_settings,
project_settings=project_settings
)
template_low = template.lower()
# Simple check of task name existence for template with {task[name]} in
if not task_name and "{task" in template_low:
# Simple check of task name existence for template with {task} in
# - missing task should be possible only in Standalone publisher
if not task_name and "{task" in template.lower():
raise TaskNotSetError()
task_value = {
"name": task_name,
"type": task_type,
}
if "{task}" in template_low:
if "{task}" in template.lower():
task_value = task_name
# NOTE this is message for TDs and Admins -> not really for users
# TODO validate this in settings and not allow it
log.warning(
"Found deprecated task key '{task}' in product name template."
" Please use '{task[name]}' instead."
)
elif "{task[short]}" in template_low:
elif "{task[short]}" in template.lower():
if project_entity is None:
project_entity = ayon_api.get_project(project_name)
task_types_by_name = {
task["name"]: task
for task in project_entity["taskTypes"]
task["name"]: task for task in
project_entity["taskTypes"]
}
task_short = task_types_by_name.get(task_type, {}).get("shortName")
task_value["short"] = task_short
fill_pairs = {
"variant": variant,
# TODO We should stop support 'family' key.
"family": product_type,
"task": task_value,
"product": {
"type": product_type,
"basetype": product_base_type,
"type": product_type
}
}
if folder_entity:
fill_pairs["folder"] = {
"name": folder_entity["name"],
"type": folder_entity["folderType"],
}
if dynamic_data:
# Dynamic data may override default values
for key, value in dynamic_data.items():
@ -483,8 +178,7 @@ def get_product_name(
data=prepare_template_data(fill_pairs)
)
except KeyError as exp:
msg = (
f"Value for {exp} key is missing in template '{template}'."
f" Available values are {fill_pairs}"
raise TemplateFillError(
"Value for {} key is missing in template '{}'."
" Available values are {}".format(str(exp), template, fill_pairs)
)
raise TemplateFillError(msg)
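A hedged call using the new entity-based signature above; the entity dicts are abbreviated, and project settings are queried from the server when 'project_settings' is not passed:

product_name = get_product_name(
    "my_project",
    folder_entity={"name": "sh010", "folderType": "Shot"},
    task_entity={"name": "compositing", "taskType": "Compositing"},
    product_base_type="image",
    product_type="render",
    host_name="nuke",
    variant="Main",
)
# Old positional calls like get_product_name(project, task_name, ...)
# are routed to _get_product_name_old by the decorator above.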

View file

@ -11,8 +11,6 @@ from ayon_core.lib.attribute_definitions import (
serialize_attr_defs,
deserialize_attr_defs,
)
from ayon_core.pipeline import (
AYON_INSTANCE_ID,
AVALON_INSTANCE_ID,
@ -482,10 +480,6 @@ class CreatedInstance:
data (Dict[str, Any]): Data used for filling product name or override
data from already existing instance.
creator (BaseCreator): Creator responsible for instance.
product_base_type (Optional[str]): Product base type that will be
created. If not provided then product base type is taken from
creator plugin. If creator does not have product base type then
deprecation warning is raised.
"""
# Keys that can't be changed or removed from data after loading using
@ -496,7 +490,6 @@ class CreatedInstance:
"id",
"instance_id",
"productType",
"productBaseType",
"creator_identifier",
"creator_attributes",
"publish_attributes"
@ -516,13 +509,7 @@ class CreatedInstance:
data: Dict[str, Any],
creator: "BaseCreator",
transient_data: Optional[Dict[str, Any]] = None,
product_base_type: Optional[str] = None
):
"""Initialize CreatedInstance."""
# fallback to product type for backward compatibility
if not product_base_type:
product_base_type = creator.product_base_type or product_type
self._creator = creator
creator_identifier = creator.identifier
group_label = creator.get_group_label()
@ -575,9 +562,6 @@ class CreatedInstance:
self._data["id"] = item_id
self._data["productType"] = product_type
self._data["productName"] = product_name
self._data["productBaseType"] = product_base_type
self._data["active"] = data.get("active", True)
self._data["creator_identifier"] = creator_identifier

View file

@ -21,13 +21,6 @@ from .utils import get_representation_path_from_context
class LoaderPlugin(list):
"""Load representation into host application"""
# Attribute 'skip_discovery' is used during discovery phase to skip
# plugins, which can be used to mark base plugins that should not be
# considered as plugins "to use". The discovery logic does NOT use
# the attribute value from parent classes. Each base class has to define
# the attribute again.
skip_discovery = True
product_types: set[str] = set()
product_base_types: Optional[set[str]] = None
representations = set()

View file

@ -948,7 +948,7 @@ def get_representation_by_names(
version_name: Union[int, str],
representation_name: str,
) -> Optional[dict]:
"""Get representation entity for folder and product.
"""Get representation entity for asset and subset.
If version_name is "hero" then return the hero version
If version_name is "latest" then return the latest version
@ -966,7 +966,7 @@ def get_representation_by_names(
return None
if isinstance(product_name, dict) and "name" in product_name:
# Allow explicitly passing product entity document
# Allow explicitly passing subset document
product_entity = product_name
else:
product_entity = ayon_api.get_product_by_name(

View file

@ -138,14 +138,7 @@ def discover_plugins(
for item in modules:
filepath, module = item
result.add_module(module)
for cls in classes_from_module(base_class, module):
if cls is base_class:
continue
# Class has defined 'skip_discovery = True'
skip_discovery = cls.__dict__.get("skip_discovery")
if skip_discovery is True:
continue
all_plugins.append(cls)
all_plugins.extend(classes_from_module(base_class, module))
if base_class not in ignored_classes:
ignored_classes.append(base_class)
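The develop side checks 'cls.__dict__' instead of 'getattr' on purpose; a small sketch of the difference, since the flag must not be inherited by subclasses:

class Base:
    skip_discovery = True

class Concrete(Base):
    pass

print(Base.__dict__.get("skip_discovery"))      # True -> skipped
print(Concrete.__dict__.get("skip_discovery"))  # None -> discovered
print(getattr(Concrete, "skip_discovery"))      # True, inherited -- which
                                                # is why getattr is not used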

View file

@ -29,7 +29,6 @@ from .lib import (
get_publish_template_name,
publish_plugins_discover,
filter_crashed_publish_paths,
load_help_content_from_plugin,
load_help_content_from_filepath,
@ -88,7 +87,6 @@ __all__ = (
"get_publish_template_name",
"publish_plugins_discover",
"filter_crashed_publish_paths",
"load_help_content_from_plugin",
"load_help_content_from_filepath",

View file

@ -1,8 +1,6 @@
"""Library functions for publishing."""
from __future__ import annotations
import os
import platform
import re
import sys
import inspect
import copy
@ -10,19 +8,19 @@ import warnings
import hashlib
import xml.etree.ElementTree
from typing import TYPE_CHECKING, Optional, Union, List, Any
import clique
import speedcopy
import logging
import pyblish.util
import pyblish.plugin
import pyblish.api
from ayon_api import (
get_server_api_connection,
get_representations,
get_last_version_by_product_name
)
import clique
import pyblish.util
import pyblish.plugin
import pyblish.api
import speedcopy
from ayon_core.lib import (
import_filepath,
Logger,
@ -124,8 +122,7 @@ def get_publish_template_name(
task_type,
project_settings=None,
hero=False,
product_base_type: Optional[str] = None,
logger=None,
logger=None
):
"""Get template name which should be used for passed context.
@ -143,29 +140,17 @@ def get_publish_template_name(
task_type (str): Task type on which is instance working.
project_settings (Dict[str, Any]): Prepared project settings.
hero (bool): Template is for hero version publishing.
product_base_type (Optional[str]): Product base type for which the
template should be found.
logger (logging.Logger): Custom logger used for 'filter_profiles'
function.
Returns:
str: Template name which should be used for integration.
"""
if not product_base_type:
msg = (
"Argument 'product_base_type' is not provided to"
" 'get_publish_template_name' function. This argument"
" will be required in future versions."
)
warnings.warn(msg, DeprecationWarning)
if logger:
logger.warning(msg)
template = None
filter_criteria = {
"hosts": host_name,
"product_types": product_type,
"product_base_types": product_base_type,
"task_names": task_name,
"task_types": task_type,
}
@ -194,9 +179,7 @@ class HelpContent:
self.detail = detail
def load_help_content_from_filepath(
filepath: str
) -> dict[str, dict[str, HelpContent]]:
def load_help_content_from_filepath(filepath):
"""Load help content from xml file.
Xml file may contain errors and warnings.
"""
@ -231,84 +214,18 @@ def load_help_content_from_filepath(
return output
def load_help_content_from_plugin(
plugin: pyblish.api.Plugin,
help_filename: Optional[str] = None,
) -> dict[str, dict[str, HelpContent]]:
def load_help_content_from_plugin(plugin):
cls = plugin
if not inspect.isclass(plugin):
cls = plugin.__class__
plugin_filepath = inspect.getfile(cls)
plugin_dir = os.path.dirname(plugin_filepath)
if help_filename is None:
basename = os.path.splitext(os.path.basename(plugin_filepath))[0]
help_filename = basename + ".xml"
filepath = os.path.join(plugin_dir, "help", help_filename)
filename = basename + ".xml"
filepath = os.path.join(plugin_dir, "help", filename)
return load_help_content_from_filepath(filepath)
def filter_crashed_publish_paths(
project_name: str,
crashed_paths: set[str],
*,
project_settings: Optional[dict[str, Any]] = None,
) -> set[str]:
"""Filter crashed paths happened during plugins discovery.
Check if plugins discovery has enabled strict mode and filter crashed
paths that happened during discover based on regexes from settings.
Publishing should not start if any paths are returned.
Args:
project_name (str): Project name in which context plugins discovery
happened.
crashed_paths (set[str]): Crashed paths from plugins discovery report.
project_settings (Optional[dict[str, Any]]): Project settings.
Returns:
set[str]: Filtered crashed paths.
"""
filtered_paths = set()
# Nothing crashed all good...
if not crashed_paths:
return filtered_paths
if project_settings is None:
project_settings = get_project_settings(project_name)
discover_validation = (
project_settings["core"]["tools"]["publish"]["discover_validation"]
)
# Strict mode is not enabled.
if not discover_validation["enabled"]:
return filtered_paths
regexes = [
re.compile(value, re.IGNORECASE)
for value in discover_validation["ignore_paths"]
if value
]
is_windows = platform.system().lower() == "windows"
# Filter paths with regexes from settings
for path in crashed_paths:
# Normalize paths to use forward slashes on windows
if is_windows:
path = path.replace("\\", "/")
is_invalid = True
for regex in regexes:
if regex.match(path):
is_invalid = False
break
if is_invalid:
filtered_paths.add(path)
return filtered_paths
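A hedged usage sketch; the settings structure mirrors the keys read above and the paths are illustrative:

settings = {"core": {"tools": {"publish": {"discover_validation": {
    "enabled": True,
    "ignore_paths": [".*/vendor/.*"],
}}}}}
blocking = filter_crashed_publish_paths(
    "my_project",
    {"/plugins/vendor/old.py", "/plugins/validate_scene.py"},
    project_settings=settings,
)
print(blocking)  # {'/plugins/validate_scene.py'}; the vendor path is ignored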
def publish_plugins_discover(
paths: Optional[list[str]] = None) -> DiscoverResult:
"""Find and return available pyblish plug-ins.
@ -1162,16 +1079,14 @@ def main_cli_publish(
except ValueError:
pass
context = get_global_context()
project_settings = get_project_settings(context["project_name"])
install_ayon_plugins()
if addons_manager is None:
addons_manager = AddonsManager(project_settings)
addons_manager = AddonsManager()
applications_addon = addons_manager.get_enabled_addon("applications")
if applications_addon is not None:
context = get_global_context()
env = applications_addon.get_farm_publish_environment_variables(
context["project_name"],
context["folder_path"],
@ -1194,33 +1109,17 @@ def main_cli_publish(
log.info("Running publish ...")
discover_result = publish_plugins_discover()
publish_plugins = discover_result.plugins
print(discover_result.get_report(only_errors=False))
filtered_crashed_paths = filter_crashed_publish_paths(
context["project_name"],
set(discover_result.crashed_file_paths),
project_settings=project_settings,
)
if filtered_crashed_paths:
joined_paths = "\n".join([
f"- {path}"
for path in filtered_crashed_paths
])
log.error(
"Plugin discovery strict mode is enabled."
" Crashed plugin paths that prevent from publishing:"
f"\n{joined_paths}"
)
sys.exit(1)
publish_plugins = discover_result.plugins
# Error exit as soon as any error occurs.
error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"
error_format = ("Failed {plugin.__name__}: "
"{error} -- {error.traceback}")
for result in pyblish.util.publish_iter(plugins=publish_plugins):
if result["error"]:
log.error(error_format.format(**result))
# uninstall()
sys.exit(1)
log.info("Publish finished.")

View file

@ -1,7 +1,7 @@
import inspect
from abc import ABCMeta
import typing
from typing import Optional, Any
from typing import Optional
import pyblish.api
import pyblish.logic
@ -82,51 +82,22 @@ class PublishValidationError(PublishError):
class PublishXmlValidationError(PublishValidationError):
"""Raise an error from a dedicated xml file.
Can be useful to have one xml file with different possible messages,
which helps to avoid flooding code with dedicated artist messages.
XML files should live relative to the plugin file location:
'{plugin dir}/help/some_plugin.xml'.
Args:
plugin (pyblish.api.Plugin): Plugin that raised an error. Is used
to get path to xml file.
message (str): Exception message, can be technical, is used for
console output.
key (Optional[str]): XML file can contain multiple error messages, key
is used to get one of them. By default 'main' is used.
formatting_data (Optional[dict[str, Any]]): Error message can have
variables to fill.
help_filename (Optional[str]): Name of xml file with messages. By
default, the plugin's filename with an '.xml' extension is used.
"""
def __init__(
self,
plugin: pyblish.api.Plugin,
message: str,
key: Optional[str] = None,
formatting_data: Optional[dict[str, Any]] = None,
help_filename: Optional[str] = None,
) -> None:
self, plugin, message, key=None, formatting_data=None
):
if key is None:
key = "main"
if not formatting_data:
formatting_data = {}
result = load_help_content_from_plugin(plugin, help_filename)
result = load_help_content_from_plugin(plugin)
content_obj = result["errors"][key]
description = content_obj.description.format(**formatting_data)
detail = content_obj.detail
if detail:
detail = detail.format(**formatting_data)
super().__init__(
message,
content_obj.title,
description,
detail
super(PublishXmlValidationError, self).__init__(
message, content_obj.title, description, detail
)
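A hedged sketch of raising the error from a hypothetical validator plugin, including the new optional 'help_filename' override from the develop side:

import pyblish.api

class ValidateSceneSaved(pyblish.api.InstancePlugin):
    order = pyblish.api.ValidatorOrder
    label = "Validate Scene Saved"

    def process(self, instance):
        if not instance.data.get("sceneSaved"):  # illustrative check
            raise PublishXmlValidationError(
                self,
                "Scene file is not saved",
                key="unsaved",  # key of the message in the xml file
                formatting_data={"name": instance.name},
                help_filename="validate_scene.xml",  # optional override
            )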

View file

@ -96,6 +96,7 @@ def get_folder_template_data(folder_entity, project_name):
Output dictionary contains keys:
- 'folder' - dictionary with 'name' key filled with folder name
- 'asset' - folder name
- 'hierarchy' - parent folder names joined with '/'
- 'parent' - direct parent name, project name used if is under
project
@ -131,6 +132,7 @@ def get_folder_template_data(folder_entity, project_name):
"path": path,
"parents": parents,
},
"asset": folder_name,
"hierarchy": hierarchy,
"parent": parent_name
}
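To make the keys above concrete, a hypothetical result for a folder '/assets/characters/hero' (every value below is illustrative only):

```python
# Hypothetical output of get_folder_template_data() for the folder
# '/assets/characters/hero'; values are made up for illustration.
template_data = {
    "folder": {
        "name": "hero",
        "path": "/assets/characters/hero",
        "parents": ["assets", "characters"],
    },
    "asset": "hero",                   # legacy key, same as folder name
    "hierarchy": "assets/characters",  # parent folder names joined with '/'
    "parent": "characters",            # direct parent name
}
```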

View file

@ -299,6 +299,7 @@ def add_ordered_sublayer(layer, contribution_path, layer_id, order=None,
sdf format args metadata if enabled)
"""
# Add the order with the contribution path so that for future
# contributions we can again use it to magically fit into the
# ordering. We put this in the path because sublayer paths do
@ -316,25 +317,20 @@ def add_ordered_sublayer(layer, contribution_path, layer_id, order=None,
# If the layer was already in the layers, then replace it
for index, existing_path in enumerate(layer.subLayerPaths):
args = get_sdf_format_args(existing_path)
existing_layer_id = args.get("layer_id")
if existing_layer_id == layer_id:
existing_layer = layer.subLayerPaths[index]
existing_order = args.get("order")
existing_order = int(existing_order) if existing_order else None
if order is not None and order != existing_order:
# We need to move the layer, so we will remove this index
# and then re-insert it below at the right order
log.debug(f"Removing existing layer: {existing_layer}")
del layer.subLayerPaths[index]
break
existing_layer = args.get("layer_id")
if existing_layer == layer_id:
# Put it in the same position where it was before when swapping
# it with the original, also take over its order metadata
order = args.get("order")
if order is not None:
order = int(order)
else:
order = None
contribution_path = _format_path(contribution_path,
order=existing_order,
order=order,
layer_id=layer_id)
log.debug(
f"Replacing existing layer: {existing_layer} "
f"Replacing existing layer: {layer.subLayerPaths[index]} "
f"-> {contribution_path}"
)
layer.subLayerPaths[index] = contribution_path
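The `layer_id`/`order` metadata mentioned above travels inside the sublayer path as USD file format arguments. Below is a small sketch of round-tripping such a path with `pxr.Sdf`, assuming the same argument names; the ayon_core helper `get_sdf_format_args` may differ in details:

```python
from pxr import Sdf

# A sublayer path carrying metadata as file format arguments
path = "publish/look.usd:SDF_FORMAT_ARGS:layer_id=look&order=200"

# Split the identifier into the real layer path and its arguments
layer_path, args = Sdf.Layer.SplitIdentifier(path)
print(layer_path)          # 'publish/look.usd'
print(args["layer_id"])    # 'look'
print(int(args["order"]))  # 200

# Rebuild an identifier with updated arguments
new_path = Sdf.Layer.CreateIdentifier(
    layer_path, {"layer_id": "look", "order": "300"}
)
print(new_path)
```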

View file

@ -1,19 +1,16 @@
from __future__ import annotations
from typing import Optional, Any
from ayon_core.lib.profiles_filtering import filter_profiles
from ayon_core.settings import get_project_settings
def get_versioning_start(
project_name: str,
host_name: str,
task_name: Optional[str] = None,
task_type: Optional[str] = None,
product_type: Optional[str] = None,
product_name: Optional[str] = None,
project_settings: Optional[dict[str, Any]] = None,
) -> int:
project_name,
host_name,
task_name=None,
task_type=None,
product_type=None,
product_name=None,
project_settings=None,
):
"""Get anatomy versioning start"""
if not project_settings:
project_settings = get_project_settings(project_name)
@ -25,12 +22,14 @@ def get_versioning_start(
if not profiles:
return version_start
# TODO use 'product_types' and 'product_name' instead of
# 'families' and 'subsets'
filtering_criteria = {
"host_names": host_name,
"product_types": product_type,
"product_names": product_name,
"families": product_type,
"task_names": task_name,
"task_types": task_type,
"subsets": product_name
}
profile = filter_profiles(profiles, filtering_criteria)
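A hypothetical call, assuming the function lives at `ayon_core.pipeline.version_start` (the context values are made up):

```python
from ayon_core.pipeline.version_start import get_versioning_start

# Returns the configured first version number for this context,
# or the default when no profile matches.
version = get_versioning_start(
    "demo_project",
    "maya",
    task_name="modeling",
    task_type="Modeling",
    product_type="model",
    product_name="modelMain",
)
print(version)  # e.g. 1
```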

View file

@ -1483,7 +1483,7 @@ class PlaceholderLoadMixin(object):
tooltip=(
"Link Type\n"
"\nDefines what type of link will be used to"
" link the product to the current folder."
" link the asset to the current folder."
)
),
attribute_definitions.EnumDef(

View file

@ -62,8 +62,8 @@ class CreateHeroVersion(load.ProductLoaderPlugin):
ignored_representation_names: list[str] = []
db_representation_context_keys = [
"project", "folder", "hierarchy", "task", "product",
"representation", "username", "user", "output"
"project", "folder", "asset", "hierarchy", "task", "product",
"subset", "family", "representation", "username", "user", "output"
]
use_hardlinks = False

View file

@ -301,6 +301,8 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
product_name = instance.data["productName"]
product_type = instance.data["productType"]
anatomy_data.update({
"family": product_type,
"subset": product_name,
"product": {
"name": product_name,
"type": product_type,

View file

@ -25,7 +25,7 @@ class CollectManagedStagingDir(pyblish.api.InstancePlugin):
Location of the folder is configured in:
`ayon+anatomy://_/templates/staging`.
Which product type/task type/product is applicable is configured in:
Which family/task type/subset is applicable is configured in:
`ayon+settings://core/tools/publish/custom_staging_dir_profiles`
"""

View file

@ -1,5 +1,3 @@
from __future__ import annotations
from typing import Any
import ayon_api
import ayon_api.utils
@ -34,8 +32,6 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin):
self.log.debug("No loaded containers found in scene.")
return
containers = self._filter_invalid_containers(containers)
repre_ids = {
container["representation"]
for container in containers
@ -82,28 +78,3 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin):
self.log.debug(f"Collected {len(loaded_versions)} loaded versions.")
context.data["loadedVersions"] = loaded_versions
def _filter_invalid_containers(
self,
containers: list[dict[str, Any]]
) -> list[dict[str, Any]]:
"""Filter out invalid containers lacking required keys.
Skip any invalid containers that lack 'representation' or 'name'
keys to avoid KeyError.
"""
# Only filter by what's required for this plug-in instead of validating
# a full container schema.
required_keys = {"name", "representation"}
valid = []
for container in containers:
missing = [key for key in required_keys if key not in container]
if missing:
self.log.warning(
"Skipping invalid container, missing required keys:"
" {}. {}".format(", ".join(missing), container)
)
continue
valid.append(container)
return valid

View file

@ -316,8 +316,22 @@ class ExtractBurnin(publish.Extractor):
burnin_values = {}
for key in self.positions:
value = burnin_def.get(key)
if value:
burnin_values[key] = value
if not value:
continue
# TODO remove replacements
burnin_values[key] = (
value
.replace("{task}", "{task[name]}")
.replace("{product[name]}", "{subset}")
.replace("{Product[name]}", "{Subset}")
.replace("{PRODUCT[NAME]}", "{SUBSET}")
.replace("{product[type]}", "{family}")
.replace("{Product[type]}", "{Family}")
.replace("{PRODUCT[TYPE]}", "{FAMILY}")
.replace("{folder[name]}", "{asset}")
.replace("{Folder[name]}", "{Asset}")
.replace("{FOLDER[NAME]}", "{ASSET}")
)
# Remove "delete" tag from new representation
if "delete" in new_repre["tags"]:

View file

@ -172,33 +172,20 @@ class ExtractOIIOTranscode(publish.Extractor):
additional_command_args = (output_def["oiiotool_args"]
["additional_command_args"])
sequence_files = self._translate_to_sequence(
files_to_convert)
sequence_files = self._translate_to_sequence(files_to_convert)
self.log.debug("Files to convert: {}".format(sequence_files))
missing_rgba_review_channels = False
for file_name in sequence_files:
if isinstance(file_name, clique.Collection):
# Support sequences with holes by supplying
# dedicated `--frames` argument to `oiiotool`
# Create `frames` string like "1001-1002,1004,1010-1012"
# Create `filename` string like "file.#.exr"
frames = file_name.format("{ranges}").replace(" ", "")
frame_padding = file_name.padding
file_name = file_name.format("{head}#{tail}")
parallel_frames = True
elif isinstance(file_name, str):
# Single file
frames = None
frame_padding = None
parallel_frames = False
else:
raise TypeError(
f"Unsupported file name type: {type(file_name)}."
" Expected str or clique.Collection."
# Convert to filepath that can be directly converted
# by oiio like `frame.1001-1025%04d.exr`
file_name: str = file_name.format(
"{head}{range}{padding}{tail}"
)
self.log.debug("Transcoding file: `{}`".format(file_name))
input_path = os.path.join(original_staging_dir, file_name)
input_path = os.path.join(original_staging_dir,
file_name)
output_path = self._get_output_file_path(input_path,
new_staging_dir,
output_extension)
@ -214,9 +201,6 @@ class ExtractOIIOTranscode(publish.Extractor):
source_display=source_display,
source_view=source_view,
additional_command_args=additional_command_args,
frames=frames,
frame_padding=frame_padding,
parallel_frames=parallel_frames,
logger=self.log
)
except MissingRGBAChannelsError as exc:
@ -310,18 +294,16 @@ class ExtractOIIOTranscode(publish.Extractor):
new_repre["files"] = renamed_files
def _translate_to_sequence(self, files_to_convert):
"""Returns original individual filepaths or list of clique.Collection.
"""Returns original list or a clique.Collection of a sequence.
Uses clique to find frame sequences and returns the collections instead.
If sequence not detected in input filenames, it returns original list.
Uses clique to find frame sequence Collection.
If sequence not found, it returns original list.
Args:
files_to_convert (list[str]): list of file names
files_to_convert (list): list of file names
Returns:
list[str | clique.Collection]: List of
filepaths ['fileA.exr', 'fileB.exr']
or clique.Collection for a sequence.
list[str | clique.Collection]: List of filepaths or a list
of Collections (usually one, unless there are holes)
"""
pattern = [clique.PATTERNS["frames"]]
collections, _ = clique.assemble(
@ -332,7 +314,14 @@ class ExtractOIIOTranscode(publish.Extractor):
raise ValueError(
"Too many collections {}".format(collections))
return collections
collection = collections[0]
# TODO: Technically oiiotool supports holes in the sequence as well
# using the dedicated --frames argument to specify the frames.
# We may want to use that too so conversions of sequences with
# holes will perform faster as well.
# Separate the collection so that we have no holes/gaps per
# collection.
return collection.separate()
return files_to_convert
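The clique format tokens used in the develop branch above behave as sketched below for a gappy sequence (filenames are made up):

```python
import clique

files = ["shot.1001.exr", "shot.1002.exr", "shot.1004.exr"]
collections, remainder = clique.assemble(
    files,
    patterns=[clique.PATTERNS["frames"]],
    assume_padded_when_ambiguous=True,
)
collection = collections[0]

# '--frames' style range string: '1001-1002,1004'
print(collection.format("{ranges}").replace(" ", ""))
# oiiotool-friendly wildcard filename: 'shot.#.exr'
print(collection.format("{head}#{tail}"))
# Frame padding detected from the file names: 4
print(collection.padding)
```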

View file

@ -169,9 +169,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
settings_category = "core"
# Supported extensions
image_exts = {
"exr", "jpg", "jpeg", "png", "dpx", "tga", "tiff", "tif", "psd"
}
image_exts = {"exr", "jpg", "jpeg", "png", "dpx", "tga", "tiff", "tif"}
video_exts = {"mov", "mp4"}
supported_exts = image_exts | video_exts
@ -403,10 +401,6 @@ class ExtractReview(pyblish.api.InstancePlugin):
new_staging_dir,
self.log
)
# The OIIO conversion will remap the RGBA channels just to
# `R,G,B,A` so we will pass the intermediate file to FFMPEG
# without layer name.
layer_name = ""
try:
self._render_output_definitions(

View file

@ -1,9 +1,8 @@
import copy
from dataclasses import dataclass, field, fields
import os
import subprocess
import tempfile
from typing import Dict, Any, List, Tuple, Optional
import re
import pyblish.api
from ayon_core.lib import (
@ -16,7 +15,6 @@ from ayon_core.lib import (
path_to_subprocess_arg,
run_subprocess,
filter_profiles,
)
from ayon_core.lib.transcoding import (
MissingRGBAChannelsError,
@ -28,61 +26,6 @@ from ayon_core.lib.transcoding import (
from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
@dataclass
class ThumbnailDef:
"""
Data class representing the full configuration for the selected profile.
Any change of controllable fields in Settings must propagate here!
"""
integrate_thumbnail: bool = False
target_size: Dict[str, Any] = field(
default_factory=lambda: {
"type": "source",
"resize": {"width": 1920, "height": 1080},
}
)
duration_split: float = 0.5
oiiotool_defaults: Dict[str, str] = field(
default_factory=lambda: {
"type": "colorspace",
"colorspace": "color_picking"
}
)
ffmpeg_args: Dict[str, List[Any]] = field(
default_factory=lambda: {"input": [], "output": []}
)
# Background color defined as (R, G, B, A) tuple.
# Note: Use float for alpha channel (0.0 to 1.0).
background_color: Tuple[int, int, int, float] = (0, 0, 0, 0.0)
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "ThumbnailDef":
"""
Creates a ThumbnailDef instance from a dictionary, safely ignoring
any keys in the dictionary that are not fields in the dataclass.
Args:
data (Dict[str, Any]): The dictionary containing configuration data.
Returns:
ThumbnailDef: A new instance of the dataclass.
"""
# Get all field names defined in the dataclass
field_names = {f.name for f in fields(cls)}
# Filter the input dictionary to include only keys matching field names
filtered_data = {k: v for k, v in data.items() if k in field_names}
# Unpack the filtered dictionary into the constructor
return cls(**filtered_data)
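A quick usage sketch of the classmethod above; the profile dictionary is hypothetical and intentionally carries a non-field key to show the filtering:

```python
# Assumes the ThumbnailDef dataclass defined above is in scope.
profile = {
    "integrate_thumbnail": True,
    "duration_split": 0.25,
    "host_names": ["nuke"],  # filtering key, not a ThumbnailDef field
}
thumbnail_def = ThumbnailDef.from_dict(profile)
print(thumbnail_def.integrate_thumbnail)  # True
print(thumbnail_def.duration_split)       # 0.25
# Non-field keys such as 'host_names' were silently dropped.
```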
class ExtractThumbnail(pyblish.api.InstancePlugin):
"""Create jpg thumbnail from sequence using ffmpeg"""
@ -109,7 +52,30 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
settings_category = "core"
enabled = False
profiles = []
integrate_thumbnail = False
target_size = {
"type": "source",
"resize": {
"width": 1920,
"height": 1080
}
}
background_color = (0, 0, 0, 0.0)
duration_split = 0.5
# attribute presets from settings
oiiotool_defaults = {
"type": "colorspace",
"colorspace": "color_picking",
"display_and_view": {
"display": "default",
"view": "sRGB"
}
}
ffmpeg_args = {
"input": [],
"output": []
}
product_names = []
def process(self, instance):
# run main process
@ -132,13 +98,6 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
instance.data["representations"].remove(repre)
def _main_process(self, instance):
if not self.profiles:
self.log.debug("No profiles present for extract review thumbnail.")
return
thumbnail_def = self._get_config_from_profile(instance)
if not thumbnail_def:
return
product_name = instance.data["productName"]
instance_repres = instance.data.get("representations")
if not instance_repres:
@ -171,6 +130,24 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
self.log.debug("Skipping crypto passes.")
return
# We only want to process the products needed from settings.
def validate_string_against_patterns(input_str, patterns):
for pattern in patterns:
if re.match(pattern, input_str):
return True
return False
product_names = self.product_names
if product_names:
result = validate_string_against_patterns(
product_name, product_names
)
if not result:
self.log.debug((
"Product name \"{}\" did not match settings filters: {}"
).format(product_name, product_names))
return
# first check for any explicitly marked representations for thumbnail
explicit_repres = self._get_explicit_repres_for_thumbnail(instance)
if explicit_repres:
@ -215,8 +192,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
)
file_path = self._create_frame_from_video(
video_file_path,
dst_staging,
thumbnail_def
dst_staging
)
if file_path:
src_staging, input_file = os.path.split(file_path)
@ -229,8 +205,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
if "slate-frame" in repre.get("tags", []):
repre_files_thumb = repre_files_thumb[1:]
file_index = int(
float(len(repre_files_thumb)) * thumbnail_def.duration_split # noqa: E501
)
float(len(repre_files_thumb)) * self.duration_split)
input_file = repre_files[file_index]
full_input_path = os.path.join(src_staging, input_file)
@ -259,8 +234,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
repre_thumb_created = self._create_colorspace_thumbnail(
full_input_path,
full_output_path,
colorspace_data,
thumbnail_def,
colorspace_data
)
# Try to use FFMPEG if OIIO is not supported or for cases when
@ -268,13 +242,13 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
# colorspace data
if not repre_thumb_created:
repre_thumb_created = self._create_thumbnail_ffmpeg(
full_input_path, full_output_path, thumbnail_def
full_input_path, full_output_path
)
# Skip representation and try next one if wasn't created
if not repre_thumb_created and oiio_supported:
repre_thumb_created = self._create_thumbnail_oiio(
full_input_path, full_output_path, thumbnail_def
full_input_path, full_output_path
)
if not repre_thumb_created:
@ -302,7 +276,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
new_repre_tags = ["thumbnail"]
# for workflows which need to have thumbnails published as
# separate representations, the `delete` tag should not be added
if not thumbnail_def.integrate_thumbnail:
if not self.integrate_thumbnail:
new_repre_tags.append("delete")
new_repre = {
@ -401,7 +375,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
return review_repres + other_repres
def _is_valid_images_repre(self, repre: dict) -> bool:
def _is_valid_images_repre(self, repre):
"""Check if representation contains valid image files
Args:
@ -421,10 +395,9 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
def _create_colorspace_thumbnail(
self,
src_path: str,
dst_path: str,
colorspace_data: dict,
thumbnail_def: ThumbnailDef,
src_path,
dst_path,
colorspace_data,
):
"""Create thumbnail using OIIO tool oiiotool
@ -437,15 +410,12 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
config (dict)
display (Optional[str])
view (Optional[str])
thumbnail_def (ThumbnailDefinition): Thumbnail definition.
Returns:
str: path to created thumbnail
"""
self.log.info(f"Extracting thumbnail {dst_path}")
resolution_arg = self._get_resolution_args(
"oiiotool", src_path, thumbnail_def
)
self.log.info("Extracting thumbnail {}".format(dst_path))
resolution_arg = self._get_resolution_arg("oiiotool", src_path)
repre_display = colorspace_data.get("display")
repre_view = colorspace_data.get("view")
@ -464,13 +434,12 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
)
# if representation doesn't have display and view then use
# oiiotool_defaults
elif thumbnail_def.oiiotool_defaults:
oiiotool_defaults = thumbnail_def.oiiotool_defaults
oiio_default_type = oiiotool_defaults["type"]
elif self.oiiotool_defaults:
oiio_default_type = self.oiiotool_defaults["type"]
if "colorspace" == oiio_default_type:
oiio_default_colorspace = oiiotool_defaults["colorspace"]
oiio_default_colorspace = self.oiiotool_defaults["colorspace"]
else:
display_and_view = oiiotool_defaults["display_and_view"]
display_and_view = self.oiiotool_defaults["display_and_view"]
oiio_default_display = display_and_view["display"]
oiio_default_view = display_and_view["view"]
@ -497,24 +466,18 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
return True
def _create_thumbnail_oiio(self, src_path, dst_path, thumbnail_def):
def _create_thumbnail_oiio(self, src_path, dst_path):
self.log.debug(f"Extracting thumbnail with OIIO: {dst_path}")
try:
resolution_arg = self._get_resolution_args(
"oiiotool", src_path, thumbnail_def
)
resolution_arg = self._get_resolution_arg("oiiotool", src_path)
except RuntimeError:
self.log.warning(
"Failed to create thumbnail using oiio", exc_info=True
)
return False
input_info = get_oiio_info_for_input(
src_path,
logger=self.log,
verbose=False,
)
input_info = get_oiio_info_for_input(src_path, logger=self.log)
try:
input_arg, channels_arg = get_oiio_input_and_channel_args(
input_info
@ -547,11 +510,9 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
)
return False
def _create_thumbnail_ffmpeg(self, src_path, dst_path, thumbnail_def):
def _create_thumbnail_ffmpeg(self, src_path, dst_path):
try:
resolution_arg = self._get_resolution_args(
"ffmpeg", src_path, thumbnail_def
)
resolution_arg = self._get_resolution_arg("ffmpeg", src_path)
except RuntimeError:
self.log.warning(
"Failed to create thumbnail using ffmpeg", exc_info=True
@ -559,7 +520,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
return False
ffmpeg_path_args = get_ffmpeg_tool_args("ffmpeg")
ffmpeg_args = thumbnail_def.ffmpeg_args or {}
ffmpeg_args = self.ffmpeg_args or {}
jpeg_items = [
subprocess.list2cmdline(ffmpeg_path_args)
@ -599,12 +560,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
)
return False
def _create_frame_from_video(
self,
video_file_path: str,
output_dir: str,
thumbnail_def: ThumbnailDef,
) -> Optional[str]:
def _create_frame_from_video(self, video_file_path, output_dir):
"""Convert video file to one frame image via ffmpeg"""
# create output file path
base_name = os.path.basename(video_file_path)
@ -629,7 +585,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
seek_position = 0.0
# Only use timestamp calculation for videos longer than 0.1 seconds
if duration > 0.1:
seek_position = duration * thumbnail_def.duration_split
seek_position = duration * self.duration_split
# Build command args
cmd_args = []
@ -703,17 +659,16 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
):
os.remove(output_thumb_file_path)
def _get_resolution_args(
def _get_resolution_arg(
self,
application: str,
input_path: str,
thumbnail_def: ThumbnailDef,
) -> list:
application,
input_path,
):
# get settings
if thumbnail_def.target_size["type"] == "source":
if self.target_size["type"] == "source":
return []
resize = thumbnail_def.target_size["resize"]
resize = self.target_size["resize"]
target_width = resize["width"]
target_height = resize["height"]
@ -723,43 +678,6 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
input_path,
target_width,
target_height,
bg_color=thumbnail_def.background_color,
bg_color=self.background_color,
log=self.log
)
def _get_config_from_profile(
self,
instance: pyblish.api.Instance
) -> Optional[ThumbnailDef]:
"""Returns profile if and how repre should be color transcoded."""
host_name = instance.context.data["hostName"]
product_type = instance.data["productType"]
product_name = instance.data["productName"]
task_data = instance.data["anatomyData"].get("task", {})
task_name = task_data.get("name")
task_type = task_data.get("type")
filtering_criteria = {
"host_names": host_name,
"product_types": product_type,
"product_names": product_name,
"task_names": task_name,
"task_types": task_type,
}
profile = filter_profiles(
self.profiles,
filtering_criteria,
logger=self.log
)
if not profile:
self.log.debug(
"Skipped instance. None of profiles in presets are for"
f' Host: "{host_name}"'
f' | Product types: "{product_type}"'
f' | Product names: "{product_name}"'
f' | Task name "{task_name}"'
f' | Task type "{task_type}"'
)
return None
return ThumbnailDef.from_dict(profile)

View file

@ -14,7 +14,6 @@ Todos:
import os
import tempfile
from typing import List, Optional
import pyblish.api
from ayon_core.lib import (
@ -23,7 +22,6 @@ from ayon_core.lib import (
is_oiio_supported,
run_subprocess,
get_rescaled_command_arguments,
)
@ -33,20 +31,17 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
Thumbnail source must be a single image or video filepath.
"""
label = "Extract Thumbnail from source"
label = "Extract Thumbnail (from source)"
# Before 'ExtractThumbnail' in global plugins
order = pyblish.api.ExtractorOrder - 0.00001
# Settings
target_size = {
"type": "resize",
"resize": {"width": 1920, "height": 1080}
}
background_color = (0, 0, 0, 0.0)
def process(self, instance: pyblish.api.Instance):
def process(self, instance):
self._create_context_thumbnail(instance.context)
product_name = instance.data["productName"]
self.log.debug(
"Processing instance with product name {}".format(product_name)
)
thumbnail_source = instance.data.get("thumbnailSource")
if not thumbnail_source:
self.log.debug("Thumbnail source not filled. Skipping.")
@ -74,8 +69,6 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
"outputName": "thumbnail",
}
new_repre["tags"].append("delete")
# adding representation
self.log.debug(
"Adding thumbnail representation: {}".format(new_repre)
@ -83,11 +76,7 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
instance.data["representations"].append(new_repre)
instance.data["thumbnailPath"] = dst_filepath
def _create_thumbnail(
self,
context: pyblish.api.Context,
thumbnail_source: str,
) -> Optional[str]:
def _create_thumbnail(self, context, thumbnail_source):
if not thumbnail_source:
self.log.debug("Thumbnail source not filled. Skipping.")
return
@ -142,7 +131,7 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
self.log.warning("Thumbnail has not been created.")
def _instance_has_thumbnail(self, instance: pyblish.api.Instance) -> bool:
def _instance_has_thumbnail(self, instance):
if "representations" not in instance.data:
self.log.warning(
"Instance does not have 'representations' key filled"
@ -154,29 +143,14 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
return True
return False
def create_thumbnail_oiio(
self,
src_path: str,
dst_path: str,
) -> bool:
def create_thumbnail_oiio(self, src_path, dst_path):
self.log.debug("Outputting thumbnail with OIIO: {}".format(dst_path))
try:
resolution_args = self._get_resolution_args(
"oiiotool", src_path
oiio_cmd = get_oiio_tool_args(
"oiiotool",
"-a", src_path,
"--ch", "R,G,B",
"-o", dst_path
)
except Exception:
self.log.warning("Failed to get resolution args for OIIO.")
return False
oiio_cmd = get_oiio_tool_args("oiiotool", "-a", src_path)
if resolution_args:
# resize must be before -o
oiio_cmd.extend(resolution_args)
else:
# resize provides own -ch, must be only one
oiio_cmd.extend(["--ch", "R,G,B"])
oiio_cmd.extend(["-o", dst_path])
self.log.debug("Running: {}".format(" ".join(oiio_cmd)))
try:
run_subprocess(oiio_cmd, logger=self.log)
@ -188,19 +162,7 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
)
return False
def create_thumbnail_ffmpeg(
self,
src_path: str,
dst_path: str,
) -> bool:
try:
resolution_args = self._get_resolution_args(
"ffmpeg", src_path
)
except Exception:
self.log.warning("Failed to get resolution args for ffmpeg.")
return False
def create_thumbnail_ffmpeg(self, src_path, dst_path):
max_int = str(2147483647)
ffmpeg_cmd = get_ffmpeg_tool_args(
"ffmpeg",
@ -209,13 +171,9 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
"-probesize", max_int,
"-i", src_path,
"-frames:v", "1",
dst_path
)
ffmpeg_cmd.extend(resolution_args)
# possible resize must be before output args
ffmpeg_cmd.append(dst_path)
self.log.debug("Running: {}".format(" ".join(ffmpeg_cmd)))
try:
run_subprocess(ffmpeg_cmd, logger=self.log)
@ -227,37 +185,10 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
)
return False
def _create_context_thumbnail(
self,
context: pyblish.api.Context,
):
def _create_context_thumbnail(self, context):
if "thumbnailPath" in context.data:
return
thumbnail_source = context.data.get("thumbnailSource")
context.data["thumbnailPath"] = self._create_thumbnail(
context, thumbnail_source
)
def _get_resolution_args(
self,
application: str,
input_path: str,
) -> List[str]:
# get settings
if self.target_size["type"] == "source":
return []
resize = self.target_size["resize"]
target_width = resize["width"]
target_height = resize["height"]
# form arg string per application
return get_rescaled_command_arguments(
application,
input_path,
target_width,
target_height,
bg_color=self.background_color,
log=self.log,
)
thumbnail_path = self._create_thumbnail(context, thumbnail_source)
context.data["thumbnailPath"] = thumbnail_path

View file

@ -2,7 +2,6 @@ from operator import attrgetter
import dataclasses
import os
import platform
from collections import defaultdict
from typing import Any, Dict, List
import pyblish.api
@ -14,11 +13,10 @@ except ImportError:
from ayon_core.lib import (
TextDef,
BoolDef,
NumberDef,
UISeparatorDef,
UILabelDef,
EnumDef,
filter_profiles,
filter_profiles
)
try:
from ayon_core.pipeline.usdlib import (
@ -277,27 +275,23 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
# the contributions so that we can design a system where custom
# contributions outside the predefined orders are possible to be
# managed. So that if a particular asset requires an extra contribution
# level, you can add it directly from the publisher at that particular
# level, you can add it directly from the publisher at that particular
# order. Future publishes will then see the existing contribution and will
# persist adding it to future bootstraps at that order
contribution_layers: Dict[str, Dict[str, int]] = {
contribution_layers: Dict[str, int] = {
# asset layers
"asset": {
"model": 100,
"assembly": 150,
"groom": 175,
"look": 200,
"rig": 300,
},
# shot layers
"shot": {
"layout": 200,
"animation": 300,
"simulation": 400,
"fx": 500,
"lighting": 600,
}
}
# Default profiles to set certain instance attribute defaults based on
# profiles in settings
profiles: List[Dict[str, Any]] = []
@ -311,18 +305,12 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
cls.enabled = plugin_settings.get("enabled", cls.enabled)
# Define contribution layers via settings by their scope
contribution_layers = defaultdict(dict)
# Define contribution layers via settings
contribution_layers = {}
for entry in plugin_settings.get("contribution_layers", []):
for scope in entry.get("scope", []):
contribution_layers[scope][entry["name"]] = int(entry["order"])
contribution_layers[entry["name"]] = int(entry["order"])
if contribution_layers:
cls.contribution_layers = dict(contribution_layers)
else:
cls.log.warning(
"No scoped contribution layers found in settings, falling back"
" to CollectUSDLayerContributions plug-in defaults..."
)
cls.contribution_layers = contribution_layers
cls.profiles = plugin_settings.get("profiles", [])
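For clarity, a sketch of the settings entries the develop branch consumes and the scoped mapping they produce (the layer names and orders are hypothetical):

```python
from collections import defaultdict

plugin_settings = {
    "contribution_layers": [
        {"name": "model", "order": 100, "scope": ["asset"]},
        {"name": "look", "order": 200, "scope": ["asset"]},
        {"name": "animation", "order": 300, "scope": ["shot"]},
    ]
}

contribution_layers = defaultdict(dict)
for entry in plugin_settings["contribution_layers"]:
    for scope in entry["scope"]:
        contribution_layers[scope][entry["name"]] = int(entry["order"])

# {'asset': {'model': 100, 'look': 200}, 'shot': {'animation': 300}}
print(dict(contribution_layers))
```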
@ -346,7 +334,10 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
attr_values[key] = attr_values[key].format(**data)
# Define contribution
in_layer_order: int = attr_values.get("contribution_in_layer_order", 0)
order = self.contribution_layers.get(
attr_values["contribution_layer"], 0
)
if attr_values["contribution_apply_as_variant"]:
contribution = VariantContribution(
instance=instance,
@ -355,23 +346,19 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
variant_set_name=attr_values["contribution_variant_set_name"],
variant_name=attr_values["contribution_variant"],
variant_is_default=attr_values["contribution_variant_is_default"], # noqa: E501
order=in_layer_order
order=order
)
else:
contribution = SublayerContribution(
instance=instance,
layer_id=attr_values["contribution_layer"],
target_product=attr_values["contribution_target_product"],
order=in_layer_order
order=order
)
asset_product = contribution.target_product
layer_product = "{}_{}".format(asset_product, contribution.layer_id)
scope: str = attr_values["contribution_target_product_init"]
layer_order: int = (
self.contribution_layers[scope][attr_values["contribution_layer"]]
)
# Layer contribution instance
layer_instance = self.get_or_create_instance(
product_name=layer_product,
@ -383,7 +370,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
contribution
)
layer_instance.data["usd_layer_id"] = contribution.layer_id
layer_instance.data["usd_layer_order"] = layer_order
layer_instance.data["usd_layer_order"] = contribution.order
layer_instance.data["productGroup"] = (
instance.data.get("productGroup") or "USD Layer"
@ -502,14 +489,14 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
profile = {}
# Define defaults
default_enabled: bool = profile.get("contribution_enabled", True)
default_enabled = profile.get("contribution_enabled", True)
default_contribution_layer = profile.get(
"contribution_layer", None)
default_apply_as_variant: bool = profile.get(
default_apply_as_variant = profile.get(
"contribution_apply_as_variant", False)
default_target_product: str = profile.get(
default_target_product = profile.get(
"contribution_target_product", "usdAsset")
default_init_as: str = (
default_init_as = (
"asset"
if profile.get("contribution_target_product") == "usdAsset"
else "shot")
@ -522,12 +509,6 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
visible = publish_attributes.get("contribution_enabled", True)
variant_visible = visible and publish_attributes.get(
"contribution_apply_as_variant", True)
init_as: str = publish_attributes.get(
"contribution_target_product_init", default_init_as)
contribution_layers = cls.contribution_layers.get(
init_as, {}
)
return [
UISeparatorDef("usd_container_settings1"),
@ -577,22 +558,9 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
"predefined ordering.\nA higher order (further down "
"the list) will contribute as a stronger opinion."
),
items=list(contribution_layers.keys()),
items=list(cls.contribution_layers.keys()),
default=default_contribution_layer,
visible=visible),
# TODO: We may want to make the visibility of this optional
# based on studio preference, to avoid complexity when not needed
NumberDef("contribution_in_layer_order",
label="Strength order",
tooltip=(
"The contribution inside the department layer will be "
"made with this offset applied. A higher number means "
"a stronger opinion."
),
default=0,
minimum=-99999,
maximum=99999,
visible=visible),
BoolDef("contribution_apply_as_variant",
label="Add as variant",
tooltip=(
@ -638,11 +606,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
# Update attributes if any of the following plug-in attributes
# change:
keys = {
"contribution_enabled",
"contribution_apply_as_variant",
"contribution_target_product_init",
}
keys = ["contribution_enabled", "contribution_apply_as_variant"]
for instance_change in event["changes"]:
instance = instance_change["instance"]
@ -765,7 +729,7 @@ class ExtractUSDLayerContribution(publish.Extractor):
layer=sdf_layer,
contribution_path=path,
layer_id=product_name,
order=contribution.order,
order=None, # unordered
add_sdf_arguments_metadata=True
)
else:

View file

@ -1,21 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>{upload_type} upload timed out</title>
<description>
## {upload_type} upload failed after retries
The connection to the AYON server timed out while uploading a file.
### How to resolve?
1. Try publishing again. Intermittent network hiccups often resolve on retry.
2. Ensure your network/VPN is stable and large uploads are allowed.
3. If it keeps failing, try again later or contact your admin.
<pre>File: {file}
Error: {error}</pre>
</description>
</error>
</root>

View file

@ -28,7 +28,6 @@ from ayon_core.pipeline.publish import (
KnownPublishError,
get_publish_template_name,
)
from ayon_core.pipeline import is_product_base_type_supported
log = logging.getLogger(__name__)
@ -123,6 +122,10 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"representation",
"username",
"output",
# OpenPype keys - should be removed
"asset", # folder[name]
"subset", # product[name]
"family", # product[type]
]
def process(self, instance):
@ -364,8 +367,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
folder_entity = instance.data["folderEntity"]
product_name = instance.data["productName"]
product_type = instance.data["productType"]
product_base_type = instance.data.get("productBaseType")
self.log.debug("Product: {}".format(product_name))
# Get existing product if it exists
@ -393,34 +394,15 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
product_id = None
if existing_product_entity:
product_id = existing_product_entity["id"]
new_product_entity_kwargs = {
"name": product_name,
"product_type": product_type,
"folder_id": folder_entity["id"],
"data": data,
"attribs": attributes,
"entity_id": product_id,
"product_base_type": product_base_type,
}
if not is_product_base_type_supported():
new_product_entity_kwargs.pop("product_base_type")
if (
product_base_type is not None
and product_base_type != product_type):
self.log.warning((
"Product base type %s is not supported by the server, "
"but it's defined - and it differs from product type %s. "
"Using product base type as product type."
), product_base_type, product_type)
new_product_entity_kwargs["product_type"] = (
product_base_type
product_entity = new_product_entity(
product_name,
product_type,
folder_entity["id"],
data=data,
attribs=attributes,
entity_id=product_id
)
product_entity = new_product_entity(**new_product_entity_kwargs)
if existing_product_entity is None:
# Create a new product
self.log.info(
@ -920,12 +902,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# Include optional data if present in
optionals = [
"frameStart", "frameEnd",
"handleEnd", "handleStart",
"step",
"resolutionWidth", "resolutionHeight",
"pixelAspect",
"sourceHashes"
"frameStart", "frameEnd", "step",
"handleEnd", "handleStart", "sourceHashes"
]
for key in optionals:
if key in instance.data:
@ -949,7 +927,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
host_name = context.data["hostName"]
anatomy_data = instance.data["anatomyData"]
product_type = instance.data["productType"]
product_base_type = instance.data.get("productBaseType")
task_info = anatomy_data.get("task") or {}
return get_publish_template_name(
@ -959,8 +936,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
task_name=task_info.get("name"),
task_type=task_info.get("type"),
project_settings=context.data["project_settings"],
logger=self.log,
product_base_type=product_base_type
logger=self.log
)
def get_rootless_path(self, anatomy, path):

View file

@ -1,8 +1,11 @@
import os
import sys
import copy
import errno
import itertools
import shutil
from concurrent.futures import ThreadPoolExecutor
from speedcopy import copyfile
import clique
import pyblish.api
@ -13,15 +16,11 @@ from ayon_api.operations import (
)
from ayon_api.utils import create_entity_id
from ayon_core.lib import source_hash
from ayon_core.lib.file_transaction import (
FileTransaction,
DuplicateDestinationError,
)
from ayon_core.lib import create_hard_link, source_hash
from ayon_core.lib.file_transaction import wait_for_future_errors
from ayon_core.pipeline.publish import (
get_publish_template_name,
OptionalPyblishPluginMixin,
KnownPublishError,
)
@ -82,9 +81,12 @@ class IntegrateHeroVersion(
db_representation_context_keys = [
"project",
"folder",
"asset",
"hierarchy",
"task",
"product",
"subset",
"family",
"representation",
"username",
"output"
@ -422,40 +424,19 @@ class IntegrateHeroVersion(
(repre_entity, dst_paths)
)
file_transactions = FileTransaction(
log=self.log,
# Enforce unique transfers
allow_queue_replacements=False
)
mode = FileTransaction.MODE_COPY
if self.use_hardlinks:
mode = FileTransaction.MODE_LINK
self.path_checks = []
try:
# Copy(hardlink) paths of source and destination files
# TODO should we *only* create hardlinks?
# TODO should we keep files for deletion until this is successful?
with ThreadPoolExecutor(max_workers=8) as executor:
futures = [
executor.submit(self.copy_file, src_path, dst_path)
for src_path, dst_path in itertools.chain(
src_to_dst_file_paths,
other_file_paths_mapping
):
file_transactions.add(src_path, dst_path, mode=mode)
self.log.debug("Integrating source files to destination ...")
file_transactions.process()
except DuplicateDestinationError as exc:
# Raise DuplicateDestinationError as KnownPublishError
# and rollback the transactions
file_transactions.rollback()
raise KnownPublishError(exc).with_traceback(sys.exc_info()[2])
except Exception as exc:
# Rollback the transactions
file_transactions.rollback()
self.log.critical("Error when copying files", exc_info=True)
raise exc
# Finalizing can't rollback safely so no use for moving it to
# the try, except.
file_transactions.finalize()
src_to_dst_file_paths, other_file_paths_mapping
)
]
wait_for_future_errors(executor, futures)
# Update prepared representation entity data with files
# and integrate it to the server.
@ -644,6 +625,48 @@ class IntegrateHeroVersion(
).format(path))
return path
def copy_file(self, src_path, dst_path):
# TODO check if drives are the same to decide if we can hardlink
dirname = os.path.dirname(dst_path)
try:
os.makedirs(dirname)
self.log.debug("Folder(s) created: \"{}\"".format(dirname))
except OSError as exc:
if exc.errno != errno.EEXIST:
self.log.error("An unexpected error occurred.", exc_info=True)
raise
self.log.debug("Folder already exists: \"{}\"".format(dirname))
if self.use_hardlinks:
# First try hardlink and copy if paths are cross drive
self.log.debug("Hardlinking file \"{}\" to \"{}\"".format(
src_path, dst_path
))
try:
create_hard_link(src_path, dst_path)
# Return when successful
return
except OSError as exc:
# re-raise exception if different than
# EXDEV - cross drive path
# EINVAL - wrong format, must be NTFS
self.log.debug(
"Hardlink failed with errno:'{}'".format(exc.errno))
if exc.errno not in [errno.EXDEV, errno.EINVAL]:
raise
self.log.debug(
"Hardlinking failed, falling back to regular copy...")
self.log.debug("Copying file \"{}\" to \"{}\"".format(
src_path, dst_path
))
copyfile(src_path, dst_path)
def version_from_representations(self, project_name, repres):
for repre in repres:
version = ayon_api.get_version_by_id(

View file

@ -105,7 +105,7 @@ class IntegrateInputLinksAYON(pyblish.api.ContextPlugin):
created links by its type
"""
if workfile_instance is None:
self.log.debug("No workfile in this publish session.")
self.log.warning("No workfile in this publish session.")
return
workfile_version_id = workfile_instance.data["versionEntity"]["id"]

View file

@ -62,8 +62,10 @@ class IntegrateProductGroup(pyblish.api.InstancePlugin):
product_type = instance.data["productType"]
fill_pairs = prepare_template_data({
"family": product_type,
"task": filter_criteria["tasks"],
"host": filter_criteria["hosts"],
"subset": product_name,
"product": {
"name": product_name,
"type": product_type,

View file

@ -1,17 +1,11 @@
import os
import time
import ayon_api
from ayon_api import TransferProgress
from ayon_api.server_api import RequestTypes
import pyblish.api
import ayon_api
from ayon_api.server_api import RequestTypes
from ayon_core.lib import get_media_mime_type, format_file_size
from ayon_core.pipeline.publish import (
PublishXmlValidationError,
get_publish_repre_path,
)
import requests.exceptions
from ayon_core.lib import get_media_mime_type
from ayon_core.pipeline.publish import get_publish_repre_path
class IntegrateAYONReview(pyblish.api.InstancePlugin):
@ -50,7 +44,7 @@ class IntegrateAYONReview(pyblish.api.InstancePlugin):
if "webreview" not in repre_tags:
continue
# exclude representations going to be published on farm
# exclude representations which are going to be published on farm
if "publish_on_farm" in repre_tags:
continue
@ -81,13 +75,18 @@ class IntegrateAYONReview(pyblish.api.InstancePlugin):
f"/projects/{project_name}"
f"/versions/{version_id}/reviewables{query}"
)
filename = os.path.basename(repre_path)
# Upload the reviewable
self.log.info(f"Uploading reviewable '{label or filename}' ...")
headers = ayon_con.get_headers(content_type)
headers["x-file-name"] = filename
self.log.info(f"Uploading reviewable {repre_path}")
# Upload with retries and clear help if it keeps failing
self._upload_with_retries(
ayon_con,
ayon_con.upload_file(
endpoint,
repre_path,
content_type,
headers=headers,
request_type=RequestTypes.post,
)
def _get_review_label(self, repre, uploaded_labels):
@ -101,74 +100,3 @@ class IntegrateAYONReview(pyblish.api.InstancePlugin):
idx += 1
label = f"{orig_label}_{idx}"
return label
def _upload_with_retries(
self,
ayon_con: ayon_api.ServerAPI,
endpoint: str,
repre_path: str,
content_type: str,
):
"""Upload file with simple retries."""
filename = os.path.basename(repre_path)
headers = ayon_con.get_headers(content_type)
headers["x-file-name"] = filename
max_retries = ayon_con.get_default_max_retries()
# Retries are already implemented in 'ayon_api.upload_file'
# - added in ayon api 1.2.7
if hasattr(TransferProgress, "get_attempt"):
max_retries = 1
size = os.path.getsize(repre_path)
self.log.info(
f"Uploading '{repre_path}' (size: {format_file_size(size)})"
)
# How long to sleep before next attempt
wait_time = 1
last_error = None
for attempt in range(max_retries):
attempt += 1
start = time.time()
try:
output = ayon_con.upload_file(
endpoint,
repre_path,
headers=headers,
request_type=RequestTypes.post,
)
self.log.debug(f"Uploaded in {time.time() - start}s.")
return output
except (
requests.exceptions.Timeout,
requests.exceptions.ConnectionError
) as exc:
# Log and retry with backoff if attempts remain
if attempt >= max_retries:
last_error = exc
break
self.log.warning(
f"Review upload failed ({attempt}/{max_retries})"
f" after {time.time() - start}s."
f" Retrying in {wait_time}s...",
exc_info=True,
)
time.sleep(wait_time)
# Exhausted retries - raise a user-friendly validation error with help
raise PublishXmlValidationError(
self,
(
"Upload of reviewable timed out or failed after multiple"
" attempts. Please try publishing again."
),
formatting_data={
"upload_type": "Review",
"file": repre_path,
"error": str(last_error),
},
help_filename="upload_file.xml",
)

View file

@ -24,16 +24,11 @@
import os
import collections
import time
import ayon_api
from ayon_api import RequestTypes, TransferProgress
from ayon_api.operations import OperationsSession
import pyblish.api
import requests
from ayon_core.lib import get_media_mime_type, format_file_size
from ayon_core.pipeline.publish import PublishXmlValidationError
import ayon_api
from ayon_api import RequestTypes
from ayon_api.operations import OperationsSession
InstanceFilterResult = collections.namedtuple(
@ -169,17 +164,25 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
return os.path.normpath(filled_path)
def _create_thumbnail(self, project_name: str, src_filepath: str) -> str:
"""Upload thumbnail to AYON and return its id."""
mime_type = get_media_mime_type(src_filepath)
if mime_type is None:
return ayon_api.create_thumbnail(
project_name, src_filepath
)
"""Upload thumbnail to AYON and return its id.
response = self._upload_with_retries(
This is a temporary fix of the 'create_thumbnail' function in ayon_api
to fix the jpeg mime type.
"""
mime_type = None
with open(src_filepath, "rb") as stream:
if b"\xff\xd8\xff" == stream.read(3):
mime_type = "image/jpeg"
if mime_type is None:
return ayon_api.create_thumbnail(project_name, src_filepath)
response = ayon_api.upload_file(
f"projects/{project_name}/thumbnails",
src_filepath,
mime_type,
request_type=RequestTypes.post,
headers={"Content-Type": mime_type},
)
response.raise_for_status()
return response.json()["id"]
@ -245,71 +248,3 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
or instance.data.get("name")
or "N/A"
)
def _upload_with_retries(
self,
endpoint: str,
repre_path: str,
content_type: str,
):
"""Upload file with simple retries."""
ayon_con = ayon_api.get_server_api_connection()
headers = ayon_con.get_headers(content_type)
max_retries = ayon_con.get_default_max_retries()
# Retries are already implemented in 'ayon_api.upload_file'
# - added in ayon api 1.2.7
if hasattr(TransferProgress, "get_attempt"):
max_retries = 1
size = os.path.getsize(repre_path)
self.log.info(
f"Uploading '{repre_path}' (size: {format_file_size(size)})"
)
# How long to sleep before next attempt
wait_time = 1
last_error = None
for attempt in range(max_retries):
attempt += 1
start = time.time()
try:
output = ayon_con.upload_file(
endpoint,
repre_path,
headers=headers,
request_type=RequestTypes.post,
)
self.log.debug(f"Uploaded in {time.time() - start}s.")
return output
except (
requests.exceptions.Timeout,
requests.exceptions.ConnectionError
) as exc:
# Log and retry with backoff if attempts remain
if attempt >= max_retries:
last_error = exc
break
self.log.warning(
f"Review upload failed ({attempt}/{max_retries})"
f" after {time.time() - start}s."
f" Retrying in {wait_time}s...",
exc_info=True,
)
time.sleep(wait_time)
# Exhausted retries - raise a user-friendly validation error with help
raise PublishXmlValidationError(
self,
(
"Upload of thumbnail timed out or failed after multiple"
" attempts. Please try publishing again."
),
formatting_data={
"upload_type": "Thumbnail",
"file": repre_path,
"error": str(last_error),
},
help_filename="upload_file.xml",
)

View file

@ -969,6 +969,12 @@ SearchItemDisplayWidget #ValueWidget {
background: {color:bg-buttons};
}
/* Subset Manager */
#SubsetManagerDetailsText {}
#SubsetManagerDetailsText[state="invalid"] {
border: 1px solid #ff0000;
}
/* Creator */
#CreatorsView::item {
padding: 1px 5px;

View file

@ -1,8 +1,6 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Dict, Optional
@dataclass
@ -15,8 +13,8 @@ class TabItem:
class InterpreterConfig:
width: Optional[int]
height: Optional[int]
splitter_sizes: list[int] = field(default_factory=list)
tabs: list[TabItem] = field(default_factory=list)
splitter_sizes: List[int] = field(default_factory=list)
tabs: List[TabItem] = field(default_factory=list)
class AbstractInterpreterController(ABC):
@ -29,7 +27,7 @@ class AbstractInterpreterController(ABC):
self,
width: int,
height: int,
splitter_sizes: list[int],
tabs: list[dict[str, str]],
) -> None:
splitter_sizes: List[int],
tabs: List[Dict[str, str]],
):
pass

View file

@ -1,5 +1,4 @@
from __future__ import annotations
from typing import Optional
from typing import List, Dict
from ayon_core.lib import JSONSettingRegistry
from ayon_core.lib.local_settings import get_launcher_local_dir
@ -12,15 +11,13 @@ from .abstract import (
class InterpreterController(AbstractInterpreterController):
def __init__(self, name: Optional[str] = None) -> None:
if name is None:
name = "python_interpreter_tool"
def __init__(self):
self._registry = JSONSettingRegistry(
name,
"python_interpreter_tool",
get_launcher_local_dir(),
)
def get_config(self) -> InterpreterConfig:
def get_config(self):
width = None
height = None
splitter_sizes = []
@ -57,9 +54,9 @@ class InterpreterController(AbstractInterpreterController):
self,
width: int,
height: int,
splitter_sizes: list[int],
tabs: list[dict[str, str]],
) -> None:
splitter_sizes: List[int],
tabs: List[Dict[str, str]],
):
self._registry.set_item("width", width)
self._registry.set_item("height", height)
self._registry.set_item("splitter_sizes", splitter_sizes)

View file

@ -1,42 +1,42 @@
import os
import sys
import collections
class _CustomSTD:
def __init__(self, orig_std, write_callback):
self.orig_std = orig_std
self._valid_orig = bool(orig_std)
self._write_callback = write_callback
def __getattr__(self, attr):
return getattr(self.orig_std, attr)
def __setattr__(self, key, value):
if key in ("orig_std", "_valid_orig", "_write_callback"):
super().__setattr__(key, value)
else:
setattr(self.orig_std, key, value)
def write(self, text):
if self._valid_orig:
self.orig_std.write(text)
self._write_callback(text)
class StdOEWrap:
def __init__(self):
self._origin_stdout_write = None
self._origin_stderr_write = None
self._listening = False
self.lines = collections.deque()
if not sys.stdout:
sys.stdout = open(os.devnull, "w")
if not sys.stderr:
sys.stderr = open(os.devnull, "w")
if self._origin_stdout_write is None:
self._origin_stdout_write = sys.stdout.write
if self._origin_stderr_write is None:
self._origin_stderr_write = sys.stderr.write
self._listening = True
self._stdout_wrap = _CustomSTD(sys.stdout, self._listener)
self._stderr_wrap = _CustomSTD(sys.stderr, self._listener)
sys.stdout = self._stdout_wrap
sys.stderr = self._stderr_wrap
sys.stdout.write = self._stdout_listener
sys.stderr.write = self._stderr_listener
def stop_listen(self):
self._listening = False
def _listener(self, text):
def _stdout_listener(self, text):
if self._listening:
self.lines.append(text)
if self._origin_stdout_write is not None:
self._origin_stdout_write(text)
def _stderr_listener(self, text):
if self._listening:
self.lines.append(text)
if self._origin_stderr_write is not None:
self._origin_stderr_write(text)
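A usage sketch of the wrapper above: captured lines can be drained by a UI while output still reaches the original streams (the widget forwarding is only indicated by a comment):

```python
wrap = StdOEWrap()
print("hello")  # reaches the real stdout *and* wrap.lines

while wrap.lines:
    line = wrap.lines.popleft()
    # ... forward 'line' to a text widget, log pane, etc.

wrap.stop_listen()  # stop capturing; original streams keep working
```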

View file

@ -112,7 +112,6 @@ class HierarchyPage(QtWidgets.QWidget):
self._is_visible = False
self._controller = controller
self._filters_widget = filters_widget
self._btn_back = btn_back
self._projects_combobox = projects_combobox
self._folders_widget = folders_widget
@ -137,10 +136,6 @@ class HierarchyPage(QtWidgets.QWidget):
self._folders_widget.refresh()
self._tasks_widget.refresh()
self._workfiles_page.refresh()
# Update my tasks
self._on_my_tasks_checkbox_state_changed(
self._filters_widget.is_my_tasks_checked()
)
def _on_back_clicked(self):
self._controller.set_selected_project(None)
@ -160,7 +155,6 @@ class HierarchyPage(QtWidgets.QWidget):
)
folder_ids = entity_ids["folder_ids"]
task_ids = entity_ids["task_ids"]
self._folders_widget.set_folder_ids_filter(folder_ids)
self._tasks_widget.set_task_ids_filter(task_ids)

View file

@ -527,10 +527,6 @@ class LoaderWindow(QtWidgets.QWidget):
if not self._refresh_handler.project_refreshed:
self._projects_combobox.refresh()
self._update_filters()
# Update my tasks
self._on_my_tasks_checkbox_state_changed(
self._filters_widget.is_my_tasks_checked()
)
def _on_load_finished(self, event):
error_info = event["error_info"]

View file

@ -35,7 +35,6 @@ from ayon_core.pipeline.create import (
ConvertorsOperationFailed,
ConvertorItem,
)
from ayon_core.tools.publisher.abstract import (
AbstractPublisherBackend,
CardMessageTypes,

View file

@ -21,7 +21,6 @@ from ayon_core.pipeline.plugin_discover import DiscoverResult
from ayon_core.pipeline.publish import (
get_publish_instance_label,
PublishError,
filter_crashed_publish_paths,
)
from ayon_core.tools.publisher.abstract import AbstractPublisherBackend
@ -108,14 +107,11 @@ class PublishReportMaker:
creator_discover_result: Optional[DiscoverResult] = None,
convertor_discover_result: Optional[DiscoverResult] = None,
publish_discover_result: Optional[DiscoverResult] = None,
blocking_crashed_paths: Optional[list[str]] = None,
):
self._create_discover_result: Union[DiscoverResult, None] = None
self._convert_discover_result: Union[DiscoverResult, None] = None
self._publish_discover_result: Union[DiscoverResult, None] = None
self._blocking_crashed_paths: list[str] = []
self._all_instances_by_id: Dict[str, pyblish.api.Instance] = {}
self._plugin_data_by_id: Dict[str, Any] = {}
self._current_plugin_id: Optional[str] = None
@ -124,7 +120,6 @@ class PublishReportMaker:
creator_discover_result,
convertor_discover_result,
publish_discover_result,
blocking_crashed_paths,
)
def reset(
@ -132,14 +127,12 @@ class PublishReportMaker:
creator_discover_result: Union[DiscoverResult, None],
convertor_discover_result: Union[DiscoverResult, None],
publish_discover_result: Union[DiscoverResult, None],
blocking_crashed_paths: list[str],
):
"""Reset report and clear all data."""
self._create_discover_result = creator_discover_result
self._convert_discover_result = convertor_discover_result
self._publish_discover_result = publish_discover_result
self._blocking_crashed_paths = blocking_crashed_paths
self._all_instances_by_id = {}
self._plugin_data_by_id = {}
@ -249,10 +242,9 @@ class PublishReportMaker:
"instances": instances_details,
"context": self._extract_context_data(publish_context),
"crashed_file_paths": crashed_file_paths,
"blocking_crashed_paths": list(self._blocking_crashed_paths),
"id": uuid.uuid4().hex,
"created_at": now.isoformat(),
"report_version": "1.1.1",
"report_version": "1.1.0",
}
def _add_plugin_data_item(self, plugin: pyblish.api.Plugin):
@ -967,16 +959,11 @@ class PublishModel:
self._publish_plugins_proxy = PublishPluginsProxy(
publish_plugins
)
blocking_crashed_paths = filter_crashed_publish_paths(
create_context.get_current_project_name(),
set(create_context.publish_discover_result.crashed_file_paths),
project_settings=create_context.get_current_project_settings(),
)
self._publish_report.reset(
create_context.creator_discover_result,
create_context.convertor_discover_result,
create_context.publish_discover_result,
blocking_crashed_paths,
)
for plugin in create_context.publish_plugins_mismatch_targets:
self._publish_report.set_plugin_skipped(plugin.id)

View file

@ -139,6 +139,3 @@ class PublishReport:
self.logs = logs
self.crashed_plugin_paths = report_data["crashed_file_paths"]
self.blocking_crashed_paths = report_data.get(
"blocking_crashed_paths", []
)

View file

@ -7,7 +7,6 @@ from ayon_core.tools.utils import (
SeparatorWidget,
IconButton,
paint_image_with_color,
get_qt_icon,
)
from ayon_core.resources import get_image_path
from ayon_core.style import get_objected_colors
@ -47,13 +46,10 @@ def get_pretty_milliseconds(value):
class PluginLoadReportModel(QtGui.QStandardItemModel):
_blocking_icon = None
def __init__(self):
super().__init__()
self._traceback_by_filepath = {}
self._items_by_filepath = {}
self._blocking_crashed_paths = set()
self._is_active = True
self._need_refresh = False
@ -79,7 +75,6 @@ class PluginLoadReportModel(QtGui.QStandardItemModel):
for filepath in to_remove:
self._traceback_by_filepath.pop(filepath)
self._blocking_crashed_paths = set(report.blocking_crashed_paths)
self._update_items()
def _update_items(self):
@ -88,7 +83,6 @@ class PluginLoadReportModel(QtGui.QStandardItemModel):
parent = self.invisibleRootItem()
if not self._traceback_by_filepath:
parent.removeRows(0, parent.rowCount())
self._items_by_filepath = {}
return
new_items = []
@ -97,19 +91,13 @@ class PluginLoadReportModel(QtGui.QStandardItemModel):
set(self._items_by_filepath) - set(self._traceback_by_filepath)
)
for filepath in self._traceback_by_filepath:
item = self._items_by_filepath.get(filepath)
if item is None:
if filepath in self._items_by_filepath:
continue
item = QtGui.QStandardItem(filepath)
new_items.append(item)
new_items_by_filepath[filepath] = item
self._items_by_filepath[filepath] = item
icon = None
if filepath.replace("\\", "/") in self._blocking_crashed_paths:
icon = self._get_blocking_icon()
item.setData(icon, QtCore.Qt.DecorationRole)
if new_items:
parent.appendRows(new_items)
@ -125,16 +113,6 @@ class PluginLoadReportModel(QtGui.QStandardItemModel):
item = self._items_by_filepath.pop(filepath)
parent.removeRow(item.row())
@classmethod
def _get_blocking_icon(cls):
if cls._blocking_icon is None:
cls._blocking_icon = get_qt_icon({
"type": "material-symbols",
"name": "block",
"color": "red",
})
return cls._blocking_icon
class DetailWidget(QtWidgets.QTextEdit):
def __init__(self, text, *args, **kwargs):
@ -878,7 +856,7 @@ class PublishReportViewerWidget(QtWidgets.QFrame):
report = PublishReport(report_data)
self.set_report(report)
def set_report(self, report: PublishReport) -> None:
def set_report(self, report):
self._ignore_selection_changes = True
self._report_item = report
@ -888,10 +866,6 @@ class PublishReportViewerWidget(QtWidgets.QFrame):
self._logs_text_widget.set_report(report)
self._plugin_load_report_widget.set_report(report)
self._plugins_details_widget.set_report(report)
if report.blocking_crashed_paths:
self._details_tab_widget.setCurrentWidget(
self._plugin_load_report_widget
)
self._ignore_selection_changes = False

View file

@ -221,7 +221,6 @@ class CreateContextWidget(QtWidgets.QWidget):
filters_widget.text_changed.connect(self._on_folder_filter_change)
filters_widget.my_tasks_changed.connect(self._on_my_tasks_change)
self._filters_widget = filters_widget
self._current_context_btn = current_context_btn
self._folders_widget = folders_widget
self._tasks_widget = tasks_widget
@ -291,10 +290,6 @@ class CreateContextWidget(QtWidgets.QWidget):
self._hierarchy_controller.set_expected_selection(
self._last_project_name, folder_id, task_name
)
# Update my tasks
self._on_my_tasks_change(
self._filters_widget.is_my_tasks_checked()
)
def _clear_selection(self):
self._folders_widget.set_selected_folder(None)

View file

@ -310,6 +310,9 @@ class CreateWidget(QtWidgets.QWidget):
folder_path = None
if self._context_change_is_enabled():
folder_path = self._context_widget.get_selected_folder_path()
if folder_path is None:
folder_path = self.get_current_folder_path()
return folder_path or None
def _get_folder_id(self):
@ -325,6 +328,9 @@ class CreateWidget(QtWidgets.QWidget):
folder_path = self._context_widget.get_selected_folder_path()
if folder_path:
task_name = self._context_widget.get_selected_task_name()
if not task_name:
task_name = self.get_current_task_name()
return task_name
def _set_context_enabled(self, enabled):

View file

@ -113,7 +113,6 @@ class FoldersDialog(QtWidgets.QDialog):
self._soft_reset_enabled = False
self._folders_widget.set_project_name(self._project_name)
self._on_my_tasks_change(self._filters_widget.is_my_tasks_checked())
def get_selected_folder_path(self):
"""Get selected folder path."""

View file

@ -1,11 +1,9 @@
from __future__ import annotations
import os
import json
import time
import collections
import copy
from typing import Optional, Any
from typing import Optional
from qtpy import QtWidgets, QtCore, QtGui
@ -395,9 +393,6 @@ class PublisherWindow(QtWidgets.QDialog):
self._publish_frame_visible = None
self._tab_on_reset = None
self._create_context_valid: bool = True
self._blocked_by_crashed_paths: bool = False
self._error_messages_to_show = collections.deque()
self._errors_dialog_message_timer = errors_dialog_message_timer
@ -411,8 +406,6 @@ class PublisherWindow(QtWidgets.QDialog):
self._show_counter = 0
self._window_is_visible = False
self._update_footer_state()
@property
def controller(self) -> AbstractPublisherFrontend:
"""Kept for compatibility with traypublisher."""
@ -671,32 +664,10 @@ class PublisherWindow(QtWidgets.QDialog):
self._tab_on_reset = tab
def set_current_tab(self, tab):
if tab == "create":
self._go_to_create_tab()
elif tab == "publish":
self._go_to_publish_tab()
elif tab == "report":
self._go_to_report_tab()
elif tab == "details":
self._go_to_details_tab()
if not self._window_is_visible:
self.set_tab_on_reset(tab)
def _update_publish_details_widget(
self,
force: bool = False,
report_data: Optional[dict[str, Any]] = None,
) -> None:
if (
report_data is None
and not force
and not self._is_on_details_tab()
):
def _update_publish_details_widget(self, force=False):
if not force and not self._is_on_details_tab():
return
if report_data is None:
report_data = self._controller.get_publish_report()
self._publish_details_widget.set_report_data(report_data)
@ -781,6 +752,19 @@ class PublisherWindow(QtWidgets.QDialog):
def _set_current_tab(self, identifier):
self._tabs_widget.set_current_tab(identifier)
def set_current_tab(self, tab):
if tab == "create":
self._go_to_create_tab()
elif tab == "publish":
self._go_to_publish_tab()
elif tab == "report":
self._go_to_report_tab()
elif tab == "details":
self._go_to_details_tab()
if not self._window_is_visible:
self.set_tab_on_reset(tab)
def _is_current_tab(self, identifier):
return self._tabs_widget.is_current_tab(identifier)
@ -881,40 +865,15 @@ class PublisherWindow(QtWidgets.QDialog):
# Reset style
self._comment_input.setStyleSheet("")
def _set_create_context_valid(self, valid: bool) -> None:
self._create_context_valid = valid
self._update_footer_state()
def _set_blocked(self, blocked: bool) -> None:
self._blocked_by_crashed_paths = blocked
self._overview_widget.setEnabled(not blocked)
self._update_footer_state()
if not blocked:
return
self.set_tab_on_reset("details")
self._go_to_details_tab()
QtWidgets.QMessageBox.critical(
self,
"Failed to load plugins",
(
"Failed to load plugins that do prevent you from"
" using publish tool.\n"
"Please contact your TD or administrator."
)
)
def _update_footer_state(self) -> None:
enabled = (
not self._blocked_by_crashed_paths
and self._create_context_valid
)
save_enabled = not self._blocked_by_crashed_paths
self._save_btn.setEnabled(save_enabled)
def _set_footer_enabled(self, enabled):
self._save_btn.setEnabled(True)
self._reset_btn.setEnabled(True)
if enabled:
self._stop_btn.setEnabled(False)
self._validate_btn.setEnabled(True)
self._publish_btn.setEnabled(True)
else:
self._stop_btn.setEnabled(enabled)
self._validate_btn.setEnabled(enabled)
self._publish_btn.setEnabled(enabled)
@ -923,14 +882,9 @@ class PublisherWindow(QtWidgets.QDialog):
self._set_comment_input_visiblity(True)
self._set_publish_overlay_visibility(False)
self._set_publish_visibility(False)
report_data = self._controller.get_publish_report()
blocked = bool(report_data["blocking_crashed_paths"])
self._set_blocked(blocked)
self._update_publish_details_widget(report_data=report_data)
self._update_publish_details_widget()
def _on_controller_reset(self):
self._update_publish_details_widget(force=True)
self._first_reset, first_reset = False, self._first_reset
if self._tab_on_reset is not None:
self._tab_on_reset, new_tab = None, self._tab_on_reset
@ -998,7 +952,7 @@ class PublisherWindow(QtWidgets.QDialog):
def _validate_create_instances(self):
if not self._controller.is_host_valid():
self._set_create_context_valid(True)
self._set_footer_enabled(True)
return
active_instances_by_id = {
@ -1019,7 +973,7 @@ class PublisherWindow(QtWidgets.QDialog):
if all_valid is None:
all_valid = True
self._set_create_context_valid(bool(all_valid))
self._set_footer_enabled(bool(all_valid))
def _on_create_model_reset(self):
self._validate_create_instances()

View file

@ -1045,23 +1045,10 @@ class ProjectPushItemProcess:
copied_tags = self._get_transferable_tags(src_version_entity)
copied_status = self._get_transferable_status(src_version_entity)
description_parts = []
dst_attr_description = dst_attrib.get("description")
if dst_attr_description:
description_parts.append(dst_attr_description)
description = self._create_src_version_description(
self._item.src_project_name,
src_version_entity
)
if description:
description_parts.append(description)
dst_attrib["description"] = "\n\n".join(description_parts)
version_entity = new_version_entity(
dst_version,
product_id,
author=src_version_entity["author"],
status=copied_status,
tags=copied_tags,
task_id=self._task_info.get("id"),
@ -1142,6 +1129,8 @@ class ProjectPushItemProcess:
self.host_name
)
formatting_data.update({
"subset": self._product_name,
"family": self._product_type,
"product": {
"name": self._product_name,
"type": self._product_type,
@ -1383,30 +1372,6 @@ class ProjectPushItemProcess:
return copied_status["name"]
return None
def _create_src_version_description(
self,
src_project_name: str,
src_version_entity: dict[str, Any]
) -> str:
"""Creates description text about source version."""
src_version_id = src_version_entity["id"]
src_author = src_version_entity["author"]
query = "&".join([
f"project={src_project_name}",
"type=version",
f"id={src_version_id}"
])
version_url = (
f"{ayon_api.get_base_url()}"
f"/projects/{src_project_name}/products?{query}"
)
description = (
f"Version copied from from {version_url} "
f"created by '{src_author}', "
)
return description
class IntegrateModel:
def __init__(self, controller):
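To illustrate the removed helper above: with a hypothetical server https://ayon.example.com, source project "ProjA", version id "1234abcd" and author "jane", _create_src_version_description would return roughly

    Version copied from https://ayon.example.com/projects/ProjA/products?project=ProjA&type=version&id=1234abcd created by 'jane'.

which the calling code appends to any existing description on the destination version, separated by a blank line.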

View file

@ -1114,8 +1114,6 @@ class SceneInventoryView(QtWidgets.QTreeView):
try:
for item_id, item_version in zip(item_ids, versions):
container = containers_by_id[item_id]
if container.get("version_locked"):
continue
try:
update_container(container, item_version)
except Exception as exc:

View file

@ -32,6 +32,8 @@ class TextureCopy:
product_type = "texture"
template_data = get_template_data(project_entity, folder_entity)
template_data.update({
"family": product_type,
"subset": product_name,
"product": {
"name": product_name,
"type": product_type,

View file

@ -834,12 +834,6 @@ class FoldersFiltersWidget(QtWidgets.QWidget):
self._folders_filter_input = folders_filter_input
self._my_tasks_checkbox = my_tasks_checkbox
def is_my_tasks_checked(self) -> bool:
return self._my_tasks_checkbox.isChecked()
def text(self) -> str:
return self._folders_filter_input.text()
def set_text(self, text: str) -> None:
self._folders_filter_input.setText(text)

View file

@ -205,8 +205,6 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
self._folders_widget = folder_widget
self._filters_widget = filters_widget
return col_widget
def _create_col_3_widget(self, controller, parent):
@ -345,10 +343,6 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
self._project_name = self._controller.get_current_project_name()
self._folders_widget.set_project_name(self._project_name)
# Update my tasks
self._on_my_tasks_checkbox_state_changed(
self._filters_widget.is_my_tasks_checked()
)
def _on_save_as_finished(self, event):
if event["failed"]:

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'core' version."""
__version__ = "1.7.0+dev"
__version__ = "1.6.12"

View file

@ -1,6 +1,6 @@
name = "core"
title = "Core"
version = "1.7.0+dev"
version = "1.6.12"
client_dir = "ayon_core"
@ -12,7 +12,6 @@ ayon_server_version = ">=1.8.4,<2.0.0"
ayon_launcher_version = ">=1.0.2"
ayon_required_addons = {}
ayon_compatible_addons = {
"ayon_third_party": ">=1.3.0",
"ayon_ocio": ">=1.2.1",
"applications": ">=1.1.2",
"harmony": ">0.4.0",

View file

@ -5,7 +5,7 @@
[tool.poetry]
name = "ayon-core"
version = "1.7.0+dev"
version = "1.6.12"
description = ""
authors = ["Ynput Team <team@ynput.io>"]
readme = "README.md"
@ -37,7 +37,7 @@ opentimelineio = "^0.17.0"
speedcopy = "^2.1"
qtpy="^2.4.3"
pyside6 = "^6.5.2"
pytest-ayon = { git = "https://github.com/ynput/pytest-ayon.git", branch = "develop" }
pytest-ayon = { git = "https://github.com/ynput/pytest-ayon.git", branch = "chore/align-dependencies" }
[tool.codespell]
# Ignore words that are not in the dictionary.

View file

@ -7,31 +7,7 @@ from .publish_plugins import DEFAULT_PUBLISH_VALUES
PRODUCT_NAME_REPL_REGEX = re.compile(r"[^<>{}\[\]a-zA-Z0-9_.]")
def _convert_product_name_templates_1_7_0(overrides):
product_name_profiles = (
overrides
.get("tools", {})
.get("creator", {})
.get("product_name_profiles")
)
if (
not product_name_profiles
or not isinstance(product_name_profiles, list)
):
return
# Already converted
item = product_name_profiles[0]
if "product_base_types" in item or "product_types" not in item:
return
# Move product base types to product types
for item in product_name_profiles:
item["product_base_types"] = item["product_types"]
item["product_types"] = []
def _convert_product_name_templates_1_6_5(overrides):
def _convert_imageio_configs_1_6_5(overrides):
product_name_profiles = (
overrides
.get("tools", {})
@ -182,54 +158,12 @@ def _convert_publish_plugins(overrides):
_convert_oiio_transcode_0_4_5(overrides["publish"])
def _convert_extract_thumbnail(overrides):
"""ExtractThumbnail config settings did change to profiles."""
extract_thumbnail_overrides = (
overrides.get("publish", {}).get("ExtractThumbnail")
)
if extract_thumbnail_overrides is None:
return
base_value = {
"product_types": [],
"host_names": [],
"task_types": [],
"task_names": [],
"product_names": [],
"integrate_thumbnail": True,
"target_size": {"type": "source"},
"duration_split": 0.5,
"oiiotool_defaults": {
"type": "colorspace",
"colorspace": "color_picking",
},
"ffmpeg_args": {"input": ["-apply_trc gamma22"], "output": []},
}
for key in (
"product_names",
"integrate_thumbnail",
"target_size",
"duration_split",
"oiiotool_defaults",
"ffmpeg_args",
):
if key in extract_thumbnail_overrides:
base_value[key] = extract_thumbnail_overrides.pop(key)
extract_thumbnail_profiles = extract_thumbnail_overrides.setdefault(
"profiles", []
)
extract_thumbnail_profiles.append(base_value)
def convert_settings_overrides(
source_version: str,
overrides: dict[str, Any],
) -> dict[str, Any]:
_convert_imageio_configs_0_3_1(overrides)
_convert_imageio_configs_0_4_5(overrides)
_convert_product_name_templates_1_6_5(overrides)
_convert_product_name_templates_1_7_0(overrides)
_convert_imageio_configs_1_6_5(overrides)
_convert_publish_plugins(overrides)
_convert_extract_thumbnail(overrides)
return overrides
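To make the 1.7.0 conversion concrete, a hypothetical override snippet before and after _convert_product_name_templates_1_7_0 runs (values illustrative):

    overrides = {
        "tools": {"creator": {"product_name_profiles": [
            {
                "product_types": ["render"],
                "template": "{product[type]}{Task[name]}",
            },
        ]}}
    }
    _convert_product_name_templates_1_7_0(overrides)
    # Each profile now reads:
    #   {"product_base_types": ["render"], "product_types": [],
    #    "template": "{product[type]}{Task[name]}"}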

View file

@ -74,35 +74,13 @@ class CollectFramesFixDefModel(BaseSettingsModel):
)
def usd_contribution_layer_types():
return [
{"value": "asset", "label": "Asset"},
{"value": "shot", "label": "Shot"},
]
class ContributionLayersModel(BaseSettingsModel):
_layout = "compact"
name: str = SettingsField(
default="",
regex="[A-Za-z0-9_-]+",
title="Name")
scope: list[str] = SettingsField(
# This should actually be provided as a callable to `default_factory`
# because lists are mutable. However, the frontend can't interpret
# the callable. It will fail to apply it as the default. Specifying
# this default directly did not show any ill side effects.
default=["asset", "shot"],
title="Scope",
min_items=1,
enum_resolver=usd_contribution_layer_types)
order: int = SettingsField(
default=0,
name: str = SettingsField(title="Name")
order: str = SettingsField(
title="Order",
description=(
"Higher order means a higher strength and stacks the layer on top."
)
)
description="Higher order means a higher strength and stacks the "
"layer on top.")
class CollectUSDLayerContributionsProfileModel(BaseSettingsModel):
@ -422,30 +400,24 @@ class ExtractThumbnailOIIODefaultsModel(BaseSettingsModel):
)
class ExtractThumbnailProfileModel(BaseSettingsModel):
product_types: list[str] = SettingsField(
default_factory=list, title="Product types"
)
host_names: list[str] = SettingsField(
default_factory=list, title="Host names"
)
task_types: list[str] = SettingsField(
default_factory=list, title="Task types", enum_resolver=task_types_enum
)
task_names: list[str] = SettingsField(
default_factory=list, title="Task names"
)
class ExtractThumbnailModel(BaseSettingsModel):
_isGroup = True
enabled: bool = SettingsField(True)
product_names: list[str] = SettingsField(
default_factory=list, title="Product names"
default_factory=list,
title="Product names"
)
integrate_thumbnail: bool = SettingsField(
True, title="Integrate Thumbnail Representation"
True,
title="Integrate Thumbnail Representation"
)
target_size: ResizeModel = SettingsField(
default_factory=ResizeModel, title="Target size"
default_factory=ResizeModel,
title="Target size"
)
background_color: ColorRGBA_uint8 = SettingsField(
(0, 0, 0, 0.0), title="Background color"
(0, 0, 0, 0.0),
title="Background color"
)
duration_split: float = SettingsField(
0.5,
@ -462,15 +434,6 @@ class ExtractThumbnailProfileModel(BaseSettingsModel):
)
class ExtractThumbnailModel(BaseSettingsModel):
_isGroup = True
enabled: bool = SettingsField(True)
profiles: list[ExtractThumbnailProfileModel] = SettingsField(
default_factory=list, title="Profiles"
)
def _extract_oiio_transcoding_type():
return [
{"value": "colorspace", "label": "Use Colorspace"},
@ -506,18 +469,6 @@ class UseDisplayViewModel(BaseSettingsModel):
)
class ExtractThumbnailFromSourceModel(BaseSettingsModel):
"""Thumbnail extraction from source files using ffmpeg and oiiotool."""
enabled: bool = SettingsField(True)
target_size: ResizeModel = SettingsField(
default_factory=ResizeModel, title="Target size"
)
background_color: ColorRGBA_uint8 = SettingsField(
(0, 0, 0, 0.0), title="Background color"
)
class ExtractOIIOTranscodeOutputModel(BaseSettingsModel):
_layout = "expanded"
name: str = SettingsField(
@ -1293,16 +1244,6 @@ class PublishPuginsModel(BaseSettingsModel):
default_factory=ExtractThumbnailModel,
title="Extract Thumbnail"
)
ExtractThumbnailFromSource: ExtractThumbnailFromSourceModel = SettingsField( # noqa: E501
default_factory=ExtractThumbnailFromSourceModel,
title="Extract Thumbnail from source",
description=(
"Extract thumbnails from explicit file set in "
"instance.data['thumbnailSource'] using oiiotool"
" or ffmpeg."
"Used when artist provided thumbnail source."
)
)
ExtractOIIOTranscode: ExtractOIIOTranscodeModel = SettingsField(
default_factory=ExtractOIIOTranscodeModel,
title="Extract OIIO Transcode"
@ -1404,17 +1345,17 @@ DEFAULT_PUBLISH_VALUES = {
"enabled": True,
"contribution_layers": [
# Asset layers
{"name": "model", "order": 100, "scope": ["asset"]},
{"name": "assembly", "order": 150, "scope": ["asset"]},
{"name": "groom", "order": 175, "scope": ["asset"]},
{"name": "look", "order": 200, "scope": ["asset"]},
{"name": "rig", "order": 300, "scope": ["asset"]},
{"name": "model", "order": 100},
{"name": "assembly", "order": 150},
{"name": "groom", "order": 175},
{"name": "look", "order": 200},
{"name": "rig", "order": 300},
# Shot layers
{"name": "layout", "order": 200, "scope": ["shot"]},
{"name": "animation", "order": 300, "scope": ["shot"]},
{"name": "simulation", "order": 400, "scope": ["shot"]},
{"name": "fx", "order": 500, "scope": ["shot"]},
{"name": "lighting", "order": 600, "scope": ["shot"]},
{"name": "layout", "order": 200},
{"name": "animation", "order": 300},
{"name": "simulation", "order": 400},
{"name": "fx", "order": 500},
{"name": "lighting", "order": 600},
],
"profiles": [
{
@ -1517,12 +1458,6 @@ DEFAULT_PUBLISH_VALUES = {
},
"ExtractThumbnail": {
"enabled": True,
"profiles": [
{
"product_types": [],
"host_names": [],
"task_types": [],
"task_names": [],
"product_names": [],
"integrate_thumbnail": True,
"target_size": {
@ -1539,18 +1474,6 @@ DEFAULT_PUBLISH_VALUES = {
],
"output": []
}
}
]
},
"ExtractThumbnailFromSource": {
"enabled": True,
"target_size": {
"type": "resize",
"resize": {
"width": 300,
"height": 170
}
},
},
"ExtractOIIOTranscode": {
"enabled": True,

View file

@ -24,10 +24,6 @@ class ProductTypeSmartSelectModel(BaseSettingsModel):
class ProductNameProfile(BaseSettingsModel):
_layout = "expanded"
product_base_types: list[str] = SettingsField(
default_factory=list,
title="Product base types",
)
product_types: list[str] = SettingsField(
default_factory=list,
title="Product types",
@ -356,27 +352,6 @@ class CustomStagingDirProfileModel(BaseSettingsModel):
)
class DiscoverValidationModel(BaseSettingsModel):
"""Strictly validate publish plugins discovery.
Artist won't be able to publish if path to publish plugin fails to be
imported.
"""
_isGroup = True
enabled: bool = SettingsField(
False,
description="Enable strict mode of plugins discovery",
)
ignore_paths: list[str] = SettingsField(
default_factory=list,
title="Ignored paths (regex)",
description=(
"Paths that do match regex will be skipped in validation."
),
)
class PublishToolModel(BaseSettingsModel):
template_name_profiles: list[PublishTemplateNameProfile] = SettingsField(
default_factory=list,
@ -394,10 +369,6 @@ class PublishToolModel(BaseSettingsModel):
title="Custom Staging Dir Profiles"
)
)
discover_validation: DiscoverValidationModel = SettingsField(
default_factory=DiscoverValidationModel,
title="Validate plugins discovery",
)
comment_minimum_required_chars: int = SettingsField(
0,
title="Publish comment minimum required characters",
@ -472,7 +443,6 @@ DEFAULT_TOOLS_VALUES = {
],
"product_name_profiles": [
{
"product_base_types": [],
"product_types": [],
"host_names": [],
"task_types": [],
@ -480,31 +450,28 @@ DEFAULT_TOOLS_VALUES = {
"template": "{product[type]}{variant}"
},
{
"product_base_types": [
"product_types": [
"workfile"
],
"product_types": [],
"host_names": [],
"task_types": [],
"task_names": [],
"template": "{product[type]}{Task[name]}"
},
{
"product_base_types": [
"product_types": [
"render"
],
"product_types": [],
"host_names": [],
"task_types": [],
"task_names": [],
"template": "{product[type]}{Task[name]}{Variant}<_{Aov}>"
},
{
"product_base_types": [
"product_types": [
"renderLayer",
"renderPass"
],
"product_types": [],
"host_names": [
"tvpaint"
],
@ -515,11 +482,10 @@ DEFAULT_TOOLS_VALUES = {
)
},
{
"product_base_types": [
"product_types": [
"review",
"workfile"
],
"product_types": [],
"host_names": [
"aftereffects",
"tvpaint"
@ -529,8 +495,7 @@ DEFAULT_TOOLS_VALUES = {
"template": "{product[type]}{Task[name]}"
},
{
"product_base_types": ["render"],
"product_types": [],
"product_types": ["render"],
"host_names": [
"aftereffects"
],
@ -539,10 +504,9 @@ DEFAULT_TOOLS_VALUES = {
"template": "{product[type]}{Task[name]}{Composition}{Variant}"
},
{
"product_base_types": [
"product_types": [
"staticMesh"
],
"product_types": [],
"host_names": [
"maya"
],
@ -551,10 +515,9 @@ DEFAULT_TOOLS_VALUES = {
"template": "S_{folder[name]}{variant}"
},
{
"product_base_types": [
"product_types": [
"skeletalMesh"
],
"product_types": [],
"host_names": [
"maya"
],
@ -563,10 +526,9 @@ DEFAULT_TOOLS_VALUES = {
"template": "SK_{folder[name]}{variant}"
},
{
"product_base_types": [
"product_types": [
"hda"
],
"product_types": [],
"host_names": [
"houdini"
],
@ -575,10 +537,9 @@ DEFAULT_TOOLS_VALUES = {
"template": "{folder[name]}_{variant}"
},
{
"product_base_types": [
"product_types": [
"textureSet"
],
"product_types": [],
"host_names": [
"substancedesigner"
],
@ -730,10 +691,6 @@ DEFAULT_TOOLS_VALUES = {
"template_name": "simpleUnrealTextureHero"
}
],
"discover_validation": {
"enabled": False,
"ignore_paths": [],
},
"comment_minimum_required_chars": 0,
}
}
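For reference, a hypothetical project settings override enabling the strict discovery validation defined above, with one ignored path pattern (the pattern itself is made up):

    overrides = {
        "core": {"tools": {"publish": {"discover_validation": {
            "enabled": True,
            "ignore_paths": [".*/studio_experimental_plugins/.*"],
        }}}}
    }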

View file

@ -1,333 +0,0 @@
"""Tests for product_name helpers."""
import pytest
from unittest.mock import patch
from ayon_core.pipeline.create.product_name import (
get_product_name_template,
get_product_name,
)
from ayon_core.pipeline.create.constants import DEFAULT_PRODUCT_TEMPLATE
from ayon_core.pipeline.create.exceptions import (
TaskNotSetError,
TemplateFillError,
)
class TestGetProductNameTemplate:
@patch("ayon_core.pipeline.create.product_name.get_project_settings")
@patch("ayon_core.pipeline.create.product_name.filter_profiles")
def test_matching_profile_with_replacements(
self,
mock_filter_profiles,
mock_get_settings,
):
"""Matching profile applies legacy replacement tokens."""
mock_get_settings.return_value = {
"core": {"tools": {"creator": {"product_name_profiles": []}}}
}
# The function should replace {task}/{family}/{asset} variants
mock_filter_profiles.return_value = {
"template": ("{task}-{Task}-{TASK}-{family}-{Family}"
"-{FAMILY}-{asset}-{Asset}-{ASSET}")
}
result = get_product_name_template(
project_name="proj",
product_type="model",
task_name="modeling",
task_type="Modeling",
host_name="maya",
)
assert result == (
"{task[name]}-{Task[name]}-{TASK[NAME]}-"
"{product[type]}-{Product[type]}-{PRODUCT[TYPE]}-"
"{folder[name]}-{Folder[name]}-{FOLDER[NAME]}"
)
@patch("ayon_core.pipeline.create.product_name.get_project_settings")
@patch("ayon_core.pipeline.create.product_name.filter_profiles")
def test_no_matching_profile_uses_default(
self,
mock_filter_profiles,
mock_get_settings,
):
mock_get_settings.return_value = {
"core": {"tools": {"creator": {"product_name_profiles": []}}}
}
mock_filter_profiles.return_value = None
assert (
get_product_name_template(
project_name="proj",
product_type="model",
task_name="modeling",
task_type="Modeling",
host_name="maya",
)
== DEFAULT_PRODUCT_TEMPLATE
)
@patch("ayon_core.pipeline.create.product_name.get_project_settings")
@patch("ayon_core.pipeline.create.product_name.filter_profiles")
def test_custom_default_template_used(
self,
mock_filter_profiles,
mock_get_settings,
):
mock_get_settings.return_value = {
"core": {"tools": {"creator": {"product_name_profiles": []}}}
}
mock_filter_profiles.return_value = None
custom_default = "{variant}_{family}"
assert (
get_product_name_template(
project_name="proj",
product_type="model",
task_name="modeling",
task_type="Modeling",
host_name="maya",
default_template=custom_default,
)
== custom_default
)
@patch("ayon_core.pipeline.create.product_name.get_project_settings")
@patch("ayon_core.pipeline.create.product_name.filter_profiles")
def test_product_base_type_added_to_filtering_when_provided(
self,
mock_filter_profiles,
mock_get_settings,
):
mock_get_settings.return_value = {
"core": {"tools": {"creator": {"product_name_profiles": []}}}
}
mock_filter_profiles.return_value = None
get_product_name_template(
project_name="proj",
product_type="model",
task_name="modeling",
task_type="Modeling",
host_name="maya",
product_base_type="asset",
)
args, kwargs = mock_filter_profiles.call_args
# args[1] is filtering_criteria
assert args[1]["product_base_types"] == "asset"
class TestGetProductName:
@patch("ayon_core.pipeline.create.product_name.get_product_name_template")
@patch("ayon_core.pipeline.create.product_name."
"StringTemplate.format_strict_template")
@patch("ayon_core.pipeline.create.product_name.prepare_template_data")
def test_empty_product_type_returns_empty(
self, mock_prepare, mock_format, mock_get_tmpl
):
assert (
get_product_name(
project_name="proj",
task_name="modeling",
task_type="Modeling",
host_name="maya",
product_type="",
variant="Main",
)
== ""
)
mock_get_tmpl.assert_not_called()
mock_format.assert_not_called()
mock_prepare.assert_not_called()
@patch("ayon_core.pipeline.create.product_name.get_product_name_template")
@patch("ayon_core.pipeline.create.product_name."
"StringTemplate.format_strict_template")
@patch("ayon_core.pipeline.create.product_name.prepare_template_data")
def test_happy_path(
self, mock_prepare, mock_format, mock_get_tmpl
):
mock_get_tmpl.return_value = "{task[name]}_{product[type]}_{variant}"
mock_prepare.return_value = {
"task": {"name": "modeling"},
"product": {"type": "model"},
"variant": "Main",
"family": "model",
}
mock_format.return_value = "modeling_model_Main"
result = get_product_name(
project_name="proj",
task_name="modeling",
task_type="Modeling",
host_name="maya",
product_type="model",
variant="Main",
)
assert result == "modeling_model_Main"
mock_get_tmpl.assert_called_once()
mock_prepare.assert_called_once()
mock_format.assert_called_once()
@patch("ayon_core.pipeline.create.product_name.get_product_name_template")
@patch("ayon_core.pipeline.create.product_name."
"StringTemplate.format_strict_template")
@patch("ayon_core.pipeline.create.product_name.prepare_template_data")
def test_product_name_with_base_type(
self, mock_prepare, mock_format, mock_get_tmpl
):
mock_get_tmpl.return_value = (
"{task[name]}_{product[basetype]}_{variant}"
)
mock_prepare.return_value = {
"task": {"name": "modeling"},
"product": {"type": "model"},
"variant": "Main",
"family": "model",
}
mock_format.return_value = "modeling_modelBase_Main"
result = get_product_name(
project_name="proj",
task_name="modeling",
task_type="Modeling",
host_name="maya",
product_type="model",
product_base_type="modelBase",
variant="Main",
)
assert result == "modeling_modelBase_Main"
mock_get_tmpl.assert_called_once()
mock_prepare.assert_called_once()
mock_format.assert_called_once()
@patch("ayon_core.pipeline.create.product_name.get_product_name_template")
def test_task_required_but_missing_raises(self, mock_get_tmpl):
mock_get_tmpl.return_value = "{task[name]}_{variant}"
with pytest.raises(TaskNotSetError):
get_product_name(
project_name="proj",
task_name="",
task_type="Modeling",
host_name="maya",
product_type="model",
variant="Main",
)
@patch("ayon_core.pipeline.create.product_name.get_product_name_template")
@patch("ayon_core.pipeline.create.product_name.ayon_api.get_project")
@patch("ayon_core.pipeline.create.product_name.StringTemplate."
"format_strict_template")
@patch("ayon_core.pipeline.create.product_name.prepare_template_data")
def test_task_short_name_is_used(
self, mock_prepare, mock_format, mock_get_project, mock_get_tmpl
):
mock_get_tmpl.return_value = "{task[short]}_{variant}"
mock_get_project.return_value = {
"taskTypes": [{"name": "Modeling", "shortName": "mdl"}]
}
mock_prepare.return_value = {
"task": {
"short": "mdl"
},
"variant": "Main"
}
mock_format.return_value = "mdl_Main"
result = get_product_name(
project_name="proj",
task_name="modeling",
task_type="Modeling",
host_name="maya",
product_type="model",
variant="Main",
)
assert result == "mdl_Main"
@patch("ayon_core.pipeline.create.product_name.get_product_name_template")
@patch("ayon_core.pipeline.create.product_name.StringTemplate."
"format_strict_template")
@patch("ayon_core.pipeline.create.product_name.prepare_template_data")
def test_template_fill_error_translated(
self, mock_prepare, mock_format, mock_get_tmpl
):
mock_get_tmpl.return_value = "{missing_key}_{variant}"
mock_prepare.return_value = {"variant": "Main"}
mock_format.side_effect = KeyError("missing_key")
with pytest.raises(TemplateFillError):
get_product_name(
project_name="proj",
task_name="modeling",
task_type="Modeling",
host_name="maya",
product_type="model",
variant="Main",
)
@patch("ayon_core.pipeline.create.product_name.warn")
@patch("ayon_core.pipeline.create.product_name.get_product_name_template")
@patch("ayon_core.pipeline.create.product_name."
"StringTemplate.format_strict_template")
@patch("ayon_core.pipeline.create.product_name.prepare_template_data")
def test_warns_when_template_needs_base_type_but_missing(
self,
mock_prepare,
mock_format,
mock_get_tmpl,
mock_warn,
):
mock_get_tmpl.return_value = "{product[basetype]}_{variant}"
mock_prepare.return_value = {
"product": {"type": "model"},
"variant": "Main",
"family": "model",
}
mock_format.return_value = "asset_Main"
_ = get_product_name(
project_name="proj",
task_name="modeling",
task_type="Modeling",
host_name="maya",
product_type="model",
variant="Main",
)
mock_warn.assert_called_once()
@patch("ayon_core.pipeline.create.product_name.get_product_name_template")
@patch("ayon_core.pipeline.create.product_name."
"StringTemplate.format_strict_template")
@patch("ayon_core.pipeline.create.product_name.prepare_template_data")
def test_dynamic_data_overrides_defaults(
self, mock_prepare, mock_format, mock_get_tmpl
):
mock_get_tmpl.return_value = "{custom}_{variant}"
mock_prepare.return_value = {"custom": "overridden", "variant": "Main"}
mock_format.return_value = "overridden_Main"
result = get_product_name(
project_name="proj",
task_name="modeling",
task_type="Modeling",
host_name="maya",
product_type="model",
variant="Main",
dynamic_data={"custom": "overridden"},
)
assert result == "overridden_Main"
@patch("ayon_core.pipeline.create.product_name.get_product_name_template")
def test_product_type_filter_is_used(self, mock_get_tmpl):
mock_get_tmpl.return_value = DEFAULT_PRODUCT_TEMPLATE
_ = get_product_name(
project_name="proj",
task_name="modeling",
task_type="Modeling",
host_name="maya",
product_type="model",
variant="Main",
product_type_filter="look",
)
args, kwargs = mock_get_tmpl.call_args
assert kwargs["product_type"] == "look"