Merge branch 'develop' into fix_missing_published_workfiles_details_in_sidepanel

Jakub Trllo 2025-11-07 16:07:12 +01:00 committed by GitHub
commit 614ecfbc58
20 changed files with 820 additions and 346 deletions

View file

@ -604,7 +604,11 @@ class EnumDef(AbstractAttrDef):
if value is None:
return copy.deepcopy(self.default)
return list(self._item_values.intersection(value))
return [
v
for v in value
if v in self._item_values
]
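A minimal sketch of what the new list comprehension changes for multiselection enums, assuming EnumDef's `items` and `multiselection` arguments as used elsewhere in ayon_core; unlike the previous set intersection, the input order of values is now preserved:

```python
from ayon_core.lib.attribute_definitions import EnumDef

# Hedged example: constructor arguments are assumed, not taken from this diff.
enum_def = EnumDef(
    "colorspaces",
    items=["ACEScg", "sRGB", "Rec709"],
    multiselection=True,
)
# Unknown values are dropped, known values keep their original order.
print(enum_def.convert_value(["Rec709", "ACEScg", "linear"]))
# -> ["Rec709", "ACEScg"]
```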
def is_value_valid(self, value: Any) -> bool:
"""Check if item is available in possible values."""

View file

@ -110,6 +110,15 @@ def deprecated(new_destination):
return _decorator(func)
class MissingRGBAChannelsError(ValueError):
"""Raised when we can't find channels to use as RGBA for conversion in
input media.
Channels other than plain RGBA, such as a Z channel, may also be
considered. The error is raised when no matching 'reviewable' channel
is found.
"""
def get_transcode_temp_directory():
"""Creates temporary folder for transcoding.
@ -388,6 +397,10 @@ def get_review_info_by_layer_name(channel_names):
...
]
This tries to find outputs suitable for review purposes by searching
for channel names like RGBA, but also XYZ, Z, N, AR, AG and AB
channels.
Args:
channel_names (list[str]): List of channel names.
@ -396,7 +409,6 @@ def get_review_info_by_layer_name(channel_names):
"""
layer_names_order = []
rgba_by_layer_name = collections.defaultdict(dict)
channels_by_layer_name = collections.defaultdict(dict)
for channel_name in channel_names:
@ -405,45 +417,95 @@ def get_review_info_by_layer_name(channel_names):
if "." in channel_name:
layer_name, last_part = channel_name.rsplit(".", 1)
channels_by_layer_name[layer_name][channel_name] = last_part
if last_part.lower() not in {
"r", "red",
"g", "green",
"b", "blue",
"a", "alpha"
# R, G, B, A or X, Y, Z, N, AR, AG, AB, RED, GREEN, BLUE, ALPHA
channel = last_part.upper()
if channel not in {
# Detect RGBA channels
"R", "G", "B", "A",
# Support fully written out rgba channel names
"RED", "GREEN", "BLUE", "ALPHA",
# Allow detection of x, y and z channels, and normal channels
"X", "Y", "Z", "N",
# red, green and blue alpha/opacity, for colored mattes
"AR", "AG", "AB"
}:
continue
if layer_name not in layer_names_order:
layer_names_order.append(layer_name)
# R, G, B or A
channel = last_part[0].upper()
rgba_by_layer_name[layer_name][channel] = channel_name
channels_by_layer_name[layer_name][channel] = channel_name
# Put empty layer or 'rgba' to the beginning of the list
# - if input has R, G, B, A channels they should be used for review
# NOTE They are iterated in reversed order because they're inserted to
# the beginning of 'layer_names_order' -> last added will be first.
for name in reversed(["", "rgba"]):
if name in layer_names_order:
layer_names_order.remove(name)
layer_names_order.insert(0, name)
def _sort(_layer_name: str) -> int:
# Prioritize "" layer name
# Prioritize layers with RGB channels
if _layer_name == "rgba":
return 0
if _layer_name == "":
return 1
channels = channels_by_layer_name[_layer_name]
if all(channel in channels for channel in "RGB"):
return 2
return 10
layer_names_order.sort(key=_sort)
output = []
for layer_name in layer_names_order:
rgba_layer_info = rgba_by_layer_name[layer_name]
red = rgba_layer_info.get("R")
green = rgba_layer_info.get("G")
blue = rgba_layer_info.get("B")
if not red or not green or not blue:
channel_info = channels_by_layer_name[layer_name]
alpha = channel_info.get("A")
# RGB channels
if all(channel in channel_info for channel in "RGB"):
rgb = "R", "G", "B"
# RGB channels using fully written out channel names
elif all(
channel in channel_info
for channel in ("RED", "GREEN", "BLUE")
):
rgb = "RED", "GREEN", "BLUE"
alpha = channel_info.get("ALPHA")
# XYZ channels (position pass)
elif all(channel in channel_info for channel in "XYZ"):
rgb = "X", "Y", "Z"
# Colored mattes (as defined in OpenEXR Channel Name standards)
elif all(channel in channel_info for channel in ("AR", "AG", "AB")):
rgb = "AR", "AG", "AB"
# Luminance channel (as defined in OpenEXR Channel Name standards)
elif "Y" in channel_info:
rgb = "Y", "Y", "Y"
# Has only Z channel (Z-depth layer)
elif "Z" in channel_info:
rgb = "Z", "Z", "Z"
# Has only A channel (Alpha layer)
elif "A" in channel_info:
rgb = "A", "A", "A"
alpha = None
else:
# No reviewable channels found
continue
red = channel_info[rgb[0]]
green = channel_info[rgb[1]]
blue = channel_info[rgb[2]]
output.append({
"name": layer_name,
"review_channels": {
"R": red,
"G": green,
"B": blue,
"A": rgba_layer_info.get("A"),
"A": alpha,
}
})
return output
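A brief usage sketch of the reworked helper; the channel names are illustrative and the import path matches the tests added at the bottom of this diff:

```python
from ayon_core.lib.transcoding import get_review_info_by_layer_name

channel_names = [
    "R", "G", "B", "A",
    "diffuse.R", "diffuse.G", "diffuse.B",
    "Z",
]
for layer in get_review_info_by_layer_name(channel_names):
    # The beauty/rgba layer sorts first; a layer that has only a Z channel
    # would fall back to using Z for all of R, G and B.
    print(layer["name"], layer["review_channels"])
```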
@ -1467,8 +1529,9 @@ def get_oiio_input_and_channel_args(oiio_input_info, alpha_default=None):
review_channels = get_convert_rgb_channels(channel_names)
if review_channels is None:
raise ValueError(
"Couldn't find channels that can be used for conversion."
raise MissingRGBAChannelsError(
"Couldn't find channels that can be used for conversion "
f"among channels: {channel_names}."
)
red, green, blue, alpha = review_channels

View file

@ -137,6 +137,7 @@ class AttributeValues:
if value is None:
continue
converted_value = attr_def.convert_value(value)
# QUESTION Could we just use converted value all the time?
if converted_value == value:
self._data[attr_def.key] = value
@ -245,11 +246,11 @@ class AttributeValues:
def _update(self, value):
changes = {}
for key, value in dict(value).items():
if key in self._data and self._data.get(key) == value:
for key, key_value in dict(value).items():
if key in self._data and self._data.get(key) == key_value:
continue
self._data[key] = value
changes[key] = value
self._data[key] = key_value
changes[key] = key_value
return changes
def _pop(self, key, default):

View file

@ -1045,7 +1045,9 @@ def get_resources(project_name, version_entity, extension=None):
filtered.append(repre_entity)
representation = filtered[0]
directory = get_representation_path(representation)
directory = get_representation_path(
project_name, representation
)
print("Source: ", directory)
resources = sorted(
[

View file

@ -25,8 +25,8 @@ from .utils import (
get_loader_identifier,
get_loaders_by_name,
get_representation_path_from_context,
get_representation_path,
get_representation_path_from_context,
get_representation_path_with_anatomy,
is_compatible_loader,
@ -85,8 +85,8 @@ __all__ = (
"get_loader_identifier",
"get_loaders_by_name",
"get_representation_path_from_context",
"get_representation_path",
"get_representation_path_from_context",
"get_representation_path_with_anatomy",
"is_compatible_loader",

View file

@ -1,11 +1,15 @@
from __future__ import annotations
import os
import uuid
import platform
import warnings
import logging
import inspect
import collections
import numbers
from typing import Optional, Union, Any
import copy
from functools import wraps
from typing import Optional, Union, Any, overload
import ayon_api
@ -14,9 +18,8 @@ from ayon_core.lib import (
StringTemplate,
TemplateUnsolved,
)
from ayon_core.pipeline import (
Anatomy,
)
from ayon_core.lib.path_templates import TemplateResult
from ayon_core.pipeline import Anatomy
log = logging.getLogger(__name__)
@ -644,15 +647,15 @@ def get_representation_path_from_context(context):
representation = context["representation"]
project_entity = context.get("project")
root = None
if (
project_entity
and project_entity["name"] != get_current_project_name()
):
anatomy = Anatomy(project_entity["name"])
root = anatomy.roots
return get_representation_path(representation, root)
if project_entity:
project_name = project_entity["name"]
else:
project_name = get_current_project_name()
return get_representation_path(
project_name,
representation,
project_entity=project_entity,
)
def get_representation_path_with_anatomy(repre_entity, anatomy):
@ -671,139 +674,248 @@ def get_representation_path_with_anatomy(repre_entity, anatomy):
anatomy (Anatomy): Project anatomy object.
Returns:
Union[None, TemplateResult]: None if path can't be received
TemplateResult: Resolved representation path.
Raises:
InvalidRepresentationContext: When representation data are probably
invalid or not available.
"""
return get_representation_path(
anatomy.project_name,
repre_entity,
anatomy=anatomy,
)
def get_representation_path_with_roots(
representation: dict[str, Any],
roots: dict[str, str],
) -> Optional[TemplateResult]:
"""Get filename from representation with custom root.
Args:
representation(dict): Representation entity.
roots (dict[str, str]): Roots to use.
Returns:
Optional[TemplateResult]: Resolved representation path.
"""
try:
template = representation["attrib"]["template"]
except KeyError:
return None
try:
context = representation["context"]
_fix_representation_context_compatibility(context)
context["root"] = roots
path = StringTemplate.format_strict_template(
template, context
)
except (TemplateUnsolved, KeyError):
# Template references unavailable data
return None
return path.normalized()
def _backwards_compatibility_repre_path(func):
"""Wrapper handling backwards compatibility of 'get_representation_path'.
Allows 'get_representation_path' to support old and new signatures of the
function. The old signature supported passing in representation entity
and optional roots. The new signature requires the project name
to be passed. In case custom roots should be used, a dedicated function
'get_representation_path_with_roots' is available.
The wrapper inspects the passed arguments and, based on the kwargs and
argument types, calls the variant that matches them.
The function is also marked with a 'version' attribute so other addons
can check whether the new or the old signature is in use, which should
allow them to adapt to the new signature.
>>> if getattr(get_representation_path, "version", None) == 2:
>>> path = get_representation_path(project_name, repre_entity)
>>> else:
>>> path = get_representation_path(repre_entity)
Backwards compatibility is planned to be removed on 1.1.2026.
"""
# Add an attribute to the function so addons can check if the new variant
# of the function is available.
# >>> getattr(get_representation_path, "version", None) == 2
# >>> True
setattr(func, "version", 2)
@wraps(func)
def inner(*args, **kwargs):
from ayon_core.pipeline import get_current_project_name
# Decide which variant of the function based on passed arguments
# will be used.
if args:
arg_1 = args[0]
if isinstance(arg_1, str):
return func(*args, **kwargs)
elif "project_name" in kwargs:
return func(*args, **kwargs)
warnings.warn(
(
"Used deprecated variant of 'get_representation_path'."
" Please change used arguments signature to follow"
" new definition. Will be removed 1.1.2026."
),
DeprecationWarning,
stacklevel=2,
)
# Find out which arguments were passed
if args:
representation = args[0]
else:
representation = kwargs.get("representation")
if len(args) > 1:
roots = args[1]
else:
roots = kwargs.get("root")
if roots is not None:
return get_representation_path_with_roots(
representation, roots
)
project_name = (
representation["context"].get("project", {}).get("name")
)
if project_name is None:
project_name = get_current_project_name()
return func(project_name, representation)
return inner
@overload
def get_representation_path(
representation: dict[str, Any],
root: Optional[dict[str, Any]] = None,
) -> TemplateResult:
"""DEPRECATED Get filled representation path.
Use 'get_representation_path' using the new function signature.
Args:
representation (dict[str, Any]): Representation entity.
root (Optional[dict[str, Any]]): Roots to fill the path.
Returns:
TemplateResult: Resolved path to representation.
Raises:
InvalidRepresentationContext: When representation data are probably
invalid or not available.
"""
pass
@overload
def get_representation_path(
project_name: str,
repre_entity: dict[str, Any],
*,
anatomy: Optional[Anatomy] = None,
project_entity: Optional[dict[str, Any]] = None,
) -> TemplateResult:
"""Get filled representation path.
Args:
project_name (str): Project name.
repre_entity (dict[str, Any]): Representation entity.
anatomy (Optional[Anatomy]): Project anatomy.
project_entity (Optional[dict[str, Any]]): Project entity. Is used to
initialize Anatomy and is not needed if 'anatomy' is passed in.
Returns:
TemplateResult: Resolved path to representation.
Raises:
InvalidRepresentationContext: When representation data are probably
invalid or not available.
"""
pass
@_backwards_compatibility_repre_path
def get_representation_path(
project_name: str,
repre_entity: dict[str, Any],
*,
anatomy: Optional[Anatomy] = None,
project_entity: Optional[dict[str, Any]] = None,
) -> TemplateResult:
"""Get filled representation path.
Args:
project_name (str): Project name.
repre_entity (dict[str, Any]): Representation entity.
anatomy (Optional[Anatomy]): Project anatomy.
project_entity (Optional[dict[str, Any]]): Project entity. Is used to
initialize Anatomy and is not needed if 'anatomy' is passed in.
Returns:
TemplateResult: Resolved path to representation.
Raises:
InvalidRepresentationContext: When representation data are probably
invalid or not available.
"""
if anatomy is None:
anatomy = Anatomy(project_name, project_entity=project_entity)
try:
template = repre_entity["attrib"]["template"]
except KeyError:
raise InvalidRepresentationContext((
"Representation document does not"
" contain template in data ('data.template')"
))
except KeyError as exc:
raise InvalidRepresentationContext(
"Failed to receive template from representation entity."
) from exc
try:
context = repre_entity["context"]
context = copy.deepcopy(repre_entity["context"])
_fix_representation_context_compatibility(context)
context["root"] = anatomy.roots
path = StringTemplate.format_strict_template(template, context)
except TemplateUnsolved as exc:
raise InvalidRepresentationContext((
"Couldn't resolve representation template with available data."
" Reason: {}".format(str(exc))
))
raise InvalidRepresentationContext(
"Failed to resolve representation template with available data."
) from exc
return path.normalized()
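A short sketch of the call styles the wrapper dispatches between. The import location and the entity lookup are assumptions for illustration, not taken from this diff:

```python
import ayon_api
from ayon_core.pipeline.load import (
    get_representation_path,
    get_representation_path_with_roots,
)

project_name = "my_project"  # hypothetical project
repre_entity = ayon_api.get_representation_by_id(project_name, "<repre id>")

# New signature (preferred): project name first, representation second.
path = get_representation_path(project_name, repre_entity)

# Old signature keeps working until 1.1.2026 but emits a DeprecationWarning.
path = get_representation_path(repre_entity)

# Custom roots now go through a dedicated helper instead of the old
# 'root' argument.
path = get_representation_path_with_roots(
    repre_entity, {"work": "/mnt/projects"}
)
```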
def get_representation_path(representation, root=None):
"""Get filename from representation document
There are three ways of getting the path from representation which are
tried in following sequence until successful.
1. Get template from representation['data']['template'] and data from
representation['context']. Then format template with the data.
2. Get template from project['config'] and format it with default data set
3. Get representation['data']['path'] and use it directly
Args:
representation(dict): representation document from the database
Returns:
str: fullpath of the representation
"""
if root is None:
from ayon_core.pipeline import get_current_project_name, Anatomy
anatomy = Anatomy(get_current_project_name())
return get_representation_path_with_anatomy(
representation, anatomy
)
def path_from_representation():
try:
template = representation["attrib"]["template"]
except KeyError:
return None
try:
context = representation["context"]
_fix_representation_context_compatibility(context)
context["root"] = root
path = StringTemplate.format_strict_template(
template, context
)
# Force replacing backslashes with forward slashed if not on
# windows
if platform.system().lower() != "windows":
path = path.replace("\\", "/")
except (TemplateUnsolved, KeyError):
# Template references unavailable data
return None
if not path:
return path
normalized_path = os.path.normpath(path)
if os.path.exists(normalized_path):
return normalized_path
return path
def path_from_data():
if "path" not in representation["attrib"]:
return None
path = representation["attrib"]["path"]
# Force replacing backslashes with forward slashed if not on
# windows
if platform.system().lower() != "windows":
path = path.replace("\\", "/")
if os.path.exists(path):
return os.path.normpath(path)
dir_path, file_name = os.path.split(path)
if not os.path.exists(dir_path):
return None
base_name, ext = os.path.splitext(file_name)
file_name_items = None
if "#" in base_name:
file_name_items = [part for part in base_name.split("#") if part]
elif "%" in base_name:
file_name_items = base_name.split("%")
if not file_name_items:
return None
filename_start = file_name_items[0]
for _file in os.listdir(dir_path):
if _file.startswith(filename_start) and _file.endswith(ext):
return os.path.normpath(path)
return (
path_from_representation() or path_from_data()
)
def get_representation_path_by_names(
project_name: str,
folder_path: str,
product_name: str,
version_name: str,
representation_name: str,
anatomy: Optional[Anatomy] = None) -> Optional[str]:
project_name: str,
folder_path: str,
product_name: str,
version_name: Union[int, str],
representation_name: str,
anatomy: Optional[Anatomy] = None
) -> Optional[TemplateResult]:
"""Get (latest) filepath for representation for folder and product.
See `get_representation_by_names` for more details.
@ -820,22 +932,21 @@ def get_representation_path_by_names(
representation_name
)
if not representation:
return
return None
if not anatomy:
anatomy = Anatomy(project_name)
if representation:
path = get_representation_path_with_anatomy(representation, anatomy)
return str(path).replace("\\", "/")
return get_representation_path(
project_name,
representation,
anatomy=anatomy,
)
def get_representation_by_names(
project_name: str,
folder_path: str,
product_name: str,
version_name: Union[int, str],
representation_name: str,
project_name: str,
folder_path: str,
product_name: str,
version_name: Union[int, str],
representation_name: str,
) -> Optional[dict]:
"""Get representation entity for asset and subset.
@ -852,7 +963,7 @@ def get_representation_by_names(
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path, fields=["id"])
if not folder_entity:
return
return None
if isinstance(product_name, dict) and "name" in product_name:
# Allow explicitly passing subset document
@ -864,7 +975,7 @@ def get_representation_by_names(
folder_id=folder_entity["id"],
fields=["id"])
if not product_entity:
return
return None
if version_name == "hero":
version_entity = ayon_api.get_hero_version_by_product_id(
@ -876,7 +987,7 @@ def get_representation_by_names(
version_entity = ayon_api.get_version_by_name(
project_name, version_name, product_id=product_entity["id"])
if not version_entity:
return
return None
return ayon_api.get_representation_by_name(
project_name, representation_name, version_id=version_entity["id"])

View file

@ -52,7 +52,7 @@ class CollectAudio(pyblish.api.ContextPlugin):
context, self.__class__
):
# Skip instances that already have audio filled
if instance.data.get("audio"):
if "audio" in instance.data:
self.log.debug(
"Skipping Audio collection. It is already collected"
)

View file

@ -11,6 +11,7 @@ from ayon_core.lib import (
is_oiio_supported,
)
from ayon_core.lib.transcoding import (
MissingRGBAChannelsError,
oiio_color_convert,
)
@ -111,7 +112,17 @@ class ExtractOIIOTranscode(publish.Extractor):
self.log.warning("Config file doesn't exist, skipping")
continue
# Get representation files to convert
if isinstance(repre["files"], list):
repre_files_to_convert = copy.deepcopy(repre["files"])
else:
repre_files_to_convert = [repre["files"]]
# Process each output definition
for output_def in profile_output_defs:
# Local copy to avoid accidental mutable changes
files_to_convert = list(repre_files_to_convert)
output_name = output_def["name"]
new_repre = copy.deepcopy(repre)
@ -122,11 +133,6 @@ class ExtractOIIOTranscode(publish.Extractor):
)
new_repre["stagingDir"] = new_staging_dir
if isinstance(new_repre["files"], list):
files_to_convert = copy.deepcopy(new_repre["files"])
else:
files_to_convert = [new_repre["files"]]
output_extension = output_def["extension"]
output_extension = output_extension.replace('.', '')
self._rename_in_representation(new_repre,
@ -168,30 +174,49 @@ class ExtractOIIOTranscode(publish.Extractor):
additional_command_args = (output_def["oiiotool_args"]
["additional_command_args"])
files_to_convert = self._translate_to_sequence(
files_to_convert)
self.log.debug("Files to convert: {}".format(files_to_convert))
for file_name in files_to_convert:
sequence_files = self._translate_to_sequence(files_to_convert)
self.log.debug("Files to convert: {}".format(sequence_files))
missing_rgba_review_channels = False
for file_name in sequence_files:
if isinstance(file_name, clique.Collection):
# Convert to filepath that can be directly converted
# by oiio like `frame.1001-1025%04d.exr`
file_name: str = file_name.format(
"{head}{range}{padding}{tail}"
)
self.log.debug("Transcoding file: `{}`".format(file_name))
input_path = os.path.join(original_staging_dir,
file_name)
output_path = self._get_output_file_path(input_path,
new_staging_dir,
output_extension)
try:
oiio_color_convert(
input_path=input_path,
output_path=output_path,
config_path=config_path,
source_colorspace=source_colorspace,
target_colorspace=target_colorspace,
target_display=target_display,
target_view=target_view,
source_display=source_display,
source_view=source_view,
additional_command_args=additional_command_args,
logger=self.log
)
except MissingRGBAChannelsError as exc:
missing_rgba_review_channels = True
self.log.error(exc)
self.log.error(
"Skipping OIIO Transcode. Unknown RGBA channels"
f" for colorspace conversion in file: {input_path}"
)
break
oiio_color_convert(
input_path=input_path,
output_path=output_path,
config_path=config_path,
source_colorspace=source_colorspace,
target_colorspace=target_colorspace,
target_display=target_display,
target_view=target_view,
source_display=source_display,
source_view=source_view,
additional_command_args=additional_command_args,
logger=self.log
)
if missing_rgba_review_channels:
# Stop processing this representation
break
# cleanup temporary transcoded files
for file_name in new_repre["files"]:
@ -217,11 +242,11 @@ class ExtractOIIOTranscode(publish.Extractor):
added_review = True
# If there is only 1 file outputted then convert list to
# string, cause that'll indicate that its not a sequence.
# string, because that'll indicate that it is not a sequence.
if len(new_repre["files"]) == 1:
new_repre["files"] = new_repre["files"][0]
# If the source representation has "review" tag, but its not
# If the source representation has "review" tag, but it's not
# part of the output definition tags, then both the
# representations will be transcoded in ExtractReview and
# their outputs will clash in integration.
@ -271,42 +296,34 @@ class ExtractOIIOTranscode(publish.Extractor):
new_repre["files"] = renamed_files
def _translate_to_sequence(self, files_to_convert):
"""Returns original list or list with filename formatted in single
sequence format.
"""Returns original list or a clique.Collection of a sequence.
Uses clique to find frame sequence, in this case it merges all frames
into sequence format (FRAMESTART-FRAMEEND#) and returns it.
If sequence not found, it returns original list
Uses clique to find a frame sequence Collection.
If no sequence is found, the original list is returned.
Args:
files_to_convert (list): list of file names
Returns:
(list) of [file.1001-1010#.exr] or [fileA.exr, fileB.exr]
list[str | clique.Collection]: List of filepaths or a list
of Collections (usually one, unless there are holes)
"""
pattern = [clique.PATTERNS["frames"]]
collections, _ = clique.assemble(
files_to_convert, patterns=pattern,
assume_padded_when_ambiguous=True)
if collections:
if len(collections) > 1:
raise ValueError(
"Too many collections {}".format(collections))
collection = collections[0]
frames = list(collection.indexes)
if collection.holes().indexes:
return files_to_convert
# Get the padding from the collection
# This is the number of digits used in the frame numbers
padding = collection.padding
frame_str = "{}-{}%0{}d".format(frames[0], frames[-1], padding)
file_name = "{}{}{}".format(collection.head, frame_str,
collection.tail)
files_to_convert = [file_name]
# TODO: Technically oiiotool supports holes in the sequence as well
# using the dedicated --frames argument to specify the frames.
# We may want to use that too so conversions of sequences with
# holes will perform faster as well.
# Separate the collection so that we have no holes/gaps per
# collection.
return collection.separate()
return files_to_convert
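For clarity, a small sketch (with hypothetical file names) of how clique splits a sequence with a hole into contiguous ranges; each range is later formatted like `shot.1001-1002%04d.exr` for a single oiiotool call:

```python
import clique

files = ["shot.1001.exr", "shot.1002.exr", "shot.1004.exr"]
collections, _remainder = clique.assemble(
    files,
    patterns=[clique.PATTERNS["frames"]],
    assume_padded_when_ambiguous=True,
)
# The hole at frame 1003 splits the collection into contiguous ranges.
for collection in collections[0].separate():
    print(collection.format("{head}{range}{padding}{tail}"))
```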

View file

@ -1,12 +1,83 @@
import collections
import hashlib
import os
import tempfile
import uuid
from pathlib import Path
import pyblish
from ayon_core.lib import get_ffmpeg_tool_args, run_subprocess
from ayon_core.lib import (
get_ffmpeg_tool_args,
run_subprocess
)
def get_audio_instances(context):
"""Return only instances which are having audio in families
Args:
context (pyblish.context): context of publisher
Returns:
list: list of selected instances
"""
audio_instances = []
for instance in context:
if not instance.data.get("parent_instance_id"):
continue
if (
instance.data["productType"] == "audio"
or instance.data.get("reviewAudio")
):
audio_instances.append(instance)
return audio_instances
def map_instances_by_parent_id(context):
"""Create a mapping of instances by their parent id
Args:
context (pyblish.context): context of publisher
Returns:
dict: mapping of instances by their parent id
"""
instances_by_parent_id = collections.defaultdict(list)
for instance in context:
parent_instance_id = instance.data.get("parent_instance_id")
if not parent_instance_id:
continue
instances_by_parent_id[parent_instance_id].append(instance)
return instances_by_parent_id
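A tiny sketch of the parent-id mapping, using hypothetical dict-based stand-ins for pyblish instances:

```python
import collections

instances = [
    {"id": "a1", "data": {"parent_instance_id": "shot1",
                          "productType": "audio"}},
    {"id": "p1", "data": {"parent_instance_id": "shot1",
                          "productType": "plate", "reviewAudio": True}},
]

instances_by_parent_id = collections.defaultdict(list)
for instance in instances:
    parent_instance_id = instance["data"].get("parent_instance_id")
    if parent_instance_id:
        instances_by_parent_id[parent_instance_id].append(instance)

# Both siblings share "shot1", so audio extracted for the audio instance
# can also be attached to its reviewable sibling.
print([i["id"] for i in instances_by_parent_id["shot1"]])  # -> ['a1', 'p1']
```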
class CollectParentAudioInstanceAttribute(pyblish.api.ContextPlugin):
"""Collect audio instance attribute"""
order = pyblish.api.CollectorOrder
label = "Collect Audio Instance Attribute"
def process(self, context):
audio_instances = get_audio_instances(context)
# no need to continue if no audio instances found
if not audio_instances:
return
# create mapped instances by parent id
instances_by_parent_id = map_instances_by_parent_id(context)
# distribute audio related attribute
for audio_instance in audio_instances:
parent_instance_id = audio_instance.data["parent_instance_id"]
for sibl_instance in instances_by_parent_id[parent_instance_id]:
# exclude the same audio instance
if sibl_instance.id == audio_instance.id:
continue
self.log.info(
"Adding audio to Sibling instance: "
f"{sibl_instance.data['label']}"
)
sibl_instance.data["audio"] = None
class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
@ -19,7 +90,8 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
order = pyblish.api.ExtractorOrder - 0.44
label = "Extract OTIO Audio Tracks"
hosts = ["hiero", "resolve", "flame"]
temp_dir_path = None
def process(self, context):
"""Convert otio audio track's content to audio representations
@ -28,13 +100,14 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
context (pyblish.Context): context of publisher
"""
# split the long audio file into pieces divided by instances
audio_instances = self.get_audio_instances(context)
self.log.debug("Audio instances: {}".format(len(audio_instances)))
audio_instances = get_audio_instances(context)
if len(audio_instances) < 1:
self.log.info("No audio instances available")
# no need to continue if no audio instances found
if not audio_instances:
return
self.log.debug("Audio instances: {}".format(len(audio_instances)))
# get sequence
otio_timeline = context.data["otioTimeline"]
@ -44,8 +117,8 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
if not audio_inputs:
return
# temp file
audio_temp_fpath = self.create_temp_file("audio")
# Convert all available audio into single file for trimming
audio_temp_fpath = self.create_temp_file("timeline_audio_track")
# create empty audio with longest duration
empty = self.create_empty(audio_inputs)
@ -59,19 +132,25 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
# remove empty
os.remove(empty["mediaPath"])
# create mapped instances by parent id
instances_by_parent_id = map_instances_by_parent_id(context)
# cut instance framerange and add to representations
self.add_audio_to_instances(audio_temp_fpath, audio_instances)
self.add_audio_to_instances(
audio_temp_fpath, audio_instances, instances_by_parent_id)
# remove full mixed audio file
os.remove(audio_temp_fpath)
def add_audio_to_instances(self, audio_file, instances):
def add_audio_to_instances(
self, audio_file, audio_instances, instances_by_parent_id):
created_files = []
for inst in instances:
name = inst.data["folderPath"]
for audio_instance in audio_instances:
folder_path = audio_instance.data["folderPath"]
file_suffix = folder_path.replace("/", "-")
recycling_file = [f for f in created_files if name in f]
audio_clip = inst.data["otioClip"]
recycling_file = [f for f in created_files if file_suffix in f]
audio_clip = audio_instance.data["otioClip"]
audio_range = audio_clip.range_in_parent()
duration = audio_range.duration.to_frames()
@ -84,68 +163,70 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
start_sec = relative_start_time.to_seconds()
duration_sec = audio_range.duration.to_seconds()
# temp audio file
audio_fpath = self.create_temp_file(name)
# shot related audio file
shot_audio_fpath = self.create_temp_file(file_suffix)
cmd = get_ffmpeg_tool_args(
"ffmpeg",
"-ss", str(start_sec),
"-t", str(duration_sec),
"-i", audio_file,
audio_fpath
shot_audio_fpath
)
# run subprocess
self.log.debug("Executing: {}".format(" ".join(cmd)))
run_subprocess(cmd, logger=self.log)
else:
audio_fpath = recycling_file.pop()
if "audio" in (
inst.data["families"] + [inst.data["productType"]]
):
# add generated audio file to created files for recycling
if shot_audio_fpath not in created_files:
created_files.append(shot_audio_fpath)
else:
shot_audio_fpath = recycling_file.pop()
# audio file needs to be published as representation
if audio_instance.data["productType"] == "audio":
# create empty representation attr
if "representations" not in inst.data:
inst.data["representations"] = []
if "representations" not in audio_instance.data:
audio_instance.data["representations"] = []
# add to representations
inst.data["representations"].append({
"files": os.path.basename(audio_fpath),
audio_instance.data["representations"].append({
"files": os.path.basename(shot_audio_fpath),
"name": "wav",
"ext": "wav",
"stagingDir": os.path.dirname(audio_fpath),
"stagingDir": os.path.dirname(shot_audio_fpath),
"frameStart": 0,
"frameEnd": duration
})
elif "reviewAudio" in inst.data.keys():
audio_attr = inst.data.get("audio") or []
# audio file needs to be reviewable too
elif "reviewAudio" in audio_instance.data.keys():
audio_attr = audio_instance.data.get("audio") or []
audio_attr.append({
"filename": audio_fpath,
"filename": shot_audio_fpath,
"offset": 0
})
inst.data["audio"] = audio_attr
audio_instance.data["audio"] = audio_attr
# add generated audio file to created files for recycling
if audio_fpath not in created_files:
created_files.append(audio_fpath)
def get_audio_instances(self, context):
"""Return only instances which are having audio in families
Args:
context (pyblish.context): context of publisher
Returns:
list: list of selected instances
"""
return [
_i for _i in context
# filter only those with audio product type or family
# and also with reviewAudio data key
if bool("audio" in (
_i.data.get("families", []) + [_i.data["productType"]])
) or _i.data.get("reviewAudio")
]
# Make sure that if the audio instance has sibling instances
# which need audio for reviewable media, the audio is also added
# to their instance data.
# Sibling instances are retrieved via the parent (shot) instance id.
parent_instance_id = audio_instance.data["parent_instance_id"]
for sibl_instance in instances_by_parent_id[parent_instance_id]:
# exclude the same audio instance
if sibl_instance.id == audio_instance.id:
continue
self.log.info(
"Adding audio to Sibling instance: "
f"{sibl_instance.data['label']}"
)
audio_attr = sibl_instance.data.get("audio") or []
audio_attr.append({
"filename": shot_audio_fpath,
"offset": 0
})
sibl_instance.data["audio"] = audio_attr
def get_audio_track_items(self, otio_timeline):
"""Get all audio clips form OTIO audio tracks
@ -321,19 +402,23 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
os.remove(filters_tmp_filepath)
def create_temp_file(self, name):
def create_temp_file(self, file_suffix):
"""Create temp wav file
Args:
name (str): name to be used in file name
file_suffix (str): Suffix to be used in the generated file name.
Returns:
str: Path to the temporary wav file.
"""
name = name.replace("/", "_")
return os.path.normpath(
tempfile.mktemp(
prefix="pyblish_tmp_{}_".format(name),
suffix=".wav"
)
)
extension = ".wav"
# use the first 8 characters of a random hash to keep the name unique
hash = hashlib.md5(str(uuid.uuid4()).encode()).hexdigest()[:8]
file_name = f"{hash}_{file_suffix}{extension}"
if not self.temp_dir_path:
audio_temp_dir_path = tempfile.mkdtemp(prefix="AYON_audio_")
self.temp_dir_path = Path(audio_temp_dir_path)
self.temp_dir_path.mkdir(parents=True, exist_ok=True)
return (self.temp_dir_path / file_name).as_posix()

View file

@ -361,14 +361,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
if not filtered_output_defs:
self.log.debug((
"Repre: {} - All output definitions were filtered"
" out by single frame filter. Skipping"
" out by single frame filter. Skipped."
).format(repre["name"]))
continue
# Skip if file is not set
if first_input_path is None:
self.log.warning((
"Representation \"{}\" have empty files. Skipped."
"Representation \"{}\" has empty files. Skipped."
).format(repre["name"]))
continue

View file

@ -17,6 +17,7 @@ from ayon_core.lib import (
run_subprocess,
)
from ayon_core.lib.transcoding import (
MissingRGBAChannelsError,
oiio_color_convert,
get_oiio_input_and_channel_args,
get_oiio_info_for_input,
@ -477,7 +478,16 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
return False
input_info = get_oiio_info_for_input(src_path, logger=self.log)
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
try:
input_arg, channels_arg = get_oiio_input_and_channel_args(
input_info
)
except MissingRGBAChannelsError:
self.log.debug(
"Unable to find relevant reviewable channel for thumbnail "
"creation"
)
return False
oiio_cmd = get_oiio_tool_args(
"oiiotool",
input_arg, src_path,

View file

@ -1,6 +1,7 @@
from operator import attrgetter
import dataclasses
import os
import platform
from typing import Any, Dict, List
import pyblish.api
@ -179,6 +180,8 @@ def get_instance_uri_path(
# Ensure `None` for now is also a string
path = str(path)
if platform.system().lower() == "windows":
path = path.replace("\\", "/")
return path

View file

@ -1,5 +1,6 @@
import logging
import re
import copy
from typing import (
Union,
List,
@ -1098,7 +1099,7 @@ class CreateModel:
creator_attributes[key] = attr_def.default
elif attr_def.is_value_valid(value):
creator_attributes[key] = value
creator_attributes[key] = copy.deepcopy(value)
def _set_instances_publish_attr_values(
self, instance_ids, plugin_name, key, value

View file

@ -678,13 +678,8 @@ class PublisherWindow(QtWidgets.QDialog):
self._help_dialog.show()
window = self.window()
if hasattr(QtWidgets.QApplication, "desktop"):
desktop = QtWidgets.QApplication.desktop()
screen_idx = desktop.screenNumber(window)
screen_geo = desktop.screenGeometry(screen_idx)
else:
screen = window.screen()
screen_geo = screen.geometry()
screen = window.screen()
screen_geo = screen.geometry()
window_geo = window.geometry()
dialog_x = window_geo.x() + window_geo.width()

View file

@ -41,6 +41,7 @@ class PushToContextController:
self._process_item_id = None
self._use_original_name = False
self._version_up = False
self.set_source(project_name, version_ids)
@ -212,7 +213,7 @@ class PushToContextController:
self._user_values.variant,
comment=self._user_values.comment,
new_folder_name=self._user_values.new_folder_name,
dst_version=1,
version_up=self._version_up,
use_original_name=self._use_original_name,
)
item_ids.append(item_id)
@ -229,6 +230,9 @@ class PushToContextController:
thread.start()
return item_ids
def set_version_up(self, state):
self._version_up = state
def wait_for_process_thread(self):
if self._process_thread is None:
return

View file

@ -89,7 +89,7 @@ class ProjectPushItem:
variant,
comment,
new_folder_name,
dst_version,
version_up,
item_id=None,
use_original_name=False
):
@ -100,7 +100,7 @@ class ProjectPushItem:
self.dst_project_name = dst_project_name
self.dst_folder_id = dst_folder_id
self.dst_task_name = dst_task_name
self.dst_version = dst_version
self.version_up = version_up
self.variant = variant
self.new_folder_name = new_folder_name
self.comment = comment or ""
@ -118,7 +118,7 @@ class ProjectPushItem:
str(self.dst_folder_id),
str(self.new_folder_name),
str(self.dst_task_name),
str(self.dst_version),
str(self.version_up),
self.use_original_name
])
return self._repr_value
@ -133,7 +133,7 @@ class ProjectPushItem:
"dst_project_name": self.dst_project_name,
"dst_folder_id": self.dst_folder_id,
"dst_task_name": self.dst_task_name,
"dst_version": self.dst_version,
"version_up": self.version_up,
"variant": self.variant,
"comment": self.comment,
"new_folder_name": self.new_folder_name,
@ -948,10 +948,22 @@ class ProjectPushItemProcess:
self._product_entity = product_entity
return product_entity
src_attrib = self._src_product_entity["attrib"]
dst_attrib = {}
for key in {
"description",
"productGroup",
}:
value = src_attrib.get(key)
if value:
dst_attrib[key] = value
product_entity = new_product_entity(
product_name,
product_type,
folder_id,
attribs=dst_attrib
)
self._operations.create_entity(
project_name, "product", product_entity
@ -962,7 +974,7 @@ class ProjectPushItemProcess:
"""Make sure version document exits in database."""
project_name = self._item.dst_project_name
version = self._item.dst_version
version_up = self._item.version_up
src_version_entity = self._src_version_entity
product_entity = self._product_entity
product_id = product_entity["id"]
@ -990,27 +1002,29 @@ class ProjectPushItemProcess:
"description",
"intent",
}:
if key in src_attrib:
dst_attrib[key] = src_attrib[key]
value = src_attrib.get(key)
if value:
dst_attrib[key] = value
if version is None:
last_version_entity = ayon_api.get_last_version_by_product_id(
project_name, product_id
last_version_entity = ayon_api.get_last_version_by_product_id(
project_name, product_id
)
if last_version_entity is None:
dst_version = get_versioning_start(
project_name,
self.host_name,
task_name=self._task_info.get("name"),
task_type=self._task_info.get("taskType"),
product_type=product_type,
product_name=product_entity["name"],
)
if last_version_entity:
version = int(last_version_entity["version"]) + 1
else:
version = get_versioning_start(
project_name,
self.host_name,
task_name=self._task_info.get("name"),
task_type=self._task_info.get("taskType"),
product_type=product_type,
product_name=product_entity["name"],
)
else:
dst_version = int(last_version_entity["version"])
if version_up:
dst_version += 1
existing_version_entity = ayon_api.get_version_by_name(
project_name, version, product_id
project_name, dst_version, product_id
)
thumbnail_id = self._copy_version_thumbnail()
@ -1032,7 +1046,7 @@ class ProjectPushItemProcess:
copied_status = self._get_transferable_status(src_version_entity)
version_entity = new_version_entity(
version,
dst_version,
product_id,
author=src_version_entity["author"],
status=copied_status,
@ -1380,7 +1394,7 @@ class IntegrateModel:
variant,
comment,
new_folder_name,
dst_version,
version_up,
use_original_name
):
"""Create new item for integration.
@ -1394,7 +1408,7 @@ class IntegrateModel:
variant (str): Variant name.
comment (Union[str, None]): Comment.
new_folder_name (Union[str, None]): New folder name.
dst_version (int): Destination version number.
version_up (bool): Should destination product be versioned up.
use_original_name (bool): If original product names should be used.
Returns:
@ -1411,7 +1425,7 @@ class IntegrateModel:
variant,
comment=comment,
new_folder_name=new_folder_name,
dst_version=dst_version,
version_up=version_up,
use_original_name=use_original_name
)
process_item = ProjectPushItemProcess(self, item)

View file

@ -144,6 +144,8 @@ class PushToContextSelectWindow(QtWidgets.QWidget):
variant_input.setPlaceholderText("< Variant >")
variant_input.setObjectName("ValidatedLineEdit")
version_up_checkbox = NiceCheckbox(True, parent=inputs_widget)
comment_input = PlaceholderLineEdit(inputs_widget)
comment_input.setPlaceholderText("< Publish comment >")
@ -153,7 +155,11 @@ class PushToContextSelectWindow(QtWidgets.QWidget):
inputs_layout.addRow("New folder name", folder_name_input)
inputs_layout.addRow("Variant", variant_input)
inputs_layout.addRow(
"Use original product names", original_names_checkbox)
"Use original product names", original_names_checkbox
)
inputs_layout.addRow(
"Version up existing Product", version_up_checkbox
)
inputs_layout.addRow("Comment", comment_input)
main_splitter.addWidget(context_widget)
@ -209,8 +215,11 @@ class PushToContextSelectWindow(QtWidgets.QWidget):
"Show error detail dialog to copy full error."
)
original_names_checkbox.setToolTip(
"Required for multi copy, doesn't allow changes "
"variant values."
"Required for multi copy, doesn't allow changes variant values."
)
version_up_checkbox.setToolTip(
"Version up existing product. If not selected version will be "
"updated."
)
overlay_close_btn = QtWidgets.QPushButton(
@ -259,6 +268,8 @@ class PushToContextSelectWindow(QtWidgets.QWidget):
library_only_checkbox.stateChanged.connect(self._on_library_only_change)
original_names_checkbox.stateChanged.connect(
self._on_original_names_change)
version_up_checkbox.stateChanged.connect(
self._on_version_up_checkbox_change)
publish_btn.clicked.connect(self._on_select_click)
cancel_btn.clicked.connect(self._on_close_click)
@ -308,6 +319,7 @@ class PushToContextSelectWindow(QtWidgets.QWidget):
self._folder_name_input = folder_name_input
self._comment_input = comment_input
self._use_original_names_checkbox = original_names_checkbox
self._library_only_checkbox = library_only_checkbox
self._publish_btn = publish_btn
@ -328,6 +340,7 @@ class PushToContextSelectWindow(QtWidgets.QWidget):
self._new_folder_name_input_text = None
self._variant_input_text = None
self._comment_input_text = None
self._version_up_checkbox = version_up_checkbox
self._first_show = True
self._show_timer = show_timer
@ -344,6 +357,7 @@ class PushToContextSelectWindow(QtWidgets.QWidget):
show_detail_btn.setVisible(False)
overlay_close_btn.setVisible(False)
overlay_try_btn.setVisible(False)
version_up_checkbox.setChecked(False)
# Support of public api function of controller
def set_source(self, project_name, version_ids):
@ -376,7 +390,6 @@ class PushToContextSelectWindow(QtWidgets.QWidget):
self._invalidate_new_folder_name(
new_folder_name, user_values["is_new_folder_name_valid"]
)
self._controller._invalidate()
self._projects_combobox.refresh()
def _on_first_show(self):
@ -415,14 +428,18 @@ class PushToContextSelectWindow(QtWidgets.QWidget):
self._comment_input_text = text
self._user_input_changed_timer.start()
def _on_library_only_change(self, state: int) -> None:
def _on_library_only_change(self) -> None:
"""Change toggle state, reset filter, recalculate dropdown"""
state = bool(state)
self._projects_combobox.set_standard_filter_enabled(state)
is_checked = self._library_only_checkbox.isChecked()
self._projects_combobox.set_standard_filter_enabled(is_checked)
def _on_original_names_change(self, state: int) -> None:
use_original_name = bool(state)
self._invalidate_use_original_names(use_original_name)
def _on_original_names_change(self) -> None:
is_checked = self._use_original_names_checkbox.isChecked()
self._invalidate_use_original_names(is_checked)
def _on_version_up_checkbox_change(self) -> None:
is_checked = self._version_up_checkbox.isChecked()
self._controller.set_version_up(is_checked)
def _on_user_input_timer(self):
folder_name_enabled = self._new_folder_name_enabled

View file

@ -1,4 +1,3 @@
import qtpy
from qtpy import QtWidgets, QtCore, QtGui
@ -6,7 +5,7 @@ class PickScreenColorWidget(QtWidgets.QWidget):
color_selected = QtCore.Signal(QtGui.QColor)
def __init__(self, parent=None):
super(PickScreenColorWidget, self).__init__(parent)
super().__init__(parent)
self.labels = []
self.magnification = 2
@ -53,7 +52,7 @@ class PickLabel(QtWidgets.QLabel):
close_session = QtCore.Signal()
def __init__(self, pick_widget):
super(PickLabel, self).__init__()
super().__init__()
self.setMouseTracking(True)
self.pick_widget = pick_widget
@ -74,14 +73,10 @@ class PickLabel(QtWidgets.QLabel):
self.show()
self.windowHandle().setScreen(screen_obj)
geo = screen_obj.geometry()
args = (
QtWidgets.QApplication.desktop().winId(),
pix = screen_obj.grabWindow(
self.winId(),
geo.x(), geo.y(), geo.width(), geo.height()
)
if qtpy.API in ("pyqt4", "pyside"):
pix = QtGui.QPixmap.grabWindow(*args)
else:
pix = screen_obj.grabWindow(*args)
if pix.width() > pix.height():
size = pix.height()

View file

@ -53,14 +53,8 @@ def checkstate_enum_to_int(state):
def center_window(window):
"""Move window to center of it's screen."""
if hasattr(QtWidgets.QApplication, "desktop"):
desktop = QtWidgets.QApplication.desktop()
screen_idx = desktop.screenNumber(window)
screen_geo = desktop.screenGeometry(screen_idx)
else:
screen = window.screen()
screen_geo = screen.geometry()
screen = window.screen()
screen_geo = screen.geometry()
geo = window.frameGeometry()
geo.moveCenter(screen_geo.center())

View file

@ -0,0 +1,158 @@
import unittest
from ayon_core.lib.transcoding import (
get_review_info_by_layer_name
)
class GetReviewInfoByLayerName(unittest.TestCase):
"""Test responses from `get_review_info_by_layer_name`"""
def test_rgba_channels(self):
# RGB is supported
info = get_review_info_by_layer_name(["R", "G", "B"])
self.assertEqual(info, [{
"name": "",
"review_channels": {
"R": "R",
"G": "G",
"B": "B",
"A": None,
}
}])
# rgb is supported
info = get_review_info_by_layer_name(["r", "g", "b"])
self.assertEqual(info, [{
"name": "",
"review_channels": {
"R": "r",
"G": "g",
"B": "b",
"A": None,
}
}])
# diffuse.[RGB] is supported
info = get_review_info_by_layer_name(
["diffuse.R", "diffuse.G", "diffuse.B"]
)
self.assertEqual(info, [{
"name": "diffuse",
"review_channels": {
"R": "diffuse.R",
"G": "diffuse.G",
"B": "diffuse.B",
"A": None,
}
}])
info = get_review_info_by_layer_name(["R", "G", "B", "A"])
self.assertEqual(info, [{
"name": "",
"review_channels": {
"R": "R",
"G": "G",
"B": "B",
"A": "A",
}
}])
def test_z_channel(self):
info = get_review_info_by_layer_name(["Z"])
self.assertEqual(info, [{
"name": "",
"review_channels": {
"R": "Z",
"G": "Z",
"B": "Z",
"A": None,
}
}])
info = get_review_info_by_layer_name(["Z", "A"])
self.assertEqual(info, [{
"name": "",
"review_channels": {
"R": "Z",
"G": "Z",
"B": "Z",
"A": "A",
}
}])
def test_ar_ag_ab_channels(self):
info = get_review_info_by_layer_name(["AR", "AG", "AB"])
self.assertEqual(info, [{
"name": "",
"review_channels": {
"R": "AR",
"G": "AG",
"B": "AB",
"A": None,
}
}])
info = get_review_info_by_layer_name(["AR", "AG", "AB", "A"])
self.assertEqual(info, [{
"name": "",
"review_channels": {
"R": "AR",
"G": "AG",
"B": "AB",
"A": "A",
}
}])
def test_unknown_channels(self):
info = get_review_info_by_layer_name(["hello", "world"])
self.assertEqual(info, [])
def test_rgba_priority(self):
"""Ensure main layer, and RGB channels are prioritized
If both Z and RGB channels are present for a layer name, then RGB
should be prioritized and the Z channel should be ignored.
Also, the alpha channel from another "layer name" is not used. Note
how the diffuse response does not take A channel from the main layer.
"""
info = get_review_info_by_layer_name([
"Z",
"diffuse.R", "diffuse.G", "diffuse.B",
"R", "G", "B", "A",
"specular.R", "specular.G", "specular.B", "specular.A",
])
self.assertEqual(info, [
{
"name": "",
"review_channels": {
"R": "R",
"G": "G",
"B": "B",
"A": "A",
},
},
{
"name": "diffuse",
"review_channels": {
"R": "diffuse.R",
"G": "diffuse.G",
"B": "diffuse.B",
"A": None,
},
},
{
"name": "specular",
"review_channels": {
"R": "specular.R",
"G": "specular.G",
"B": "specular.B",
"A": "specular.A",
},
},
])