Mirror of https://github.com/ynput/ayon-core.git (synced 2026-01-01 16:34:53 +01:00)
Merge branch 'develop' into bugfix/YN-0273_big_resolution_thumbnail_ftrack

Commit 46a8db48e7
7 changed files with 126 additions and 69 deletions
@@ -134,16 +134,29 @@ def get_transcode_temp_directory():
     )
 
 
-def get_oiio_info_for_input(filepath, logger=None, subimages=False):
+def get_oiio_info_for_input(
+    filepath: str,
+    *,
+    subimages: bool = False,
+    verbose: bool = True,
+    logger: logging.Logger = None,
+):
     """Call oiiotool to get information about input and return stdout.
 
+    Args:
+        filepath (str): Path to file.
+        subimages (bool): include info about subimages in the output.
+        verbose (bool): get the full metadata about each input image.
+        logger (logging.Logger): Logger used for logging.
+
     Stdout should contain xml format string.
     """
     args = get_oiio_tool_args(
         "oiiotool",
         "--info",
-        "-v"
     )
+    if verbose:
+        args.append("-v")
     if subimages:
         args.append("-a")
 
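For reference, a minimal call sketch against the new keyword-only signature. The import path is an assumption based on where the transcoding helpers usually live; adjust it to your checkout.

    # Sketch only: module path assumed, not shown in this diff.
    from ayon_core.lib.transcoding import get_oiio_info_for_input

    # Full metadata (verbose defaults to True, so "-v" is passed to oiiotool).
    info = get_oiio_info_for_input("/path/to/image.exr")

    # Cheaper call when only basic attributes (resolution, channel names)
    # are needed -- "-v" is skipped entirely.
    basic_info = get_oiio_info_for_input(
        "/path/to/image.exr",
        verbose=False,
        subimages=True,
    )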
@@ -573,7 +586,10 @@ def get_review_layer_name(src_filepath):
         return None
 
     # Load info about file from oiio tool
-    input_info = get_oiio_info_for_input(src_filepath)
+    input_info = get_oiio_info_for_input(
+        src_filepath,
+        verbose=False,
+    )
     if not input_info:
         return None
 
@@ -1234,7 +1250,11 @@ def oiio_color_convert(
     for token in ["#", "%d"]:
         first_input_path = first_input_path.replace(token, first_frame)
 
-    input_info = get_oiio_info_for_input(first_input_path, logger=logger)
+    input_info = get_oiio_info_for_input(
+        first_input_path,
+        verbose=False,
+        logger=logger,
+    )
 
     # Collect channels to export
     input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
@@ -1448,7 +1468,11 @@ def get_rescaled_command_arguments(
         command_args.extend(["-vf", "{0},{1}".format(scale, pad)])
 
     elif application == "oiiotool":
-        input_info = get_oiio_info_for_input(input_path, logger=log)
+        input_info = get_oiio_info_for_input(
+            input_path,
+            verbose=False,
+            logger=log,
+        )
         # Collect channels to export
         _, channels_arg = get_oiio_input_and_channel_args(
             input_info, alpha_default=1.0)
@@ -1539,7 +1563,11 @@ def _get_image_dimensions(application, input_path, log):
     # fallback for weird files with width=0, height=0
     if (input_width == 0 or input_height == 0) and application == "oiiotool":
         # Load info about file from oiio tool
-        input_info = get_oiio_info_for_input(input_path, logger=log)
+        input_info = get_oiio_info_for_input(
+            input_path,
+            verbose=False,
+            logger=log,
+        )
         if input_info:
             input_width = int(input_info["width"])
             input_height = int(input_info["height"])
@@ -1588,10 +1616,13 @@ def get_oiio_input_and_channel_args(oiio_input_info, alpha_default=None):
     """Get input and channel arguments for oiiotool.
 
     Args:
        oiio_input_info (dict): Information about input from oiio tool.
-           Should be output of function `get_oiio_info_for_input`.
+           Should be output of function 'get_oiio_info_for_input' (can be
+               called with 'verbose=False').
        alpha_default (float, optional): Default value for alpha channel.
 
     Returns:
        tuple[str, str]: Tuple of input and channel arguments.
 
     """
     channel_names = oiio_input_info["channelnames"]
     review_channels = get_convert_rgb_channels(channel_names)
@@ -299,7 +299,6 @@ def add_ordered_sublayer(layer, contribution_path, layer_id, order=None,
             sdf format args metadata if enabled)
 
     """
-
     # Add the order with the contribution path so that for future
     # contributions we can again use it to magically fit into the
     # ordering. We put this in the path because sublayer paths do
@@ -317,20 +316,25 @@ def add_ordered_sublayer(layer, contribution_path, layer_id, order=None,
     # If the layer was already in the layers, then replace it
     for index, existing_path in enumerate(layer.subLayerPaths):
         args = get_sdf_format_args(existing_path)
-        existing_layer = args.get("layer_id")
-        if existing_layer == layer_id:
+        existing_layer_id = args.get("layer_id")
+        if existing_layer_id == layer_id:
+            existing_layer = layer.subLayerPaths[index]
+            existing_order = args.get("order")
+            existing_order = int(existing_order) if existing_order else None
+            if order is not None and order != existing_order:
+                # We need to move the layer, so we will remove this index
+                # and then re-insert it below at the right order
+                log.debug(f"Removing existing layer: {existing_layer}")
+                del layer.subLayerPaths[index]
+                break
+
             # Put it in the same position where it was before when swapping
             # it with the original, also take over its order metadata
-            order = args.get("order")
-            if order is not None:
-                order = int(order)
-            else:
-                order = None
             contribution_path = _format_path(contribution_path,
-                                             order=order,
+                                             order=existing_order,
                                              layer_id=layer_id)
             log.debug(
-                f"Replacing existing layer: {layer.subLayerPaths[index]} "
+                f"Replacing existing layer: {existing_layer} "
                 f"-> {contribution_path}"
             )
             layer.subLayerPaths[index] = contribution_path
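The replace-or-move branch above is easier to follow outside diff form. A standalone sketch of the same decision, with hypothetical names and a plain list of dicts standing in for the sublayer paths and their metadata (this is not the usdlib API):

    # Illustrative sketch only; mirrors the structure of the change above.
    def replace_or_requeue(sublayers, layer_id, new_path, order=None):
        """Return (sublayers, needs_reinsert)."""
        for index, entry in enumerate(sublayers):
            if entry["layer_id"] != layer_id:
                continue
            existing_order = entry.get("order")
            if order is not None and order != existing_order:
                # Order changed: drop the entry here so the caller can
                # re-insert it at the position its new order dictates.
                del sublayers[index]
                return sublayers, True
            # Same layer, same (or unspecified) order: swap the path in
            # place and keep the existing order metadata.
            entry["path"] = new_path
            entry["order"] = existing_order
            return sublayers, False
        return sublayers, True  # not found: needs an ordered insert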
@@ -508,7 +508,11 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
             )
             return False
 
-        input_info = get_oiio_info_for_input(src_path, logger=self.log)
+        input_info = get_oiio_info_for_input(
+            src_path,
+            logger=self.log,
+            verbose=False,
+        )
         try:
             input_arg, channels_arg = get_oiio_input_and_channel_args(
                 input_info
@@ -16,7 +16,7 @@ from ayon_core.lib import (
     UISeparatorDef,
     UILabelDef,
     EnumDef,
-    filter_profiles
+    filter_profiles, NumberDef
 )
 try:
     from ayon_core.pipeline.usdlib import (
@@ -275,7 +275,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
     # the contributions so that we can design a system where custom
     # contributions outside the predefined orders are possible to be
     # managed. So that if a particular asset requires an extra contribution
-    # level, you can add itdirectly from the publisher at that particular
+    # level, you can add it directly from the publisher at that particular
     # order. Future publishes will then see the existing contribution and will
     # persist adding it to future bootstraps at that order
     contribution_layers: Dict[str, int] = {
@@ -334,10 +334,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
             attr_values[key] = attr_values[key].format(**data)
 
         # Define contribution
-        order = self.contribution_layers.get(
-            attr_values["contribution_layer"], 0
-        )
+        in_layer_order: int = attr_values.get("contribution_in_layer_order", 0)
 
         if attr_values["contribution_apply_as_variant"]:
             contribution = VariantContribution(
                 instance=instance,
@@ -346,18 +343,21 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
                 variant_set_name=attr_values["contribution_variant_set_name"],
                 variant_name=attr_values["contribution_variant"],
                 variant_is_default=attr_values["contribution_variant_is_default"],  # noqa: E501
-                order=order
+                order=in_layer_order
             )
         else:
             contribution = SublayerContribution(
                 instance=instance,
                 layer_id=attr_values["contribution_layer"],
                 target_product=attr_values["contribution_target_product"],
-                order=order
+                order=in_layer_order
             )
 
         asset_product = contribution.target_product
         layer_product = "{}_{}".format(asset_product, contribution.layer_id)
+        layer_order: int = self.contribution_layers.get(
+            attr_values["contribution_layer"], 0
+        )
 
         # Layer contribution instance
         layer_instance = self.get_or_create_instance(
@@ -370,7 +370,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
             contribution
         )
         layer_instance.data["usd_layer_id"] = contribution.layer_id
-        layer_instance.data["usd_layer_order"] = contribution.order
+        layer_instance.data["usd_layer_order"] = layer_order
 
         layer_instance.data["productGroup"] = (
             instance.data.get("productGroup") or "USD Layer"
@@ -561,6 +561,19 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
                     items=list(cls.contribution_layers.keys()),
                     default=default_contribution_layer,
                     visible=visible),
+            # TODO: We may want to make the visibility of this optional
+            #   based on studio preference, to avoid complexity when not needed
+            NumberDef("contribution_in_layer_order",
+                      label="Strength order",
+                      tooltip=(
+                          "The contribution inside the department layer will be "
+                          "made with this offset applied. A higher number means "
+                          "a stronger opinion."
+                      ),
+                      default=0,
+                      minimum=-99999,
+                      maximum=99999,
+                      visible=visible),
             BoolDef("contribution_apply_as_variant",
                     label="Add as variant",
                     tooltip=(
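With the new attribute, two distinct order values are in play: the department layer's position taken from `contribution_layers`, and the "Strength order" offset inside that layer. A rough sketch of how the collector resolves them; the mapping values below are made-up examples, only the key names come from the plugin:

    # Sketch only: illustrates the two orders resolved above.
    contribution_layers = {"model": 100, "look": 200, "rig": 300}  # example values

    attr_values = {
        "contribution_layer": "look",
        "contribution_in_layer_order": 5,  # new "Strength order" attribute
    }

    # Position of the whole department layer in the bootstrap
    # (stored as "usd_layer_order" on the layer instance).
    layer_order = contribution_layers.get(attr_values["contribution_layer"], 0)

    # Position of this contribution inside that department layer
    # (passed as `order=` to Variant/SublayerContribution).
    in_layer_order = attr_values.get("contribution_in_layer_order", 0)

    print(layer_order, in_layer_order)  # 200 5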
@@ -729,7 +742,7 @@ class ExtractUSDLayerContribution(publish.Extractor):
                 layer=sdf_layer,
                 contribution_path=path,
                 layer_id=product_name,
-                order=None,  # unordered
+                order=contribution.order,
                 add_sdf_arguments_metadata=True
             )
         else:
@@ -1,6 +1,8 @@
+from __future__ import annotations
+
 from abc import ABC, abstractmethod
 from dataclasses import dataclass, field
-from typing import List, Dict, Optional
+from typing import Optional
 
 
 @dataclass
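The `from __future__ import annotations` import defers annotation evaluation (PEP 563), which is what lets the built-in `list[...]`/`dict[...]` generics used below work on Python versions older than 3.9. A tiny standalone illustration, not tied to this module:

    from __future__ import annotations

    from dataclasses import dataclass, field


    @dataclass
    class Example:
        # With deferred evaluation these annotations are stored as strings,
        # so list[int] never has to be subscriptable at runtime.
        sizes: list[int] = field(default_factory=list)
        tabs: list[dict[str, str]] = field(default_factory=list)


    print(Example())  # Example(sizes=[], tabs=[])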
@@ -13,8 +15,8 @@ class TabItem:
 class InterpreterConfig:
     width: Optional[int]
     height: Optional[int]
-    splitter_sizes: List[int] = field(default_factory=list)
-    tabs: List[TabItem] = field(default_factory=list)
+    splitter_sizes: list[int] = field(default_factory=list)
+    tabs: list[TabItem] = field(default_factory=list)
 
 
 class AbstractInterpreterController(ABC):
@@ -27,7 +29,7 @@ class AbstractInterpreterController(ABC):
         self,
         width: int,
         height: int,
-        splitter_sizes: List[int],
-        tabs: List[Dict[str, str]],
-    ):
+        splitter_sizes: list[int],
+        tabs: list[dict[str, str]],
+    ) -> None:
         pass
@@ -1,4 +1,5 @@
-from typing import List, Dict
+from __future__ import annotations
+from typing import Optional
 
 from ayon_core.lib import JSONSettingRegistry
 from ayon_core.lib.local_settings import get_launcher_local_dir
@@ -11,13 +12,15 @@ from .abstract import (
 
 
 class InterpreterController(AbstractInterpreterController):
-    def __init__(self):
+    def __init__(self, name: Optional[str] = None) -> None:
+        if name is None:
+            name = "python_interpreter_tool"
         self._registry = JSONSettingRegistry(
-            "python_interpreter_tool",
+            name,
             get_launcher_local_dir(),
         )
 
-    def get_config(self):
+    def get_config(self) -> InterpreterConfig:
         width = None
         height = None
         splitter_sizes = []
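A small usage sketch of the new optional registry name. The default keeps the previous behaviour; the custom name is a hypothetical example, and the import of `InterpreterController` from the tool's control module is assumed rather than shown in this diff:

    # Default: settings stored under the original "python_interpreter_tool" key.
    controller = InterpreterController()

    # Hypothetical: a host-specific tool could isolate its own window settings.
    custom_controller = InterpreterController(name="my_host_interpreter_tool")
    config = custom_controller.get_config()  # -> InterpreterConfig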
@@ -54,9 +57,9 @@ class InterpreterController(AbstractInterpreterController):
         self,
         width: int,
         height: int,
-        splitter_sizes: List[int],
-        tabs: List[Dict[str, str]],
-    ):
+        splitter_sizes: list[int],
+        tabs: list[dict[str, str]],
+    ) -> None:
         self._registry.set_item("width", width)
         self._registry.set_item("height", height)
         self._registry.set_item("splitter_sizes", splitter_sizes)
@@ -1,42 +1,42 @@
-import os
 import sys
 import collections
 
 
+class _CustomSTD:
+    def __init__(self, orig_std, write_callback):
+        self.orig_std = orig_std
+        self._valid_orig = bool(orig_std)
+        self._write_callback = write_callback
+
+    def __getattr__(self, attr):
+        return getattr(self.orig_std, attr)
+
+    def __setattr__(self, key, value):
+        if key in ("orig_std", "_valid_orig", "_write_callback"):
+            super().__setattr__(key, value)
+        else:
+            setattr(self.orig_std, key, value)
+
+    def write(self, text):
+        if self._valid_orig:
+            self.orig_std.write(text)
+        self._write_callback(text)
+
+
 class StdOEWrap:
     def __init__(self):
-        self._origin_stdout_write = None
-        self._origin_stderr_write = None
-        self._listening = False
         self.lines = collections.deque()
 
-        if not sys.stdout:
-            sys.stdout = open(os.devnull, "w")
-
-        if not sys.stderr:
-            sys.stderr = open(os.devnull, "w")
-
-        if self._origin_stdout_write is None:
-            self._origin_stdout_write = sys.stdout.write
-
-        if self._origin_stderr_write is None:
-            self._origin_stderr_write = sys.stderr.write
-
         self._listening = True
-        sys.stdout.write = self._stdout_listener
-        sys.stderr.write = self._stderr_listener
+        self._stdout_wrap = _CustomSTD(sys.stdout, self._listener)
+        self._stderr_wrap = _CustomSTD(sys.stderr, self._listener)
+
+        sys.stdout = self._stdout_wrap
+        sys.stderr = self._stderr_wrap
 
     def stop_listen(self):
         self._listening = False
 
-    def _stdout_listener(self, text):
+    def _listener(self, text):
         if self._listening:
             self.lines.append(text)
-            if self._origin_stdout_write is not None:
-                self._origin_stdout_write(text)
-
-    def _stderr_listener(self, text):
-        if self._listening:
-            self.lines.append(text)
-            if self._origin_stderr_write is not None:
-                self._origin_stderr_write(text)
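A short usage sketch of the rewritten wrapper: stdout and stderr are replaced by `_CustomSTD` proxies that forward every attribute to the original stream and mirror `write()` calls into `StdOEWrap.lines`. Assumes the two classes above are in scope:

    import sys

    wrap = StdOEWrap()          # sys.stdout / sys.stderr become _CustomSTD proxies
    print("hello interpreter")  # goes to the real stdout AND into wrap.lines

    captured = "".join(wrap.lines)
    assert "hello interpreter" in captured

    wrap.stop_listen()          # keeps forwarding to the real streams, stops capturing
    sys.stdout.flush()          # attribute access still proxies to the original stream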