Mirror of https://github.com/ynput/ayon-core.git

Merge branch 'develop' into bugfix/1450-hardcoded-template-paths

Commit 65fcdd6c07

11 changed files with 696 additions and 58 deletions

@@ -1232,6 +1232,14 @@ def oiio_color_convert(
     # Handle the different conversion cases
     # Source view and display are known
     if source_view and source_display:
+        color_convert_args = None
+        ocio_display_args = None
+        oiio_cmd.extend([
+            "--ociodisplay:inverse=1:subimages=0",
+            source_display,
+            source_view,
+        ])
+
         if target_colorspace:
             # This is a two-step conversion process since there's no direct
             # display/view to colorspace command

@@ -1241,22 +1249,25 @@ def oiio_color_convert(
         elif source_display != target_display or source_view != target_view:
             # Complete display/view pair conversion
             # - go through a reference space
-            color_convert_args = (target_display, target_view)
+            ocio_display_args = (target_display, target_view)
         else:
             color_convert_args = None
+            logger.debug(
+                "Source and target display/view pairs are identical."
+                " No color conversion needed."
+            )

         if color_convert_args:
             # Use colorconvert for colorspace target
             oiio_cmd.extend([
-                "--ociodisplay:inverse=1:subimages=0",
-                source_display,
-                source_view,
                 "--colorconvert:subimages=0",
                 *color_convert_args
             ])
+        elif ocio_display_args:
+            # Use ociodisplay for display/view target
+            oiio_cmd.extend([
+                "--ociodisplay:subimages=0",
+                *ocio_display_args
+            ])

     elif target_colorspace:
         # Standard color space to color space conversion

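For orientation, a minimal sketch of the oiiotool argument list this branch ends up building for a display/view to display/view conversion; the display and view names below are invented placeholders, not values taken from the repository:

# Hypothetical illustration: the inverse display/view transform first brings
# the source back to the config's reference space, then the target
# display/view is applied. File paths and names are placeholders.
oiio_cmd = ["oiiotool", "input.exr"]
oiio_cmd.extend([
    "--ociodisplay:inverse=1:subimages=0", "sRGB - Display", "ACES 1.0 - SDR Video",
])
oiio_cmd.extend([
    "--ociodisplay:subimages=0", "Rec.1886 - Display", "ACES 1.0 - SDR Video",
])
oiio_cmd.extend(["-o", "output.exr"])
print(" ".join(oiio_cmd))
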
@@ -1281,24 +1292,6 @@ def oiio_color_convert(
     run_subprocess(oiio_cmd, logger=logger)


-def split_cmd_args(in_args):
-    """Makes sure all entered arguments are separated in individual items.
-
-    Split each argument string with " -" to identify if string contains
-    one or more arguments.
-    Args:
-        in_args (list): of arguments ['-n', '-d uint10']
-    Returns:
-        (list): ['-n', '-d', 'uint10']
-    """
-    splitted_args = []
-    for arg in in_args:
-        if not arg.strip():
-            continue
-        splitted_args.extend(arg.split(" "))
-    return splitted_args
-
-
 def get_rescaled_command_arguments(
     application,
     input_path,

@@ -7,6 +7,7 @@ import platform
 import tempfile
 import warnings
 from copy import deepcopy
+from dataclasses import dataclass

 import ayon_api

@@ -26,6 +27,18 @@ from ayon_core.pipeline.load import get_representation_path_with_anatomy

 log = Logger.get_logger(__name__)


+@dataclass
+class ConfigData:
+    """OCIO config to use in a certain context.
+
+    When enabled and no path/template is set, the config is considered
+    invalid and will error on the OCIO path not being found. ``enabled``
+    must be False to explicitly allow OCIO to be disabled."""
+    path: str = ""
+    template: str = ""
+    enabled: bool = True
+
+
 class CachedData:
     remapping = {}
     has_compatible_ocio_package = None

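A small, self-contained sketch of how the three ConfigData states (disabled, invalid, valid) can be interpreted by a caller; the resolve_config helper is illustrative only and mirrors the checks done in get_imageio_config_preset further below:

from dataclasses import dataclass


@dataclass
class ConfigData:  # condensed copy of the dataclass introduced above
    path: str = ""
    template: str = ""
    enabled: bool = True


def resolve_config(config_data: ConfigData) -> dict:
    # Hypothetical consumer mirroring get_imageio_config_preset below.
    if not config_data.enabled:
        return {}  # OCIO management explicitly disabled
    if not config_data.path:
        # Enabled but nothing resolved -> treated as a configuration error
        raise FileExistsError("No OCIO config found in settings.")
    return {"path": config_data.path, "template": config_data.template}


print(resolve_config(ConfigData(enabled=False)))              # {}
print(resolve_config(ConfigData(path="/configs/config.ocio")))
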
@@ -710,7 +723,7 @@ def _get_config_path_from_profile_data(
         template_data (dict[str, Any]): Template data.

     Returns:
-        dict[str, str]: Config data with path and template.
+        ConfigData: Config data with path and template.
     """
     template = profile[profile_type]
     result = StringTemplate.format_strict_template(

@@ -719,12 +732,12 @@ def _get_config_path_from_profile_data(
     normalized_path = str(result.normalized())
     if not os.path.exists(normalized_path):
         log.warning(f"Path was not found '{normalized_path}'.")
-        return None
+        return ConfigData()  # Return invalid config data

-    return {
-        "path": normalized_path,
-        "template": template
-    }
+    return ConfigData(
+        path=normalized_path,
+        template=template
+    )


 def _get_global_config_data(

@@ -735,7 +748,7 @@ def _get_global_config_data(
     imageio_global,
     folder_id,
     log,
-):
+) -> ConfigData:
     """Get global config data.

     Global config from core settings is using profiles that are based on

@@ -759,8 +772,7 @@ def _get_global_config_data(
         log (logging.Logger): Logger object.

     Returns:
-        Union[dict[str, str], None]: Config data with path and template
-            or None.
+        ConfigData: Config data with path and template.

     """
     task_name = task_type = None

@@ -779,12 +791,14 @@ def _get_global_config_data(
     )
     if profile is None:
         log.info(f"No config profile matched filters {str(filter_values)}")
-        return None
+        return ConfigData(enabled=False)

     profile_type = profile["type"]
-    if profile_type in ("builtin_path", "custom_path"):
+    if profile_type in {"builtin_path", "custom_path"}:
         return _get_config_path_from_profile_data(
             profile, profile_type, template_data)
+    elif profile_type == "disabled":
+        return ConfigData(enabled=False)

     # TODO decide if this is the right name for representation
     repre_name = "ocioconfig"

@@ -798,7 +812,7 @@ def _get_global_config_data(
             "Colorspace OCIO config path cannot be set. "
             "Profile is set to published product but `Product name` is empty."
         )
-        return None
+        return ConfigData()

     folder_info = template_data.get("folder")
     if not folder_info:

@@ -819,7 +833,7 @@ def _get_global_config_data(
     )
     if not folder_entity:
         log.warning(f"Folder entity '{folder_path}' was not found..")
-        return None
+        return ConfigData()
     folder_id = folder_entity["id"]

     product_entities_by_name = {

@@ -855,7 +869,7 @@ def _get_global_config_data(
         log.info(
             f"Product '{product_name}' does not have available any versions."
         )
-        return None
+        return ConfigData()

     # Find 'ocioconfig' representation entity
     repre_entity = ayon_api.get_representation_by_name(

@@ -868,15 +882,15 @@ def _get_global_config_data(
             f"Representation '{repre_name}'"
             f" not found on product '{product_name}'."
         )
-        return None
+        return ConfigData()

     path = get_representation_path_with_anatomy(repre_entity, anatomy)
     template = repre_entity["attrib"]["template"]

-    return {
-        "path": path,
-        "template": template,
-    }
+    return ConfigData(
+        path=path,
+        template=template
+    )


 def get_imageio_config_preset(

@@ -1015,13 +1029,19 @@ def get_imageio_config_preset(
         host_ocio_config["filepath"], template_data
     )

-    if not config_data:
+    if not config_data.enabled:
+        return {}  # OCIO management disabled
+
+    if not config_data.path:
         raise FileExistsError(
             "No OCIO config found in settings. It is"
             " either missing or there is typo in path inputs"
         )

-    return config_data
+    return {
+        "path": config_data.path,
+        "template": config_data.template,
+    }


 def _get_host_config_data(templates, template_data):

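For context, a hedged sketch of how a host integration could consume the mapping returned by get_imageio_config_preset; the apply_ocio_config helper and the use of the OCIO environment variable are illustrative assumptions, not a claim about any specific host:

import os


def apply_ocio_config(config: dict) -> None:
    # Hypothetical helper: an empty dict means OCIO management is disabled.
    if not config:
        return
    os.environ["OCIO"] = config["path"]


apply_ocio_config({})  # disabled -> no-op
apply_ocio_config({"path": "/configs/config.ocio", "template": "{root}/config.ocio"})
print(os.environ.get("OCIO"))
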
@@ -41,8 +41,8 @@ def get_product_name_template(
     profiles = tools_settings["creator"]["product_name_profiles"]
     filtering_criteria = {
         "product_types": product_type,
-        "hosts": host_name,
-        "tasks": task_name,
+        "host_names": host_name,
+        "task_names": task_name,
         "task_types": task_type
     }

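A rough sketch of how such criteria dictionaries are matched by filter_profiles; the profile entries below are invented and the exact matching and scoring rules should be checked against ayon_core.lib.profiles_filtering:

from ayon_core.lib.profiles_filtering import filter_profiles

# Invented profiles; empty lists are typically treated as "match anything".
profiles = [
    {"product_types": ["render"], "host_names": ["nuke"], "template": "A"},
    {"product_types": [], "host_names": [], "template": "B"},
]
criteria = {
    "product_types": "render",
    "host_names": "nuke",
    "task_names": "compositing",
}
profile = filter_profiles(profiles, criteria)
print(profile["template"])  # the more specific first profile is expected to win
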
@@ -684,3 +684,20 @@ def get_sdf_format_args(path):
     """Return SDF_FORMAT_ARGS parsed to `dict`"""
     _raw_path, data = Sdf.Layer.SplitIdentifier(path)
     return data
+
+
+def get_standard_default_prim_name(folder_path: str) -> str:
+    """Return the AYON-specified default prim name for a folder path.
+
+    This is used e.g. for the default prim in AYON USD Contribution workflows.
+    """
+    folder_name: str = folder_path.rsplit("/", 1)[-1]
+
+    # Prim names are not allowed to start with a digit in USD. Authoring them
+    # would mean generating essentially garbage data and may result in
+    # unexpected behavior in certain USD or DCC versions, like failure to
+    # refresh in usdview or crashes in Houdini 21.
+    if folder_name and folder_name[0].isdigit():
+        folder_name = f"_{folder_name}"
+
+    return folder_name

client/ayon_core/plugins/publish/extract_oiio_postprocess.py (new file, 353 lines)

@@ -0,0 +1,353 @@
from __future__ import annotations
from typing import Any, Optional
import os
import copy
import clique
import pyblish.api

from ayon_core.pipeline import (
    publish,
    get_temp_dir
)
from ayon_core.lib import (
    is_oiio_supported,
    get_oiio_tool_args,
    run_subprocess
)
from ayon_core.lib.transcoding import IMAGE_EXTENSIONS
from ayon_core.lib.profiles_filtering import filter_profiles


class ExtractOIIOPostProcess(publish.Extractor):
    """Process representations through `oiiotool` with profile defined
    settings so that e.g. color space conversions can be applied or images
    could be converted to scanline, resized, etc. regardless of colorspace
    data.
    """

    label = "OIIO Post Process"
    order = pyblish.api.ExtractorOrder + 0.020

    settings_category = "core"

    optional = True

    # Supported extensions
    supported_exts = {ext.lstrip(".") for ext in IMAGE_EXTENSIONS}

    # Configurable by Settings
    profiles = None
    options = None

    def process(self, instance):
        if instance.data.get("farm"):
            self.log.debug("Should be processed on farm, skipping.")
            return

        if not self.profiles:
            self.log.debug("No profiles present for OIIO Post Process")
            return

        if not instance.data.get("representations"):
            self.log.debug("No representations, skipping.")
            return

        if not is_oiio_supported():
            self.log.warning("OIIO not supported, no transcoding possible.")
            return

        new_representations = []
        for idx, repre in enumerate(list(instance.data["representations"])):
            self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
            if not self._repre_is_valid(repre):
                continue

            # We check profile per representation name and extension because
            # it's included in the profile check. As such, an instance may
            # have a different profile applied per representation.
            profile = self._get_profile(
                instance,
                repre
            )
            if not profile:
                continue

            # Get representation files to convert
            if isinstance(repre["files"], list):
                repre_files_to_convert = copy.deepcopy(repre["files"])
            else:
                repre_files_to_convert = [repre["files"]]

            added_representations = False
            added_review = False

            # Process each output definition
            for output_def in profile["outputs"]:

                # Local copy to avoid accidental mutable changes
                files_to_convert = list(repre_files_to_convert)

                output_name = output_def["name"]
                new_repre = copy.deepcopy(repre)

                original_staging_dir = new_repre["stagingDir"]
                new_staging_dir = get_temp_dir(
                    project_name=instance.context.data["projectName"],
                    use_local_temp=True,
                )
                new_repre["stagingDir"] = new_staging_dir

                output_extension = output_def["extension"]
                output_extension = output_extension.replace('.', '')
                self._rename_in_representation(new_repre,
                                               files_to_convert,
                                               output_name,
                                               output_extension)

                sequence_files = self._translate_to_sequence(files_to_convert)
                self.log.debug("Files to convert: {}".format(sequence_files))
                for file_name in sequence_files:
                    if isinstance(file_name, clique.Collection):
                        # Convert to filepath that can be directly converted
                        # by oiio like `frame.1001-1025%04d.exr`
                        file_name: str = file_name.format(
                            "{head}{range}{padding}{tail}"
                        )

                    self.log.debug("Transcoding file: `{}`".format(file_name))
                    input_path = os.path.join(original_staging_dir,
                                              file_name)
                    output_path = self._get_output_file_path(input_path,
                                                             new_staging_dir,
                                                             output_extension)

                    # TODO: Support formatting with dynamic keys from the
                    #  representation, like e.g. colorspace config, display,
                    #  view, etc.
                    input_arguments: list[str] = output_def.get(
                        "input_arguments", []
                    )
                    output_arguments: list[str] = output_def.get(
                        "output_arguments", []
                    )

                    # Prepare subprocess arguments
                    oiio_cmd = get_oiio_tool_args(
                        "oiiotool",
                        *input_arguments,
                        input_path,
                        *output_arguments,
                        "-o",
                        output_path
                    )

                    self.log.debug(
                        "Conversion command: {}".format(" ".join(oiio_cmd)))
                    run_subprocess(oiio_cmd, logger=self.log)

                # cleanup temporary transcoded files
                for file_name in new_repre["files"]:
                    transcoded_file_path = os.path.join(new_staging_dir,
                                                        file_name)
                    instance.context.data["cleanupFullPaths"].append(
                        transcoded_file_path)

                custom_tags = output_def.get("custom_tags")
                if custom_tags:
                    if new_repre.get("custom_tags") is None:
                        new_repre["custom_tags"] = []
                    new_repre["custom_tags"].extend(custom_tags)

                # Add additional tags from output definition to representation
                if new_repre.get("tags") is None:
                    new_repre["tags"] = []
                for tag in output_def["tags"]:
                    if tag not in new_repre["tags"]:
                        new_repre["tags"].append(tag)

                    if tag == "review":
                        added_review = True

                # If there is only 1 file outputted then convert list to
                # string, because that'll indicate that it is not a sequence.
                if len(new_repre["files"]) == 1:
                    new_repre["files"] = new_repre["files"][0]

                # If the source representation has "review" tag, but it's not
                # part of the output definition tags, then both the
                # representations will be transcoded in ExtractReview and
                # their outputs will clash in integration.
                if "review" in repre.get("tags", []):
                    added_review = True

                new_representations.append(new_repre)
                added_representations = True

            if added_representations:
                self._mark_original_repre_for_deletion(
                    repre, profile, added_review
                )

            tags = repre.get("tags") or []
            if "delete" in tags and "thumbnail" not in tags:
                instance.data["representations"].remove(repre)

        instance.data["representations"].extend(new_representations)

    def _rename_in_representation(self, new_repre, files_to_convert,
                                  output_name, output_extension):
        """Replace old extension with new one everywhere in representation.

        Args:
            new_repre (dict)
            files_to_convert (list): of filenames from repre["files"],
                standardized to always list
            output_name (str): key of output definition from Settings,
                if "<passthrough>" token used, keep original repre name
            output_extension (str): extension from output definition
        """
        if output_name != "passthrough":
            new_repre["name"] = output_name
        if not output_extension:
            return

        new_repre["ext"] = output_extension
        new_repre["outputName"] = output_name

        renamed_files = []
        for file_name in files_to_convert:
            file_name, _ = os.path.splitext(file_name)
            file_name = '{}.{}'.format(file_name,
                                       output_extension)
            renamed_files.append(file_name)
        new_repre["files"] = renamed_files

    def _translate_to_sequence(self, files_to_convert):
        """Returns original list or a clique.Collection of a sequence.

        Uses clique to find frame sequence Collection.
        If sequence not found, it returns original list.

        Args:
            files_to_convert (list): list of file names
        Returns:
            list[str | clique.Collection]: List of filepaths or a list
                of Collections (usually one, unless there are holes)
        """
        pattern = [clique.PATTERNS["frames"]]
        collections, _ = clique.assemble(
            files_to_convert, patterns=pattern,
            assume_padded_when_ambiguous=True)
        if collections:
            if len(collections) > 1:
                raise ValueError(
                    "Too many collections {}".format(collections))

            collection = collections[0]
            # TODO: Technically oiiotool supports holes in the sequence as
            #  well using the dedicated --frames argument to specify the
            #  frames. We may want to use that too so conversions of
            #  sequences with holes will perform faster as well.
            # Separate the collection so that we have no holes/gaps per
            # collection.
            return collection.separate()

        return files_to_convert

    def _get_output_file_path(self, input_path, output_dir,
                              output_extension):
        """Create output file name path."""
        file_name = os.path.basename(input_path)
        file_name, input_extension = os.path.splitext(file_name)
        if not output_extension:
            output_extension = input_extension.replace(".", "")
        new_file_name = '{}.{}'.format(file_name,
                                       output_extension)
        return os.path.join(output_dir, new_file_name)

    def _get_profile(
        self,
        instance: pyblish.api.Instance,
        repre: dict
    ) -> Optional[dict[str, Any]]:
        """Returns profile if it should process this instance."""
        host_name = instance.context.data["hostName"]
        product_type = instance.data["productType"]
        product_name = instance.data["productName"]
        task_data = instance.data["anatomyData"].get("task", {})
        task_name = task_data.get("name")
        task_type = task_data.get("type")
        repre_name: str = repre["name"]
        repre_ext: str = repre["ext"]
        filtering_criteria = {
            "host_names": host_name,
            "product_types": product_type,
            "product_names": product_name,
            "task_names": task_name,
            "task_types": task_type,
            "representation_names": repre_name,
            "representation_exts": repre_ext,
        }
        profile = filter_profiles(self.profiles, filtering_criteria,
                                  logger=self.log)

        if not profile:
            self.log.debug(
                "Skipped instance. None of profiles in presets are for"
                f" Host: \"{host_name}\" |"
                f" Product types: \"{product_type}\" |"
                f" Product names: \"{product_name}\" |"
                f" Task name \"{task_name}\" |"
                f" Task type \"{task_type}\" |"
                f" Representation: \"{repre_name}\" (.{repre_ext})"
            )

        return profile

    def _repre_is_valid(self, repre: dict) -> bool:
        """Validation if representation should be processed.

        Args:
            repre (dict): Representation which should be checked.

        Returns:
            bool: False if can't be processed else True.
        """
        if repre.get("ext") not in self.supported_exts:
            self.log.debug((
                "Representation '{}' has unsupported extension: '{}'."
                " Skipped."
            ).format(repre["name"], repre.get("ext")))
            return False

        if not repre.get("files"):
            self.log.debug((
                "Representation '{}' has empty files. Skipped."
            ).format(repre["name"]))
            return False

        if "delete" in repre.get("tags", []):
            self.log.debug((
                "Representation '{}' has 'delete' tag. Skipped."
            ).format(repre["name"]))
            return False

        return True

    def _mark_original_repre_for_deletion(
        self,
        repre: dict,
        profile: dict,
        added_review: bool
    ):
        """If new transcoded representation created, delete old."""
        if not repre.get("tags"):
            repre["tags"] = []

        delete_original = profile["delete_original"]

        if delete_original:
            if "delete" not in repre["tags"]:
                repre["tags"].append("delete")

            if added_review and "review" in repre["tags"]:
                repre["tags"].remove("review")

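A small, self-contained illustration of the clique call used by _translate_to_sequence above; the file names are made up, and the formatted result is the `frame.1001-1003%04d.exr` style token that oiiotool accepts:

import clique

files = ["frame.1001.exr", "frame.1002.exr", "frame.1003.exr"]
collections, remainder = clique.assemble(
    files,
    patterns=[clique.PATTERNS["frames"]],
    assume_padded_when_ambiguous=True,
)
collection = collections[0]
# Contiguous sub-collections, then the oiiotool-friendly sequence token.
for part in collection.separate():
    print(part.format("{head}{range}{padding}{tail}"))  # frame.1001-1003%04d.exr
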
@@ -163,7 +163,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
         "flame",
         "unreal",
         "batchdelivery",
-        "photoshop"
+        "photoshop",
+        "substancepainter",
     ]

     settings_category = "core"

@@ -25,7 +25,8 @@ try:
         variant_nested_prim_path,
         setup_asset_layer,
         add_ordered_sublayer,
-        set_layer_defaults
+        set_layer_defaults,
+        get_standard_default_prim_name
     )
 except ImportError:
     pass

@@ -176,7 +177,12 @@ def get_instance_uri_path(

     # If for whatever reason we were unable to retrieve from the context
     # then get the path from an existing database entry
-    path = get_representation_path_by_names(**query)
+    path = get_representation_path_by_names(
+        anatomy=context.data["anatomy"],
+        **names
+    )
+    if not path:
+        raise RuntimeError(f"Unable to resolve publish path for: {names}")

     # Ensure `None` for now is also a string
     path = str(path)

@@ -494,7 +500,7 @@ class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
             "asset"
             if profile.get("contribution_target_product") == "usdAsset"
             else "shot")
-        init_as_visible = False
+        init_as_visible = True

         # Attributes logic
         publish_attributes = instance["publish_attributes"].get(

@@ -640,6 +646,7 @@ class ExtractUSDLayerContribution(publish.Extractor):
     settings_category = "core"

     use_ayon_entity_uri = False
+    enforce_default_prim = False

     def process(self, instance):

@@ -650,9 +657,18 @@ class ExtractUSDLayerContribution(publish.Extractor):
         path = get_last_publish(instance)
         if path and BUILD_INTO_LAST_VERSIONS:
             sdf_layer = Sdf.Layer.OpenAsAnonymous(path)
+
+            # If enabled in settings, ignore any default prim specified on
+            # older publish versions and always publish with the AYON
+            # standard default prim
+            if self.enforce_default_prim:
+                sdf_layer.defaultPrim = get_standard_default_prim_name(
+                    folder_path
+                )
+
             default_prim = sdf_layer.defaultPrim
         else:
-            default_prim = folder_path.rsplit("/", 1)[-1]  # use folder name
+            default_prim = get_standard_default_prim_name(folder_path)
             sdf_layer = Sdf.Layer.CreateAnonymous()
             set_layer_defaults(sdf_layer, default_prim=default_prim)

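A minimal standalone sketch of the default prim enforcement above, assuming pxr (USD) is available; the folder path is invented and get_standard_default_prim_name is inlined for brevity:

from pxr import Sdf

folder_path = "/assets/props/01_cup"  # invented example folder
folder_name = folder_path.rsplit("/", 1)[-1]
default_prim = f"_{folder_name}" if folder_name[0].isdigit() else folder_name

layer = Sdf.Layer.CreateAnonymous()
Sdf.CreatePrimInLayer(layer, f"/{default_prim}")
layer.defaultPrim = default_prim
print(layer.ExportToString())  # usda text declaring defaultPrim = "_01_cup"
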
@@ -810,7 +826,7 @@ class ExtractUSDAssetContribution(publish.Extractor):
         folder_path = instance.data["folderPath"]
         product_name = instance.data["productName"]
         self.log.debug(f"Building asset: {folder_path} > {product_name}")
-        folder_name = folder_path.rsplit("/", 1)[-1]
+        asset_name = get_standard_default_prim_name(folder_path)

         # Contribute layers to asset
         # Use existing asset and add to it, or initialize a new asset layer

@@ -828,8 +844,9 @@ class ExtractUSDAssetContribution(publish.Extractor):
             # If no existing publish of this product exists then we initialize
             # the layer as either a default asset or shot structure.
             init_type = instance.data["contribution_target_product_init"]
+            self.log.debug("Initializing layer as type: %s", init_type)
             asset_layer, payload_layer = self.init_layer(
-                asset_name=folder_name, init_type=init_type
+                asset_name=asset_name, init_type=init_type
             )

         # Author timeCodesPerSecond and framesPerSecond if the asset layer

@@ -909,7 +926,7 @@ class ExtractUSDAssetContribution(publish.Extractor):
             payload_layer.Export(payload_path, args={"format": "usda"})
             self.add_relative_file(instance, payload_path)

-    def init_layer(self, asset_name, init_type):
+    def init_layer(self, asset_name: str, init_type: str):
         """Initialize layer if no previous version exists"""

         if init_type == "asset":

@@ -3,7 +3,6 @@ name="core"
 description="AYON core addon."

 [tool.poetry.dependencies]
-python = ">=3.9.1,<3.10"
 markdown = "^3.4.1"
 clique = "1.6.*"
 jsonschema = "^2.6.0"