Merge branch 'feature/OP-7176_Use-folder-path-as-unique-identifier' into feature/OP-7190_Use-folder-path-as-identifier-in-editorial

Jakub Jezek 2023-11-09 14:00:29 +01:00
commit db8e8b162c
No known key found for this signature in database
GPG key ID: 730D7C02726179A7
26 changed files with 320 additions and 70 deletions

View file

@ -35,6 +35,7 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.17.6-nightly.1
- 3.17.5
- 3.17.5-nightly.3
- 3.17.5-nightly.2
@ -134,7 +135,6 @@ body:
- 3.15.1
- 3.15.1-nightly.6
- 3.15.1-nightly.5
- 3.15.1-nightly.4
validations:
required: true
- type: dropdown

View file

@ -232,10 +232,12 @@ def get_assets(
else:
new_asset_names.add(name)
yielded_ids = set()
if folder_paths:
for folder in _folders_query(
project_name, con, fields, folder_paths=folder_paths, **kwargs
):
yielded_ids.add(folder["id"])
yield convert_v4_folder_to_v3(folder, project_name)
if not new_asset_names:
@ -244,7 +246,9 @@ def get_assets(
for folder in _folders_query(
project_name, con, fields, folder_names=new_asset_names, **kwargs
):
yield convert_v4_folder_to_v3(folder, project_name)
if folder["id"] not in yielded_ids:
yielded_ids.add(folder["id"])
yield convert_v4_folder_to_v3(folder, project_name)
def get_archived_assets(
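A minimal usage sketch of the deduplication above (illustrative project and
folder path; assumes 'folder_paths' is the new keyword argument of
'get_assets'): an asset matched by both its folder path and its name is
yielded only once.

    # Hypothetical call: both filters match the same folder.
    assets = list(get_assets(
        "demo_project",
        asset_names=["sh010"],
        folder_paths=["/sequences/sq01/sh010"],
    ))
    # 'yielded_ids' ensures the folder appears once: len(assets) == 1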

View file

@ -17,7 +17,10 @@ class ExtractABC(publish.Extractor):
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -59,8 +62,8 @@ class ExtractABC(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")
class ExtractModelABC(ExtractABC):
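Illustrative values for the filename change above (hypothetical asset and
subset names), showing how the staging filename is now composed:

    # Hypothetical values for the new staging filename pattern.
    asset_name = "sh010"                     # instance.data["assetEntity"]["name"]
    subset = "modelMain"                     # instance.data["subset"]
    filename = f"{asset_name}_{subset}.abc"  # -> "sh010_modelMain.abc"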

View file

@ -17,7 +17,11 @@ class ExtractAnimationABC(publish.Extractor):
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -66,5 +70,5 @@ class ExtractAnimationABC(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -17,7 +17,10 @@ class ExtractBlend(publish.Extractor):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -28,16 +31,22 @@ class ExtractBlend(publish.Extractor):
for obj in instance:
data_blocks.add(obj)
# Pack used images in the blend files.
if obj.type == 'MESH':
for material_slot in obj.material_slots:
mat = material_slot.material
if mat and mat.use_nodes:
tree = mat.node_tree
if tree.type == 'SHADER':
for node in tree.nodes:
if node.bl_idname == 'ShaderNodeTexImage':
if node.image:
node.image.pack()
if obj.type != 'MESH':
continue
for material_slot in obj.material_slots:
mat = material_slot.material
if not(mat and mat.use_nodes):
continue
tree = mat.node_tree
if tree.type != 'SHADER':
continue
for node in tree.nodes:
if node.bl_idname != 'ShaderNodeTexImage':
continue
# Check if image is not packed already
# and pack it if not.
if node.image and node.image.packed_file is None:
node.image.pack()
bpy.data.libraries.write(filepath, data_blocks)
@ -52,5 +61,5 @@ class ExtractBlend(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -17,7 +17,10 @@ class ExtractBlendAnimation(publish.Extractor):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -50,5 +53,5 @@ class ExtractBlendAnimation(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -18,7 +18,10 @@ class ExtractCameraABC(publish.Extractor):
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -64,5 +67,5 @@ class ExtractCameraABC(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -17,7 +17,10 @@ class ExtractCamera(publish.Extractor):
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.fbx"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -73,5 +76,5 @@ class ExtractCamera(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -18,7 +18,10 @@ class ExtractFBX(publish.Extractor):
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.fbx"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -84,5 +87,5 @@ class ExtractFBX(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -86,7 +86,10 @@ class ExtractAnimationFBX(publish.Extractor):
asset_group.select_set(True)
armature.select_set(True)
fbx_filename = f"{instance.name}_{armature.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
fbx_filename = f"{instance_name}_{armature.name}.fbx"
filepath = os.path.join(stagingdir, fbx_filename)
override = plugin.create_blender_context(
@ -119,7 +122,7 @@ class ExtractAnimationFBX(publish.Extractor):
pair[1].user_clear()
bpy.data.actions.remove(pair[1])
json_filename = f"{instance.name}.json"
json_filename = f"{instance_name}.json"
json_path = os.path.join(stagingdir, json_filename)
json_dict = {
@ -158,5 +161,5 @@ class ExtractAnimationFBX(publish.Extractor):
instance.data["representations"].append(fbx_representation)
instance.data["representations"].append(json_representation)
self.log.info("Extracted instance '{}' to: {}".format(
instance.name, fbx_representation))
self.log.info(
f"Extracted instance '{instance_name}' to: {fbx_representation}")

View file

@ -212,7 +212,11 @@ class ExtractLayout(publish.Extractor):
json_data.append(json_element)
json_filename = "{}.json".format(instance.name)
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
json_filename = f"{instance_name}.json"
json_path = os.path.join(stagingdir, json_filename)
with open(json_path, "w+") as file:
@ -245,5 +249,5 @@ class ExtractLayout(publish.Extractor):
}
instance.data["representations"].append(fbx_representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, json_representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {json_representation}")

View file

@ -50,7 +50,10 @@ class ExtractPlayblast(publish.Extractor):
# get output path
stagingdir = self.staging_dir(instance)
filename = instance.name
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
filename = f"{asset_name}_{subset}"
path = os.path.join(stagingdir, filename)
self.log.debug(f"Outputting images to {path}")

View file

@ -27,7 +27,10 @@ class ExtractThumbnail(publish.Extractor):
self.log.debug("Extracting capture..")
stagingdir = self.staging_dir(instance)
filename = instance.name
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
filename = f"{asset_name}_{subset}"
path = os.path.join(stagingdir, filename)
self.log.debug(f"Outputting images to {path}")

View file

@ -774,7 +774,8 @@ class ReferenceLoader(Loader):
"ma": "mayaAscii",
"mb": "mayaBinary",
"abc": "Alembic",
"fbx": "FBX"
"fbx": "FBX",
"usd": "USD Import"
}.get(representation["name"])
assert file_type, "Unsupported representation: %s" % representation

View file

@ -1,7 +1,9 @@
import os
import difflib
import contextlib
from maya import cmds
import qargparse
from openpype.settings import get_project_settings
import openpype.hosts.maya.api.plugin
@ -128,6 +130,12 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
if not attach_to_root:
group_name = namespace
kwargs = {}
if "file_options" in options:
kwargs["options"] = options["file_options"]
if "file_type" in options:
kwargs["type"] = options["file_type"]
path = self.filepath_from_context(context)
with maintained_selection():
cmds.loadPlugin("AbcImport.mll", quiet=True)
@ -139,7 +147,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
reference=True,
returnNewNodes=True,
groupReference=attach_to_root,
groupName=group_name)
groupName=group_name,
**kwargs)
shapes = cmds.ls(nodes, shapes=True, long=True)
@ -251,3 +260,92 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
else:
self.log.warning("This version of Maya does not support locking of"
" transforms of cameras.")
class MayaUSDReferenceLoader(ReferenceLoader):
"""Reference USD file to native Maya nodes using MayaUSDImport reference"""
families = ["usd"]
representations = ["usd"]
extensions = {"usd", "usda", "usdc"}
options = ReferenceLoader.options + [
qargparse.Boolean(
"readAnimData",
label="Load anim data",
default=True,
help="Load animation data from USD file"
),
qargparse.Boolean(
"useAsAnimationCache",
label="Use as animation cache",
default=True,
help=(
"Imports geometry prims with time-sampled point data using a "
"point-based deformer that references the imported "
"USD file.\n"
"This provides better import and playback performance when "
"importing time-sampled geometry from USD, and should "
"reduce the weight of the resulting Maya scene."
)
),
qargparse.Boolean(
"importInstances",
label="Import instances",
default=True,
help=(
"Import USD instanced geometries as Maya instanced shapes. "
"Will flatten the scene otherwise."
)
),
qargparse.String(
"primPath",
label="Prim Path",
default="/",
help=(
"Name of the USD scope where traversing will begin.\n"
"The prim at the specified primPath (including the prim) will "
"be imported.\n"
"Specifying the pseudo-root (/) means you want "
"to import everything in the file.\n"
"If the passed prim path is empty, it will first try to "
"import the defaultPrim for the rootLayer if it exists.\n"
"Otherwise, it will behave as if the pseudo-root was passed "
"in."
)
)
]
file_type = "USD Import"
def process_reference(self, context, name, namespace, options):
cmds.loadPlugin("mayaUsdPlugin", quiet=True)
def bool_option(key, default):
# Shorthand for getting optional boolean file option from options
value = int(bool(options.get(key, default)))
return "{}={}".format(key, value)
def string_option(key, default):
# Shorthand for getting optional string file option from options
value = str(options.get(key, default))
return "{}={}".format(key, value)
options["file_options"] = ";".join([
string_option("primPath", default="/"),
bool_option("importInstances", default=True),
bool_option("useAsAnimationCache", default=True),
bool_option("readAnimData", default=True),
# TODO: Expose more parameters
# "preferredMaterial=none",
# "importRelativeTextures=Automatic",
# "useCustomFrameRange=0",
# "startTime=0",
# "endTime=0",
# "importUSDZTextures=0"
])
options["file_type"] = self.file_type
return super(MayaUSDReferenceLoader, self).process_reference(
context, name, namespace, options
)
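A sketch of what the loader assembles with the defaults above and how it
reaches Maya through the ReferenceLoader hunk earlier in this commit (the
'path' variable and the exact set of cmds.file flags are assumptions,
shortened for illustration):

    # Default file options built by process_reference() above.
    file_options = (
        "primPath=/;importInstances=1;useAsAnimationCache=1;readAnimData=1"
    )
    # ReferenceLoader forwards them as 'options'/'type' kwargs to cmds.file.
    nodes = cmds.file(
        path,                           # assumed: filepath_from_context(context)
        reference=True,
        returnNewNodes=True,
        groupReference=attach_to_root,
        groupName=group_name,
        options=file_options,           # kwargs["options"]
        type="USD Import",              # kwargs["type"] == self.file_type
    )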

View file

@ -43,7 +43,7 @@ from . import (
_is_installed = False
_process_id = None
_registered_root = {"_": ""}
_registered_root = {"_": {}}
_registered_host = {"_": None}
# Keep modules manager (and it's modules) in memory
# - that gives option to register modules' callbacks
@ -84,15 +84,22 @@ def register_root(path):
def registered_root():
"""Return currently registered root"""
root = _registered_root["_"]
if root:
return root
"""Return registered roots from current project anatomy.
root = legacy_io.Session.get("AVALON_PROJECTS")
if root:
return os.path.normpath(root)
return ""
Note this returns roots only for the current project and the current
platform, and only if a host was installed using 'install_host'.
Deprecated:
Please use the project 'Anatomy' to get roots. This function is still
used by core load logic, but that will change and the function will be
removed eventually. Using it in new code can cause problems in the
future.
Returns:
dict[str, str]: Root paths.
"""
return _registered_root["_"]
def install_host(host):
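A minimal sketch of the new return shape (illustrative root name and path;
real values come from the current project anatomy once a host is installed
via 'install_host'):

    # registered_root() now returns a mapping, not a single string.
    roots = registered_root()           # e.g. {"work": "/projects"}
    work_root = roots.get("work", "")   # callers pick a named root explicitly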

View file

@ -2255,11 +2255,11 @@ class CreateContext:
if task_name:
task_names_by_asset_name[asset_name].add(task_name)
asset_names = [
asset_names = {
asset_name
for asset_name in task_names_by_asset_name.keys()
if asset_name is not None
]
}
fields = {"name", "data.tasks"}
if AYON_SERVER_ENABLED:
fields |= {"data.parents"}
@ -2270,10 +2270,12 @@ class CreateContext:
))
task_names_by_asset_name = {}
asset_docs_by_name = collections.defaultdict(list)
for asset_doc in asset_docs:
asset_name = get_asset_name_identifier(asset_doc)
tasks = asset_doc.get("data", {}).get("tasks") or {}
task_names_by_asset_name[asset_name] = set(tasks.keys())
asset_docs_by_name[asset_doc["name"]].append(asset_doc)
for instance in instances:
if not instance.has_valid_asset or not instance.has_valid_task:
@ -2281,6 +2283,11 @@ class CreateContext:
if AYON_SERVER_ENABLED:
asset_name = instance["folderPath"]
if "/" not in asset_name:
asset_docs = asset_docs_by_name.get(asset_name)
if len(asset_docs) == 1:
asset_name = get_asset_name_identifier(asset_docs[0])
instance["folderPath"] = asset_name
else:
asset_name = instance["asset"]
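Illustrative behaviour of the lookup above (hypothetical data): a bare asset
name is replaced by its folder path identifier only when exactly one asset
document carries that name.

    # Hypothetical lookup mirroring the branch above.
    asset_docs = asset_docs_by_name.get("sh010")
    if asset_docs and len(asset_docs) == 1:
        # e.g. "/sq01/sh010" (built by get_asset_name_identifier)
        instance["folderPath"] = get_asset_name_identifier(asset_docs[0])
    # With two assets named "sh010" under different parents the value is
    # left untouched, because the match is ambiguous.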

View file

@ -30,7 +30,7 @@ def install():
session = session_data_from_environment(context_keys=True)
session["schema"] = "openpype:session-3.0"
session["schema"] = "openpype:session-4.0"
try:
schema.validate(session)
except schema.ValidationError as e:

View file

@ -62,8 +62,6 @@ def auto_reconnect(func):
SESSION_CONTEXT_KEYS = (
# Root directory of projects on disk
"AVALON_PROJECTS",
# Name of current Project
"AVALON_PROJECT",
# Name of current Asset

View file

@ -0,0 +1,61 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "openpype:session-4.0",
"description": "The Avalon environment",
"type": "object",
"additionalProperties": true,
"required": [
"AVALON_PROJECT"
],
"properties": {
"AVALON_PROJECT": {
"description": "Name of project",
"type": "string",
"pattern": "^\\w*$",
"example": "Hulk"
},
"AVALON_ASSET": {
"description": "Name of asset",
"type": "string",
"pattern": "^[\\/\\w]*$",
"example": "Bruce"
},
"AVALON_TASK": {
"description": "Name of task",
"type": "string",
"pattern": "^\\w*$",
"example": "modeling"
},
"AVALON_APP": {
"description": "Name of host",
"type": "string",
"pattern": "^\\w*$",
"example": "maya"
},
"AVALON_DB": {
"description": "Name of database",
"type": "string",
"pattern": "^\\w*$",
"example": "avalon",
"default": "avalon"
},
"AVALON_LABEL": {
"description": "Nice name of Avalon, used in e.g. graphical user interfaces",
"type": "string",
"example": "MyLabel",
"default": "Avalon"
},
"AVALON_TIMEOUT": {
"description": "Wherever there is a need for a timeout, this is the default value.",
"type": "string",
"pattern": "^[0-9]*$",
"default": "1000",
"example": "1000"
}
}
}
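A minimal session dict that validates against the schema above (values are
taken from the schema's own "example" fields; AVALON_PROJECT is the only
required key, and the AVALON_ASSET pattern now also allows "/" so a folder
path can serve as the identifier):

    session = {
        "AVALON_PROJECT": "Hulk",
        "AVALON_ASSET": "Bruce",     # "sequences/sq01/sh010" would also match
        "AVALON_TASK": "modeling",
        "AVALON_APP": "maya",
    }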

View file

@ -1021,10 +1021,14 @@ def _convert_traypublisher_project_settings(ayon_settings, output):
item["family"] = item.pop("product_type")
shot_add_tasks = ayon_editorial_simple["shot_add_tasks"]
# TODO: backward compatibility and remove in future
if isinstance(shot_add_tasks, dict):
shot_add_tasks = []
# aggregate shot_add_tasks items
new_shot_add_tasks = {
item["name"]: item["task_type"]
item["name"]: {"type": item["task_type"]}
for item in shot_add_tasks
}
ayon_editorial_simple["shot_add_tasks"] = new_shot_add_tasks
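An illustrative before/after for the aggregation above (hypothetical task
names):

    # Old list-of-dicts form coming from the AYON settings.
    shot_add_tasks = [{"name": "Animation", "task_type": "Animation"}]
    # Aggregated form written back to 'shot_add_tasks':
    # {"Animation": {"type": "Animation"}}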

View file

@ -580,6 +580,10 @@ class AssetsField(BaseClickableFrame):
"""Change to asset names set with last `set_selected_items` call."""
self.set_selected_items(self._origin_value)
def confirm_value(self):
self._origin_value = copy.deepcopy(self._selected_items)
self._has_value_changed = False
class TasksComboboxProxy(QtCore.QSortFilterProxyModel):
def __init__(self, *args, **kwargs):
@ -786,6 +790,15 @@ class TasksCombobox(QtWidgets.QComboBox):
self._set_is_valid(is_valid)
def confirm_value(self, asset_names):
new_task_name = self._selected_items[0]
self._origin_value = [
(asset_name, new_task_name)
for asset_name in asset_names
]
self._origin_selection = copy.deepcopy(self._selected_items)
self._has_value_changed = False
def set_selected_items(self, asset_task_combinations=None):
"""Set items for selected instances.
@ -920,6 +933,10 @@ class VariantInputWidget(PlaceholderLineEdit):
"""Change text of multiselection."""
self._multiselection_text = text
def confirm_value(self):
self._origin_value = copy.deepcopy(self._current_value)
self._has_value_changed = False
def _set_is_valid(self, valid):
if valid == self._is_valid:
return
@ -1111,6 +1128,7 @@ class GlobalAttrsWidget(QtWidgets.QWidget):
btns_layout = QtWidgets.QHBoxLayout()
btns_layout.setContentsMargins(0, 0, 0, 0)
btns_layout.addStretch(1)
btns_layout.setSpacing(5)
btns_layout.addWidget(submit_btn)
btns_layout.addWidget(cancel_btn)
@ -1161,6 +1179,7 @@ class GlobalAttrsWidget(QtWidgets.QWidget):
subset_names = set()
invalid_tasks = False
asset_names = []
for instance in self._current_instances:
new_variant_value = instance.get("variant")
if AYON_SERVER_ENABLED:
@ -1177,6 +1196,7 @@ class GlobalAttrsWidget(QtWidgets.QWidget):
if task_name is not None:
new_task_name = task_name
asset_names.append(new_asset_name)
try:
new_subset_name = self._controller.get_subset_name(
instance.creator_identifier,
@ -1218,6 +1238,15 @@ class GlobalAttrsWidget(QtWidgets.QWidget):
self._set_btns_enabled(False)
self._set_btns_visible(invalid_tasks)
if variant_value is not None:
self.variant_input.confirm_value()
if asset_name is not None:
self.asset_value_widget.confirm_value()
if task_name is not None:
self.task_value_widget.confirm_value(asset_names)
self.instance_context_changed.emit()
def _on_cancel(self):

View file

@ -15,6 +15,7 @@ from openpype.tools.utils import (
MessageOverlayObject,
PixmapLabel,
)
from openpype.tools.utils.lib import center_window
from .constants import ResetKeySequence
from .publish_report_viewer import PublishReportViewerWidget
@ -529,6 +530,7 @@ class PublisherWindow(QtWidgets.QDialog):
def _on_first_show(self):
self.resize(self.default_width, self.default_height)
self.setStyleSheet(style.load_stylesheet())
center_window(self)
self._reset_on_show = self._reset_on_first_show
def _on_show_timer(self):

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.17.5"
__version__ = "3.17.6-nightly.1"

View file

@ -5,19 +5,17 @@ from ayon_server.settings import BaseSettingsModel, task_types_enum
class ClipNameTokenizerItem(BaseSettingsModel):
_layout = "expanded"
# TODO was 'dict-modifiable', is list of dicts now, must be fixed in code
name: str = Field("#TODO", title="Tokenizer name")
name: str = Field("", title="Tokenizer name")
regex: str = Field("", title="Tokenizer regex")
class ShotAddTasksItem(BaseSettingsModel):
_layout = "expanded"
# TODO was 'dict-modifiable', is list of dicts now, must be fixed in code
name: str = Field('', title="Key")
task_type: list[str] = Field(
task_type: str = Field(
title="Task type",
default_factory=list,
enum_resolver=task_types_enum)
enum_resolver=task_types_enum
)
class ShotRenameSubmodel(BaseSettingsModel):
@ -54,7 +52,7 @@ class TokenToParentConvertorItem(BaseSettingsModel):
)
class ShotHierchySubmodel(BaseSettingsModel):
class ShotHierarchySubmodel(BaseSettingsModel):
enabled: bool = True
parents_path: str = Field(
"",
@ -102,9 +100,9 @@ class EditorialSimpleCreatorPlugin(BaseSettingsModel):
title="Shot Rename",
default_factory=ShotRenameSubmodel
)
shot_hierarchy: ShotHierchySubmodel = Field(
shot_hierarchy: ShotHierarchySubmodel = Field(
title="Shot Hierarchy",
default_factory=ShotHierchySubmodel
default_factory=ShotHierarchySubmodel
)
shot_add_tasks: list[ShotAddTasksItem] = Field(
title="Add tasks to shot",

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring addon version."""
__version__ = "0.1.2"
__version__ = "0.1.3"