Mirror of https://github.com/ynput/ayon-core.git, synced 2025-12-24 12:54:40 +01:00
Merge branch 'develop' into enhancement/AY-978_hiero-colorspace-settings-aligned-with-nuke

# Conflicts:
#   server_addon/hiero/server/version.py
#   server_addon/nuke/server/version.py
This commit is contained in: 1e997ddc39

187 changed files with 3462 additions and 1214 deletions
@@ -15,6 +15,7 @@ from abc import ABCMeta, abstractmethod
import six
import appdirs
import ayon_api
from semver import VersionInfo

from ayon_core import AYON_CORE_ROOT
from ayon_core.lib import Logger, is_dev_mode_enabled

@@ -46,6 +47,11 @@ IGNORED_HOSTS_IN_AYON = {
}
IGNORED_MODULES_IN_AYON = set()

# When addon was moved from ayon-core codebase
# - this is used to log the missing addon
MOVED_ADDON_MILESTONE_VERSIONS = {
    "applications": VersionInfo(0, 2, 0),
}

# Inherit from `object` for Python 2 hosts
class _ModuleClass(object):

@@ -192,6 +198,45 @@ def _get_ayon_addons_information(bundle_info):
    return output


def _handle_moved_addons(addon_name, milestone_version, log):
    """Log message that addon version is not compatible with current core.

    The function can return path to addon client code, but that can happen
    only if ayon-core is used from code (for development), but still
    logs a warning.

    Args:
        addon_name (str): Addon name.
        milestone_version (str): Milestone addon version.
        log (logging.Logger): Logger object.

    Returns:
        Union[str, None]: Addon dir or None.
    """
    # Handle addons which were moved out of ayon-core
    # - Try to fix it by loading it directly from server addons dir in
    #   ayon-core repository. But that will work only if ayon-core is
    #   used from code.
    addon_dir = os.path.join(
        os.path.dirname(os.path.dirname(AYON_CORE_ROOT)),
        "server_addon",
        addon_name,
        "client",
    )
    if not os.path.exists(addon_dir):
        log.error((
            "Addon '{}' is not available."
            " Please update applications addon to '{}' or higher."
        ).format(addon_name, milestone_version))
        return None

    log.warning((
        "Please update '{}' addon to '{}' or higher."
        " Using client code from ayon-core repository."
    ).format(addon_name, milestone_version))
    return addon_dir


def _load_ayon_addons(openpype_modules, modules_key, log):
    """Load AYON addons based on information from server.

@@ -249,6 +294,7 @@ def _load_ayon_addons(openpype_modules, modules_key, log):
        use_dev_path = dev_addon_info.get("enabled", False)

        addon_dir = None
        milestone_version = MOVED_ADDON_MILESTONE_VERSIONS.get(addon_name)
        if use_dev_path:
            addon_dir = dev_addon_info["path"]
            if not addon_dir or not os.path.exists(addon_dir):

@@ -257,6 +303,16 @@ def _load_ayon_addons(openpype_modules, modules_key, log):
                ).format(addon_name, addon_version, addon_dir))
                continue

        elif (
            milestone_version is not None
            and VersionInfo.parse(addon_version) < milestone_version
        ):
            addon_dir = _handle_moved_addons(
                addon_name, milestone_version, log
            )
            if not addon_dir:
                continue

        elif addons_dir_exists:
            folder_name = "{}_{}".format(addon_name, addon_version)
            addon_dir = os.path.join(addons_dir, folder_name)

@@ -336,66 +392,9 @@ def _load_ayon_addons(openpype_modules, modules_key, log):
    return addons_to_skip_in_core


def _load_ayon_core_addons_dir(
    ignore_addon_names, openpype_modules, modules_key, log
):
    addons_dir = os.path.join(AYON_CORE_ROOT, "addons")
    if not os.path.exists(addons_dir):
        return

    imported_modules = []

    # Make sure that addons which already have client code are not loaded
    # from core again, with older code
    filtered_paths = []
    for name in os.listdir(addons_dir):
        if name in ignore_addon_names:
            continue
        path = os.path.join(addons_dir, name)
        if os.path.isdir(path):
            filtered_paths.append(path)

    for path in filtered_paths:
        while path in sys.path:
            sys.path.remove(path)
        sys.path.insert(0, path)

        for name in os.listdir(path):
            fullpath = os.path.join(path, name)
            if os.path.isfile(fullpath):
                basename, ext = os.path.splitext(name)
                if ext != ".py":
                    continue
            else:
                basename = name
            try:
                module = __import__(basename, fromlist=("",))
                for attr_name in dir(module):
                    attr = getattr(module, attr_name)
                    if (
                        inspect.isclass(attr)
                        and issubclass(attr, AYONAddon)
                    ):
                        new_import_str = "{}.{}".format(modules_key, basename)
                        sys.modules[new_import_str] = module
                        setattr(openpype_modules, basename, module)
                        imported_modules.append(module)
                        break

            except Exception:
                log.error(
                    "Failed to import addon '{}'.".format(fullpath),
                    exc_info=True
                )
    return imported_modules


def _load_addons_in_core(
    ignore_addon_names, openpype_modules, modules_key, log
):
    _load_ayon_core_addons_dir(
        ignore_addon_names, openpype_modules, modules_key, log
    )
    # Add current directory at first place
    # - has small differences in import logic
    hosts_dir = os.path.join(AYON_CORE_ROOT, "hosts")
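The milestone gate in `_load_ayon_addons` above compares bundle addon versions against `MOVED_ADDON_MILESTONE_VERSIONS` with semver ordering. A small runnable sketch of that comparison, using the same `semver.VersionInfo` API imported at the top of the file (the version strings here are illustrative only):

    from semver import VersionInfo

    milestone = VersionInfo(0, 2, 0)  # milestone for "applications"

    # Older addon in the bundle: fall back to client code in ayon-core.
    assert VersionInfo.parse("0.1.3") < milestone

    # Addon at or past the milestone: load it normally.
    assert not VersionInfo.parse("0.2.1") < milestone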
@@ -41,7 +41,6 @@ class CollectAERender(publish.AbstractCollectRender):

    def get_instances(self, context):
        instances = []
        instances_to_remove = []

        app_version = CollectAERender.get_stub().get_app_version()
        app_version = app_version[0:4]

@@ -117,7 +116,10 @@ class CollectAERender(publish.AbstractCollectRender):
                fps=fps,
                app_version=app_version,
                publish_attributes=inst.data.get("publish_attributes", {}),
                file_names=[item.file_name for item in render_q]
                file_names=[item.file_name for item in render_q],

                # The source instance this render instance replaces
                source_instance=inst
            )

            comp = compositions_by_id.get(comp_id)

@@ -145,10 +147,7 @@ class CollectAERender(publish.AbstractCollectRender):
                instance.families.remove("review")

            instances.append(instance)
            instances_to_remove.append(inst)

        for instance in instances_to_remove:
            context.remove(instance)
        return instances

    def get_expected_files(self, render_instance):
@@ -55,8 +55,7 @@ class BlenderAddon(AYONAddon, IHostAddon):
        )

        # Define Qt binding if not defined
        if not env.get("QT_PREFERRED_BINDING"):
            env["QT_PREFERRED_BINDING"] = "PySide2"
        env.pop("QT_PREFERRED_BINDING", None)

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
@@ -31,7 +31,7 @@ class InstallPySideToBlender(PreLaunchHook):

    def inner_execute(self):
        # Get blender's python directory
        version_regex = re.compile(r"^[2-4]\.[0-9]+$")
        version_regex = re.compile(r"^([2-4])\.[0-9]+$")

        platform = system().lower()
        executable = self.launch_context.executable.executable_path

@@ -42,7 +42,8 @@ class InstallPySideToBlender(PreLaunchHook):
        if os.path.basename(executable).lower() != expected_executable:
            self.log.info((
                f"Executable does not lead to {expected_executable} file."
                "Can't determine blender's python to check/install PySide2."
                "Can't determine blender's python to check/install"
                " Qt binding."
            ))
            return

@@ -73,6 +74,15 @@ class InstallPySideToBlender(PreLaunchHook):
            return

        version_subfolder = version_subfolders[0]
        before_blender_4 = False
        if int(version_regex.match(version_subfolder).group(1)) < 4:
            before_blender_4 = True
        # Blender 4 has Python 3.11 which does not support 'PySide2'
        # QUESTION could we always install PySide6?
        qt_binding = "PySide2" if before_blender_4 else "PySide6"
        # Use PySide6 6.6.3 because 6.7.0 had a bug
        # - 'QTextEdit' can't be added to 'QBoxLayout'
        qt_binding_version = None if before_blender_4 else "6.6.3"

        python_dir = os.path.join(versions_dir, version_subfolder, "python")
        python_lib = os.path.join(python_dir, "lib")
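The hunk above derives the Qt binding from the Blender major version captured by the new regex group: Blender 4 ships Python 3.11, which PySide2 does not support, and PySide6 is pinned to 6.6.3 to dodge the 6.7.0 layout bug. A runnable sketch of that decision (the folder names are illustrative):

    import re

    version_regex = re.compile(r"^([2-4])\.[0-9]+$")

    def pick_qt_binding(version_subfolder):
        # "3.6" -> unpinned PySide2, "4.0" -> PySide6 pinned to 6.6.3
        major = int(version_regex.match(version_subfolder).group(1))
        if major < 4:
            return "PySide2", None
        return "PySide6", "6.6.3"

    assert pick_qt_binding("3.6") == ("PySide2", None)
    assert pick_qt_binding("4.0") == ("PySide6", "6.6.3")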
@@ -116,22 +126,41 @@ class InstallPySideToBlender(PreLaunchHook):
            return

        # Check if PySide2 is installed and skip if yes
        if self.is_pyside_installed(python_executable):
        if self.is_pyside_installed(python_executable, qt_binding):
            self.log.debug("Blender has already installed PySide2.")
            return

        # Install PySide2 in blender's python
        if platform == "windows":
            result = self.install_pyside_windows(python_executable)
            result = self.install_pyside_windows(
                python_executable,
                qt_binding,
                qt_binding_version,
                before_blender_4,
            )
        else:
            result = self.install_pyside(python_executable)
            result = self.install_pyside(
                python_executable,
                qt_binding,
                qt_binding_version,
            )

        if result:
            self.log.info("Successfully installed PySide2 module to blender.")
            self.log.info(
                f"Successfully installed {qt_binding} module to blender."
            )
        else:
            self.log.warning("Failed to install PySide2 module to blender.")
            self.log.warning(
                f"Failed to install {qt_binding} module to blender."
            )

    def install_pyside_windows(self, python_executable):
    def install_pyside_windows(
        self,
        python_executable,
        qt_binding,
        qt_binding_version,
        before_blender_4,
    ):
        """Install PySide2 python module to blender's python.

        Installation requires administration rights that's why it is required

@@ -139,7 +168,6 @@ class InstallPySideToBlender(PreLaunchHook):
        administration rights.
        """
        try:
            import win32api
            import win32con
            import win32process
            import win32event

@@ -150,12 +178,37 @@ class InstallPySideToBlender(PreLaunchHook):
            self.log.warning("Couldn't import \"pywin32\" modules")
            return

        if qt_binding_version:
            qt_binding = f"{qt_binding}=={qt_binding_version}"

        try:
            # Parameters
            # - use "-m pip" as module pip to install PySide2 and argument
            #   "--ignore-installed" is to force install module to blender's
            #   site-packages and make sure it is binary compatible
            parameters = "-m pip install --ignore-installed PySide2"
            fake_exe = "fake.exe"
            site_packages_prefix = os.path.dirname(
                os.path.dirname(python_executable)
            )
            args = [
                fake_exe,
                "-m",
                "pip",
                "install",
                "--ignore-installed",
                qt_binding,
            ]
            if not before_blender_4:
                # Define prefix for site package
                # Python in blender 4.x is installing packages in AppData and
                # not in blender's directory.
                args.extend(["--prefix", site_packages_prefix])

            parameters = (
                subprocess.list2cmdline(args)
                .lstrip(fake_exe)
                .lstrip(" ")
            )

            # Execute command and ask for administrator's rights
            process_info = ShellExecuteEx(
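A note on the parameter string built above: ShellExecuteEx takes the executable and its parameters separately, so the code quotes a full argv with a placeholder first element and strips it again. `str.lstrip` removes a character set rather than a prefix, which only works here because a space follows "fake.exe". A sketch of the same Windows quoting without the placeholder trick (pure standard library, illustrative values):

    import subprocess

    args = [
        "-m", "pip", "install", "--ignore-installed", "PySide6==6.6.3",
        # Illustrative prefix path for a Blender 4.x install:
        "--prefix", r"C:\Program Files\Blender Foundation\Blender 4.0\4.0",
    ]
    # list2cmdline applies Windows argument quoting rules, so paths with
    # spaces survive the round-trip into ShellExecuteEx's parameters.
    parameters = subprocess.list2cmdline(args)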
@@ -173,20 +226,29 @@ class InstallPySideToBlender(PreLaunchHook):
        except pywintypes.error:
            pass

    def install_pyside(self, python_executable):
        """Install PySide2 python module to blender's python."""
    def install_pyside(
        self,
        python_executable,
        qt_binding,
        qt_binding_version,
    ):
        """Install Qt binding python module to blender's python."""
        if qt_binding_version:
            qt_binding = f"{qt_binding}=={qt_binding_version}"
        try:
            # Parameters
            # - use "-m pip" as module pip to install PySide2 and argument
            # - use "-m pip" as module pip to install qt binding and argument
            #   "--ignore-installed" is to force install module to blender's
            #   site-packages and make sure it is binary compatible
            # TODO find out if blender 4.x on linux/darwin does install
            #   qt binding to correct place.
            args = [
                python_executable,
                "-m",
                "pip",
                "install",
                "--ignore-installed",
                "PySide2",
                qt_binding,
            ]
            process = subprocess.Popen(
                args, stdout=subprocess.PIPE, universal_newlines=True

@@ -203,13 +265,15 @@ class InstallPySideToBlender(PreLaunchHook):
        except subprocess.SubprocessError:
            pass

    def is_pyside_installed(self, python_executable):
    def is_pyside_installed(self, python_executable, qt_binding):
        """Check if PySide2 module is in blender's pip list.

        Check that PySide2 is installed directly in blender's site-packages.
        It is possible that it is installed in user's site-packages but that
        may be incompatible with blender's python.
        """

        qt_binding_low = qt_binding.lower()
        # Get pip list from blender's python executable
        args = [python_executable, "-m", "pip", "list"]
        process = subprocess.Popen(args, stdout=subprocess.PIPE)

@@ -226,6 +290,6 @@ class InstallPySideToBlender(PreLaunchHook):
            if not line:
                continue
            package_name = line[0:package_len].strip()
            if package_name.lower() == "pyside2":
            if package_name.lower() == qt_binding_low:
                return True
        return False
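`is_pyside_installed` above scans the column-aligned output of `pip list` for the requested binding. A runnable sketch of that parsing against canned output (the package table is illustrative, and deriving the column width from the header row is one possible approach):

    output = (
        "Package    Version\n"
        "---------- -------\n"
        "pip        23.2.1\n"
        "PySide6    6.6.3\n"
    )
    lines = output.splitlines()
    package_len = len(lines[1].split(" ")[0])  # width of the Package column
    installed = {
        line[0:package_len].strip().lower()
        for line in lines[2:]
        if line
    }
    assert "pyside6" in installed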
@@ -167,7 +167,7 @@ class JsonLayoutLoader(plugin.AssetLoader):
        asset_group.empty_display_type = 'SINGLE_ARROW'
        avalon_container.objects.link(asset_group)

        self._process(libpath, asset, asset_group, None)
        self._process(libpath, asset_name, asset_group, None)

        bpy.context.scene.collection.objects.link(asset_group)
@@ -2,6 +2,7 @@ import os

import bpy

from ayon_core.lib import BoolDef
from ayon_core.pipeline import publish
from ayon_core.hosts.blender.api import plugin

@@ -17,6 +18,8 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
        if not self.is_active(instance.data):
            return

        attr_values = self.get_attr_values_from_data(instance.data)

        # Define extract output file path
        stagingdir = self.staging_dir(instance)
        folder_name = instance.data["folderEntity"]["name"]

@@ -46,7 +49,8 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
        bpy.ops.wm.alembic_export(
            filepath=filepath,
            selected=True,
            flatten=False
            flatten=False,
            subdiv_schema=attr_values.get("subdiv_schema", False)
        )

        plugin.deselect_all()

@@ -65,6 +69,21 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
        self.log.debug("Extracted instance '%s' to: %s",
                       instance.name, representation)

    @classmethod
    def get_attribute_defs(cls):
        return [
            BoolDef(
                "subdiv_schema",
                label="Alembic Mesh Subdiv Schema",
                tooltip="Export Meshes using Alembic's subdivision schema.\n"
                        "Enabling this includes creases with the export but "
                        "excludes the mesh's normals.\n"
                        "Enabling this usually results in a smaller file size "
                        "due to lack of normals.",
                default=False
            )
        ]


class ExtractModelABC(ExtractABC):
    """Extract model as ABC."""
@@ -3,8 +3,8 @@ import sys
import re
import contextlib

from ayon_core.lib import Logger

from ayon_core.lib import Logger, BoolDef, UILabelDef
from ayon_core.style import load_stylesheet
from ayon_core.pipeline import registered_host
from ayon_core.pipeline.create import CreateContext
from ayon_core.pipeline.context_tools import get_current_folder_entity

@@ -181,7 +181,6 @@ def validate_comp_prefs(comp=None, force_repair=False):

    from . import menu
    from ayon_core.tools.utils import SimplePopup
    from ayon_core.style import load_stylesheet
    dialog = SimplePopup(parent=menu.menu)
    dialog.setWindowTitle("Fusion comp has invalid configuration")

@@ -340,9 +339,7 @@ def prompt_reset_context():
    from ayon_core.tools.attribute_defs.dialog import (
        AttributeDefinitionsDialog
    )
    from ayon_core.style import load_stylesheet
    from ayon_core.lib import BoolDef, UILabelDef
    from qtpy import QtWidgets, QtCore
    from qtpy import QtCore

    definitions = [
        UILabelDef(
@@ -1,5 +1,5 @@
import os
from ayon_core.lib import PreLaunchHook
from ayon_applications import PreLaunchHook
from ayon_core.hosts.fusion import FUSION_HOST_DIR
@@ -85,7 +85,6 @@ class InstallPySideToFusion(PreLaunchHook):
        administration rights.
        """
        try:
            import win32api
            import win32con
            import win32process
            import win32event
@@ -37,14 +37,13 @@ class CollectFusionRender(
        aspect_x = comp_frame_format_prefs["AspectX"]
        aspect_y = comp_frame_format_prefs["AspectY"]

        instances = []
        instances_to_remove = []

        current_file = context.data["currentFile"]
        version = context.data["version"]

        project_entity = context.data["projectEntity"]

        instances = []
        for inst in context:
            if not inst.data.get("active", True):
                continue

@@ -91,7 +90,10 @@ class CollectFusionRender(
                frameStep=1,
                fps=comp_frame_format_prefs.get("Rate"),
                app_version=comp.GetApp().Version,
                publish_attributes=inst.data.get("publish_attributes", {})
                publish_attributes=inst.data.get("publish_attributes", {}),

                # The source instance this render instance replaces
                source_instance=inst
            )

            render_target = inst.data["creator_attributes"]["render_target"]

@@ -114,13 +116,7 @@ class CollectFusionRender(
                # to skip ExtractReview locally
                instance.families.remove("review")

            # add new instance to the list and remove the original
            # instance since it is not needed anymore
            instances.append(instance)
            instances_to_remove.append(inst)

        for instance in instances_to_remove:
            context.remove(instance)

        return instances
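Both this collector and the After Effects one earlier follow the same replace-and-remove shape that the new `source_instance` argument formalizes: a render instance is created per source instance, and the originals are removed only after iteration finishes. A minimal sketch of the pattern (`make_render_instance` is a hypothetical helper standing in for the render-instance construction above):

    instances = []
    instances_to_remove = []
    for inst in context:
        instances.append(make_render_instance(inst))  # hypothetical helper
        instances_to_remove.append(inst)

    # Mutate the context only after iterating it, so removal does not
    # invalidate the loop above.
    for inst in instances_to_remove:
        context.remove(inst)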
@@ -92,10 +92,6 @@ class PrecollectInstances(pyblish.api.ContextPlugin):

        folder_path, folder_name = self._get_folder_data(tag_data)

        product_name = tag_data.get("productName")
        if product_name is None:
            product_name = tag_data["subset"]

        families = [str(f) for f in tag_data["families"]]

        # TODO: remove backward compatibility

@@ -293,7 +289,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
            label += " {}".format(product_name)

        data.update({
            "name": "{}_{}".format(folder_path, subset),
            "name": "{}_{}".format(folder_path, product_name),
            "label": label,
            "productName": product_name,
            "productType": product_type,
@@ -811,6 +811,43 @@ def get_current_context_template_data_with_folder_attrs():
    return template_data


def set_review_color_space(opengl_node, review_color_space="", log=None):
    """Set ociocolorspace parameter for the given OpenGL node.

    Set `ociocolorspace` parameter of the given OpenGL node
    to the given review_color_space value.
    If review_color_space is empty, a default colorspace corresponding to
    the display & view of the current Houdini session will be used.

    Args:
        opengl_node (hou.Node): ROP node to set its ociocolorspace parm.
        review_color_space (str): Colorspace value for ociocolorspace parm.
        log (logging.Logger): Logger to log to.
    """

    if log is None:
        log = self.log

    # Set Color Correction parameter to OpenColorIO
    colorcorrect_parm = opengl_node.parm("colorcorrect")
    if colorcorrect_parm.eval() != 2:
        colorcorrect_parm.set(2)
        log.debug(
            "'Color Correction' parm on '{}' has been set to"
            " 'OpenColorIO'".format(opengl_node.path())
        )

    opengl_node.setParms(
        {"ociocolorspace": review_color_space}
    )

    log.debug(
        "'OCIO Colorspace' parm on '{}' has been set to "
        "the view color space '{}'"
        .format(opengl_node, review_color_space)
    )


def get_context_var_changes():
    """get context var changes."""
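A short usage sketch for the new `set_review_color_space` helper, assuming a running Houdini session (the node path and colorspace name are illustrative). Passing a logger explicitly also sidesteps the `self.log` fallback above, which is not defined at module scope:

    import logging

    import hou

    log = logging.getLogger("review_colorspace")
    rop = hou.node("/out/opengl1")  # hypothetical opengl ROP
    set_review_color_space(rop, "ACES - ACEScg", log)  # illustrative colorspace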
@@ -0,0 +1,58 @@
from ayon_applications import PreLaunchHook, LaunchTypes


class SetDefaultDisplayView(PreLaunchHook):
    """Set default view and default display for Houdini via OpenColorIO.

    Houdini's defaultDisplay and defaultView are set by
    setting 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS'
    environment variables respectively.

    More info: https://www.sidefx.com/docs/houdini/io/ocio.html#set-up
    """

    app_groups = {"houdini"}
    launch_types = {LaunchTypes.local}

    def execute(self):

        OCIO = self.launch_context.env.get("OCIO")

        # This is a cheap way to skip this hook if either global color
        # management or houdini color management was disabled because the
        # OCIO var would be set by the global OCIOEnvHook
        if not OCIO:
            return

        houdini_color_settings = \
            self.data["project_settings"]["houdini"]["imageio"]["workfile"]

        if not houdini_color_settings["enabled"]:
            self.log.info(
                "Houdini workfile color management is disabled."
            )
            return

        # 'OCIO_ACTIVE_DISPLAYS', 'OCIO_ACTIVE_VIEWS' are checked
        # as Admins can add them in Ayon env vars or Ayon tools.

        default_display = houdini_color_settings["default_display"]
        if default_display:
            # get 'OCIO_ACTIVE_DISPLAYS' value if exists.
            self._set_context_env("OCIO_ACTIVE_DISPLAYS", default_display)

        default_view = houdini_color_settings["default_view"]
        if default_view:
            # get 'OCIO_ACTIVE_VIEWS' value if exists.
            self._set_context_env("OCIO_ACTIVE_VIEWS", default_view)

    def _set_context_env(self, env_var, default_value):
        env_value = self.launch_context.env.get(env_var, "")
        new_value = ":".join(
            key for key in [default_value, env_value] if key
        )
        self.log.info(
            "Setting {} environment to: {}"
            .format(env_var, new_value)
        )
        self.launch_context.env[env_var] = new_value
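`_set_context_env` prepends the configured default so OCIO treats it as the preferred display or view while keeping any admin-provided values active. A runnable sketch of the join behaviour:

    def prepend_active_value(default_value, env_value):
        return ":".join(key for key in [default_value, env_value] if key)

    assert prepend_active_value("ACES", "") == "ACES"
    assert prepend_active_value("ACES", "sRGB") == "ACES:sRGB"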
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating alembic camera products."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.pipeline import CreatedInstance, CreatorError
from ayon_core.pipeline import CreatorError

import hou

@@ -23,7 +23,7 @@ class CreateAlembicCamera(plugin.HoudiniCreator):
        instance = super(CreateAlembicCamera, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
        parms = {
@@ -29,7 +29,7 @@ class CreateArnoldAss(plugin.HoudiniCreator):
        instance = super(CreateArnoldAss, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: plugin.CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
@@ -31,7 +31,7 @@ class CreateArnoldRop(plugin.HoudiniCreator):
        instance = super(CreateArnoldRop, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: plugin.CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache bgeo files."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.pipeline import CreatedInstance, CreatorError
from ayon_core.pipeline import CreatorError
import hou
from ayon_core.lib import EnumDef, BoolDef

@@ -25,7 +25,7 @@ class CreateBGEO(plugin.HoudiniCreator):
        instance = super(CreateBGEO, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating composite sequences."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.pipeline import CreatedInstance, CreatorError
from ayon_core.pipeline import CreatorError

import hou

@@ -25,7 +25,7 @@ class CreateCompositeSequence(plugin.HoudiniCreator):
        instance = super(CreateCompositeSequence, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
        filepath = "{}{}".format(
@@ -78,7 +78,7 @@ class CreateHDA(plugin.HoudiniCreator):
        instance = super(CreateHDA, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: plugin.CreatedInstance
            pre_create_data)

        return instance
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin to create Karma ROP."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.pipeline import CreatedInstance
from ayon_core.lib import BoolDef, EnumDef, NumberDef


@@ -25,7 +24,7 @@ class CreateKarmaROP(plugin.HoudiniCreator):
        instance = super(CreateKarmaROP, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.pipeline import CreatedInstance
from ayon_core.lib import BoolDef


@@ -22,7 +21,7 @@ class CreateMantraIFD(plugin.HoudiniCreator):
        instance = super(CreateMantraIFD, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin to create Mantra ROP."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.pipeline import CreatedInstance
from ayon_core.lib import EnumDef, BoolDef


@@ -28,7 +27,7 @@ class CreateMantraROP(plugin.HoudiniCreator):
        instance = super(CreateMantraROP, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating openGL reviews."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.hosts.houdini.api import lib, plugin
from ayon_core.lib import EnumDef, BoolDef, NumberDef

import os

@@ -14,6 +14,13 @@ class CreateReview(plugin.HoudiniCreator):
    label = "Review"
    product_type = "review"
    icon = "video-camera"
    review_color_space = ""

    def apply_settings(self, project_settings):
        super(CreateReview, self).apply_settings(project_settings)
        color_settings = project_settings["houdini"]["imageio"]["workfile"]
        if color_settings["enabled"]:
            self.review_color_space = color_settings.get("review_color_space")

    def create(self, product_name, instance_data, pre_create_data):

@@ -85,10 +92,20 @@ class CreateReview(plugin.HoudiniCreator):

        instance_node.setParms(parms)

        # Set OCIO Colorspace to the default output colorspace
        # Set OCIO Colorspace to the default colorspace
        # if there's OCIO
        if os.getenv("OCIO"):
            self.set_colorcorrect_to_default_view_space(instance_node)
            # Fall to the default value if cls.review_color_space is empty.
            if not self.review_color_space:
                # cls.review_color_space is an empty string
                # when the imageio/workfile setting is disabled or
                # when the Review colorspace setting is empty.
                from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa
                self.review_color_space = get_default_display_view_colorspace()

            lib.set_review_color_space(instance_node,
                                       self.review_color_space,
                                       self.log)

        to_lock = ["id", "productType"]

@@ -131,23 +148,3 @@ class CreateReview(plugin.HoudiniCreator):
                      minimum=0.0001,
                      decimals=3)
        ]

    def set_colorcorrect_to_default_view_space(self,
                                               instance_node):
        """Set ociocolorspace to the default output space."""
        from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa

        # set Color Correction parameter to OpenColorIO
        instance_node.setParms({"colorcorrect": 2})

        # Get default view space for ociocolorspace parm.
        default_view_space = get_default_display_view_colorspace()
        instance_node.setParms(
            {"ociocolorspace": default_view_space}
        )

        self.log.debug(
            "'OCIO Colorspace' parm on '{}' has been set to "
            "the default view color space '{}'"
            .format(instance_node, default_view_space)
        )
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating USDs."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.pipeline import CreatedInstance

import hou


@@ -22,7 +21,7 @@ class CreateUSD(plugin.HoudiniCreator):
        instance = super(CreateUSD, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating USD renders."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.pipeline import CreatedInstance


class CreateUSDRender(plugin.HoudiniCreator):

@@ -23,7 +22,7 @@ class CreateUSDRender(plugin.HoudiniCreator):
        instance = super(CreateUSDRender, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating VDB Caches."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.pipeline import CreatedInstance
from ayon_core.lib import BoolDef

import hou

@@ -26,7 +25,7 @@ class CreateVDBCache(plugin.HoudiniCreator):
        instance = super(CreateVDBCache, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
        file_path = "{}{}".format(
@@ -3,7 +3,7 @@
import hou

from ayon_core.hosts.houdini.api import plugin
from ayon_core.pipeline import CreatedInstance, CreatorError
from ayon_core.pipeline import CreatorError
from ayon_core.lib import EnumDef, BoolDef


@@ -31,7 +31,7 @@ class CreateVrayROP(plugin.HoudiniCreator):
        instance = super(CreateVrayROP, self).create(
            product_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))
@@ -45,33 +45,11 @@ class AbcLoader(load.LoaderPlugin):
        alembic = container.createNode("alembic", node_name=node_name)
        alembic.setParms({"fileName": file_path})

        # Add unpack node
        unpack_name = "unpack_{}".format(name)
        unpack = container.createNode("unpack", node_name=unpack_name)
        unpack.setInput(0, alembic)
        unpack.setParms({"transfer_attributes": "path"})
        # Position nodes nicely
        container.moveToGoodPosition()
        container.layoutChildren()

        # Add normal to points
        # Order of menu ['point', 'vertex', 'prim', 'detail']
        normal_name = "normal_{}".format(name)
        normal_node = container.createNode("normal", node_name=normal_name)
        normal_node.setParms({"type": 0})

        normal_node.setInput(0, unpack)

        null = container.createNode("null", node_name="OUT")
        null.setInput(0, normal_node)

        # Ensure display flag is on the Alembic input node and not on the OUT
        # node to optimize "debug" displaying in the viewport.
        alembic.setDisplayFlag(True)

        # Set new position for unpack node else it gets cluttered
        nodes = [container, alembic, unpack, normal_node, null]
        for nr, node in enumerate(nodes):
            node.setPosition([0, (0 - nr)])

        self[:] = nodes
        nodes = [container, alembic]

        return pipeline.containerise(
            node_name,
@@ -1,9 +1,21 @@
from collections import deque

import pyblish.api

from ayon_core.pipeline import registered_host


def collect_input_containers(nodes):
def get_container_members(container):
    node = container["node"]
    # Usually the loaded containers don't have any complex references
    # and the contained children should be all we need. So we disregard
    # checking for .references() on the nodes.
    members = set(node.allSubChildren())
    members.add(node)  # include the node itself
    return members


def collect_input_containers(containers, nodes):
    """Collect containers that contain any of the node in `nodes`.

    This will return any loaded Avalon container that contains at least one of

@@ -11,30 +23,13 @@ def collect_input_containers(nodes):
    there are member nodes of that container.

    Returns:
        list: Input avalon containers
        list: Loaded containers that contain the `nodes`

    """

    # Lookup by node ids
    lookup = frozenset(nodes)

    containers = []
    host = registered_host()
    for container in host.ls():

        node = container["node"]

        # Usually the loaded containers don't have any complex references
        # and the contained children should be all we need. So we disregard
        # checking for .references() on the nodes.
        members = set(node.allSubChildren())
        members.add(node)  # include the node itself

        # If there's an intersection
        if not lookup.isdisjoint(members):
            containers.append(container)

    return containers
    # Assume the containers have collected their cached '_members' data
    # in the collector.
    return [container for container in containers
            if any(node in container["_members"] for node in nodes)]


def iter_upstream(node):

@@ -54,7 +49,7 @@ def iter_upstream(node):
    )

    # Initialize process queue with the node's ancestors itself
    queue = list(upstream)
    queue = deque(upstream)
    collected = set(upstream)

    # Traverse upstream references for all nodes and yield them as we

@@ -72,6 +67,10 @@ def iter_upstream(node):

    # Include the references' ancestors that have not been collected yet.
    for reference in references:
        if reference in collected:
            # Might have been collected in previous iteration
            continue

        ancestors = reference.inputAncestors(
            include_ref_inputs=True, follow_subnets=True
        )

@@ -108,13 +107,32 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
            )
            return

        # Collect all upstream parents
        nodes = list(iter_upstream(output))
        nodes.append(output)
        # For large scenes the querying of "host.ls()" can be relatively slow
        # e.g. up to a second. Many instances calling it easily slows this
        # down. As such, we cache it so we trigger it only once.
        # todo: Instead of hidden cache make "CollectContainers" plug-in
        cache_key = "__cache_containers"
        scene_containers = instance.context.data.get(cache_key, None)
        if scene_containers is None:
            # Query the scenes' containers if there's no cache yet
            host = registered_host()
            scene_containers = list(host.ls())
            for container in scene_containers:
                # Embed the members into the container dictionary
                container_members = set(get_container_members(container))
                container["_members"] = container_members
            instance.context.data[cache_key] = scene_containers

        # Collect containers for the given set of nodes
        containers = collect_input_containers(nodes)
        inputs = []
        if scene_containers:
            # Collect all upstream parents
            nodes = list(iter_upstream(output))
            nodes.append(output)

            # Collect containers for the given set of nodes
            containers = collect_input_containers(scene_containers, nodes)

            inputs = [c["representation"] for c in containers]

        inputs = [c["representation"] for c in containers]
        instance.data["inputRepresentations"] = inputs
        self.log.debug("Collected inputs: %s" % inputs)
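The comment in the collector motivates the context-level cache: `host.ls()` can take up to a second on large scenes, so per-instance plug-ins should pay that cost once. A minimal sketch of the pattern with Pyblish's shared context data (names mirror the code above):

    def get_scene_containers(context, host):
        cache_key = "__cache_containers"
        scene_containers = context.data.get(cache_key)
        if scene_containers is None:
            # The first instance pays the cost; later instances reuse it.
            scene_containers = list(host.ls())
            context.data[cache_key] = scene_containers
        return scene_containers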
@@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop, splitext
import hou


class ExtractComposite(publish.Extractor):
class ExtractComposite(publish.Extractor,
                       publish.ColormanagedPyblishPluginMixin):

    order = pyblish.api.ExtractorOrder
    label = "Extract Composite (Image Sequence)"

@@ -45,8 +46,14 @@ class ExtractComposite(publish.Extractor):
            "frameEnd": instance.data["frameEndHandle"],
        }

        from pprint import pformat

        self.log.info(pformat(representation))
        if ext.lower() == "exr":
            # Inject colorspace with 'scene_linear' as that's the
            # default Houdini working colorspace and all extracted
            # OpenEXR images should be in that colorspace.
            # https://www.sidefx.com/docs/houdini/render/linear.html#image-formats
            self.set_representation_colorspace(
                representation, instance.context,
                colorspace="scene_linear"
            )

        instance.data["representations"].append(representation)
@@ -8,7 +8,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop
import hou


class ExtractOpenGL(publish.Extractor):
class ExtractOpenGL(publish.Extractor,
                    publish.ColormanagedPyblishPluginMixin):

    order = pyblish.api.ExtractorOrder - 0.01
    label = "Extract OpenGL"

@@ -46,6 +47,14 @@ class ExtractOpenGL(publish.Extractor):
            "camera_name": instance.data.get("review_camera")
        }

        if ropnode.evalParm("colorcorrect") == 2:  # OpenColorIO enabled
            colorspace = ropnode.evalParm("ociocolorspace")
            # inject colorspace data
            self.set_representation_colorspace(
                representation, instance.context,
                colorspace=colorspace
            )

        if "representations" not in instance.data:
            instance.data["representations"] = []
        instance.data["representations"].append(representation)
@@ -3,7 +3,6 @@ import pyblish.api
from ayon_core.lib import version_up
from ayon_core.pipeline import registered_host
from ayon_core.pipeline.publish import get_errored_plugins_from_context
from ayon_core.hosts.houdini.api import HoudiniHost
from ayon_core.pipeline.publish import KnownPublishError


@@ -39,7 +38,7 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
        )

        # Filename must not have changed since collecting
        host = registered_host()  # type: HoudiniHost
        host = registered_host()
        current_file = host.current_file()
        if context.data["currentFile"] != current_file:
            raise KnownPublishError(
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import sys
import hou
import pyblish.api
import six

from ayon_core.pipeline import PublishValidationError

@@ -26,28 +25,21 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
        invalid = self.get_invalid(instance)
        if invalid:
            raise PublishValidationError(
                ("Output node(s) `{}` are incorrect. "
                 "See plug-in log for details.").format(invalid),
                title=self.label
                "Output node '{}' is incorrect. "
                "See plug-in log for details.".format(invalid),
                title=self.label,
                description=(
                    "### Invalid COP output node\n\n"
                    "The output node path for the instance must be set to a "
                    "valid COP node path.\n\nSee the log for more details."
                )
            )

    @classmethod
    def get_invalid(cls, instance):
        output_node = instance.data.get("output_node")

        import hou

        try:
            output_node = instance.data["output_node"]
        except KeyError:
            six.reraise(
                PublishValidationError,
                PublishValidationError(
                    "Can't determine COP output node.",
                    title=cls.__name__),
                sys.exc_info()[2]
            )

        if output_node is None:
        if not output_node:
            node = hou.node(instance.data.get("instance_node"))
            cls.log.error(
                "COP Output node in '%s' does not exist. "

@@ -61,8 +53,8 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
            cls.log.error(
                "Output node %s is not a COP node. "
                "COP Path must point to a COP node, "
                "instead found category type: %s"
                % (output_node.path(), output_node.type().category().name())
                "instead found category type: %s",
                output_node.path(), output_node.type().category().name()
            )
            return [output_node.path()]

@@ -70,9 +62,7 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
        # is Cop2 to avoid potential edge case scenarios even though
        # the isinstance check above should be stricter than this category
        if output_node.type().category().name() != "Cop2":
            raise PublishValidationError(
                (
                    "Output node {} is not of category Cop2."
                    " This is a bug..."
                ).format(output_node.path()),
                title=cls.label)
            cls.log.error(
                "Output node %s is not of category Cop2.", output_node.path()
            )
            return [output_node.path()]
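The removed `six.reraise` block above is the Python 2/3-compatible spelling of re-raising with the original traceback; the commit replaces it with a plain `.get()` lookup and a falsy check, avoiding the exception entirely. For reference, the Python 3-only equivalent of the old behaviour is exception chaining, sketched here with the same exception type and title keyword the plug-in already uses:

    try:
        output_node = instance.data["output_node"]
    except KeyError as exc:
        raise PublishValidationError(
            "Can't determine COP output node.", title=cls.__name__
        ) from exc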
@@ -4,15 +4,19 @@ from ayon_core.pipeline import (
    PublishValidationError,
    OptionalPyblishPluginMixin
)
from ayon_core.pipeline.publish import RepairAction
from ayon_core.pipeline.publish import (
    RepairAction,
    get_plugin_settings,
    apply_plugin_settings_automatically
)
from ayon_core.hosts.houdini.api.action import SelectROPAction

import os
import hou


class SetDefaultViewSpaceAction(RepairAction):
    label = "Set default view colorspace"
class ResetViewSpaceAction(RepairAction):
    label = "Reset OCIO colorspace parm"
    icon = "mdi.monitor"


@@ -27,9 +31,25 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
    families = ["review"]
    hosts = ["houdini"]
    label = "Validate Review Colorspace"
    actions = [SetDefaultViewSpaceAction, SelectROPAction]
    actions = [ResetViewSpaceAction, SelectROPAction]

    optional = True
    review_color_space = ""

    @classmethod
    def apply_settings(cls, project_settings):
        # Preserve automatic settings applying logic
        settings = get_plugin_settings(plugin=cls,
                                       project_settings=project_settings,
                                       log=cls.log,
                                       category="houdini")
        apply_plugin_settings_automatically(cls, settings, logger=cls.log)

        # Add review color settings
        color_settings = project_settings["houdini"]["imageio"]["workfile"]
        if color_settings["enabled"]:
            cls.review_color_space = color_settings.get("review_color_space")

    def process(self, instance):

@@ -52,39 +72,54 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
                " 'OpenColorIO'".format(rop_node.path())
            )

        if rop_node.evalParm("ociocolorspace") not in \
                hou.Color.ocio_spaces():

        current_color_space = rop_node.evalParm("ociocolorspace")
        if current_color_space not in hou.Color.ocio_spaces():
            raise PublishValidationError(
                "Invalid value: Colorspace name doesn't exist.\n"
                "Check 'OCIO Colorspace' parameter on '{}' ROP"
                .format(rop_node.path())
            )

    @classmethod
    def repair(cls, instance):
        """Set Default View Space Action.
        # if houdini/imageio/workfile is enabled and
        # Review colorspace setting is empty then this check should
        # actually check if the current_color_space setting equals
        # the default colorspace value.
        # However, it will make the black cmd screen show up more often
        # which is very annoying.
        if self.review_color_space and \
                self.review_color_space != current_color_space:

        It is a helper action more than a repair action,
        used to set colorspace on opengl node to the default view.
        """
        from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa

        rop_node = hou.node(instance.data["instance_node"])

        if rop_node.evalParm("colorcorrect") != 2:
            rop_node.setParms({"colorcorrect": 2})
            cls.log.debug(
                "'Color Correction' parm on '{}' has been set to"
                " 'OpenColorIO'".format(rop_node.path())
            raise PublishValidationError(
                "Invalid value: Colorspace name doesn't match "
                "the Colorspace specified in settings."
            )

        # Get default view colorspace name
        default_view_space = get_default_display_view_colorspace()
    @classmethod
    def repair(cls, instance):
        """Reset view colorspace.

        rop_node.setParms({"ociocolorspace": default_view_space})
        cls.log.info(
            "'OCIO Colorspace' parm on '{}' has been set to "
            "the default view color space '{}'"
            .format(rop_node, default_view_space)
        )
        It is used to set colorspace on opengl node.

        It uses the colorspace value specified in the Houdini addon settings.
        If the value in the Houdini addon settings is empty,
        it will fall to the default colorspace.

        Note:
            This repair action assumes that OCIO is enabled.
            As if OCIO is disabled the whole validation is skipped
            and this repair action won't show up.
        """
        from ayon_core.hosts.houdini.api.lib import set_review_color_space

        # Fall to the default value if cls.review_color_space is empty.
        if not cls.review_color_space:
            # cls.review_color_space is an empty string
            # when the imageio/workfile setting is disabled or
            # when the Review colorspace setting is empty.
            from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa
            cls.review_color_space = get_default_display_view_colorspace()

        rop_node = hou.node(instance.data["instance_node"])
        set_review_color_space(rop_node,
                               cls.review_color_space,
                               cls.log)
@@ -8,10 +8,15 @@ from typing import Any, Dict, Union
import six
import ayon_api

from ayon_core.pipeline import get_current_project_name, colorspace
from ayon_core.pipeline import (
    get_current_project_name,
    get_current_folder_path,
    get_current_task_name,
    colorspace
)
from ayon_core.settings import get_project_settings
from ayon_core.pipeline.context_tools import (
    get_current_folder_entity,
    get_current_task_entity
)
from ayon_core.style import load_stylesheet
from pymxs import runtime as rt

@@ -221,41 +226,30 @@ def reset_scene_resolution():
    scene resolution can be overwritten by a folder if the folder.attrib
    contains any information regarding scene resolution.
    """

    folder_entity = get_current_folder_entity(
        fields={"attrib.resolutionWidth", "attrib.resolutionHeight"}
    )
    folder_attributes = folder_entity["attrib"]
    width = int(folder_attributes["resolutionWidth"])
    height = int(folder_attributes["resolutionHeight"])
    task_attributes = get_current_task_entity(fields={"attrib"})["attrib"]
    width = int(task_attributes["resolutionWidth"])
    height = int(task_attributes["resolutionHeight"])

    set_scene_resolution(width, height)


def get_frame_range(folder_entiy=None) -> Union[Dict[str, Any], None]:
    """Get the current folder frame range and handles.
def get_frame_range(task_entity=None) -> Union[Dict[str, Any], None]:
    """Get the current task frame range and handles

    Args:
        folder_entiy (dict): Folder entity.
        task_entity (dict): Task Entity.

    Returns:
        dict: with frame start, frame end, handle start, handle end.
    """
    # Set frame start/end
    if folder_entiy is None:
        folder_entiy = get_current_folder_entity()

    folder_attributes = folder_entiy["attrib"]
    frame_start = folder_attributes.get("frameStart")
    frame_end = folder_attributes.get("frameEnd")

    if frame_start is None or frame_end is None:
        return {}

    frame_start = int(frame_start)
    frame_end = int(frame_end)
    handle_start = int(folder_attributes.get("handleStart", 0))
    handle_end = int(folder_attributes.get("handleEnd", 0))
    if task_entity is None:
        task_entity = get_current_task_entity(fields={"attrib"})
    task_attributes = task_entity["attrib"]
    frame_start = int(task_attributes["frameStart"])
    frame_end = int(task_attributes["frameEnd"])
    handle_start = int(task_attributes["handleStart"])
    handle_end = int(task_attributes["handleEnd"])
    frame_start_handle = frame_start - handle_start
    frame_end_handle = frame_end + handle_end

@@ -281,9 +275,9 @@ def reset_frame_range(fps: bool = True):
        scene frame rate in frames-per-second.
    """
    if fps:
        project_name = get_current_project_name()
        project_entity = ayon_api.get_project(project_name)
        fps_number = float(project_entity["attrib"].get("fps"))
        task_entity = get_current_task_entity()
        task_attributes = task_entity["attrib"]
        fps_number = float(task_attributes["fps"])
        rt.frameRate = fps_number
    frame_range = get_frame_range()

@@ -525,6 +519,36 @@ def get_plugins() -> list:
    return plugin_info_list


def update_modifier_node_names(event, node):
    """Update the name of the nodes after renaming

    Args:
        event (pymxs.MXSWrapperBase): Event Name (
            Mandatory argument for rt.NodeEventCallback)
        node (list): Event Number (
            Mandatory argument for rt.NodeEventCallback)

    """
    containers = [
        obj
        for obj in rt.Objects
        if (
            rt.ClassOf(obj) == rt.Container
            and rt.getUserProp(obj, "id") == "pyblish.avalon.instance"
            and rt.getUserProp(obj, "productType") not in {
                "workfile", "tyflow"
            }
        )
    ]
    if not containers:
        return
    for container in containers:
        ayon_data = container.modifiers[0].openPypeData
        updated_node_names = [str(node.node) for node
                              in ayon_data.all_handles]
        rt.setProperty(ayon_data, "sel_list", updated_node_names)


@contextlib.contextmanager
def render_resolution(width, height):
    """Set render resolution option during context
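The reworked `get_frame_range` above reads the frame range from task attributes and folds handles into the returned range. A runnable sketch of that arithmetic (the attribute values are illustrative):

    attrib = {"frameStart": 1001, "frameEnd": 1100,
              "handleStart": 8, "handleEnd": 8}

    frame_start_handle = attrib["frameStart"] - attrib["handleStart"]
    frame_end_handle = attrib["frameEnd"] + attrib["handleEnd"]

    assert frame_start_handle == 993
    assert frame_end_handle == 1108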
@@ -63,6 +63,8 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):

        rt.callbacks.addScript(rt.Name('postWorkspaceChange'),
                               self._deferred_menu_creation)
        rt.NodeEventCallback(
            nameChanged=lib.update_modifier_node_names)

    def workfile_has_unsaved_changes(self):
        return rt.getSaveRequired()
@@ -53,6 +53,7 @@ class ExtractAlembic(publish.Extractor,
    hosts = ["max"]
    families = ["pointcache"]
    optional = True
    active = True

    def process(self, instance):
        if not self.is_active(instance.data):

@@ -102,24 +103,27 @@ class ExtractAlembic(publish.Extractor,

    @classmethod
    def get_attribute_defs(cls):
        return [
        defs = super(ExtractAlembic, cls).get_attribute_defs()
        defs.extend([
            BoolDef("custom_attrs",
                    label="Custom Attributes",
                    default=False),
        ]
        ])
        return defs


class ExtractCameraAlembic(ExtractAlembic):
    """Extract Camera with AlembicExport."""

    label = "Extract Alembic Camera"
    families = ["camera"]
    optional = True


class ExtractModel(ExtractAlembic):
class ExtractModelAlembic(ExtractAlembic):
    """Extract Geometry in Alembic Format"""
    label = "Extract Geometry (Alembic)"
    families = ["model"]
    optional = True

    def _set_abc_attributes(self, instance):
        attr_values = self.get_attr_values_from_data(instance.data)
@@ -42,7 +42,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin,
            return

        frame_range = get_frame_range(
            instance.data["folderEntity"])
            instance.data["taskEntity"])

        inst_frame_start = instance.data.get("frameStartHandle")
        inst_frame_end = instance.data.get("frameEndHandle")
@@ -38,7 +38,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin,
        context_label = "{} > {}".format(*context)
        instance_label = "{} > {}".format(folderPath, task)
        message = (
            "Instance '{}' publishes to different folder or task "
            "Instance '{}' publishes to a different context (folder or task) "
            "than current context: {}. Current context: {}".format(
                instance.name, instance_label, context_label
            )

@@ -46,7 +46,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin,
            raise PublishValidationError(
                message=message,
                description=(
                    "## Publishing to a different context folder or task\n"
                    "## Publishing to a different context (folder or task)\n"
                    "There are publish instances present which are publishing "
                    "into a different folder path or task than your current context.\n\n"
                    "Usually this is not what you want but there can be cases "
@ -7,7 +7,10 @@ from ayon_core.pipeline.publish import (
    RepairAction,
    PublishValidationError
)
-from ayon_core.hosts.max.api.lib import reset_scene_resolution
+from ayon_core.hosts.max.api.lib import (
+    reset_scene_resolution,
+    imprint
+)


class ValidateResolutionSetting(pyblish.api.InstancePlugin,

@ -25,8 +28,10 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
        if not self.is_active(instance.data):
            return
        width, height = self.get_folder_resolution(instance)
-        current_width = rt.renderWidth
-        current_height = rt.renderHeight
+        current_width, current_height = (
+            self.get_current_resolution(instance)
+        )

        if current_width != width and current_height != height:
            raise PublishValidationError("Resolution Setting "
                                         "not matching resolution "
@ -41,12 +46,16 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
                                         "not matching resolution set "
                                         "on asset or shot.")

-    def get_folder_resolution(self, instance):
-        folder_entity = instance.data["folderEntity"]
-        if folder_entity:
-            folder_attributes = folder_entity["attrib"]
-            width = folder_attributes["resolutionWidth"]
-            height = folder_attributes["resolutionHeight"]
+    def get_current_resolution(self, instance):
+        return rt.renderWidth, rt.renderHeight
+
+    @classmethod
+    def get_folder_resolution(cls, instance):
+        task_entity = instance.data.get("taskEntity")
+        if task_entity:
+            task_attributes = task_entity["attrib"]
+            width = task_attributes["resolutionWidth"]
+            height = task_attributes["resolutionHeight"]
            return int(width), int(height)

        # Defaults if not found in folder entity
@ -55,3 +64,29 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
    @classmethod
    def repair(cls, instance):
        reset_scene_resolution()


class ValidateReviewResolutionSetting(ValidateResolutionSetting):
    families = ["review"]
    optional = True
    actions = [RepairAction]

    def get_current_resolution(self, instance):
        current_width = instance.data["review_width"]
        current_height = instance.data["review_height"]
        return current_width, current_height

    @classmethod
    def repair(cls, instance):
        context_width, context_height = (
            cls.get_folder_resolution(instance)
        )
        creator_attrs = instance.data["creator_attributes"]
        creator_attrs["review_width"] = context_width
        creator_attrs["review_height"] = context_height
        creator_attrs_data = {
            "creator_attributes": creator_attrs
        }
        # update the width and height of review
        # data in creator_attributes
        imprint(instance.data["instance_node"], creator_attrs_data)
@ -2,8 +2,6 @@
"""Tools to work with FBX."""
-import logging
-
from pyblish.api import Instance

from maya import cmds  # noqa
import maya.mel as mel  # noqa
from ayon_core.hosts.maya.api.lib import maintained_selection

@ -146,7 +144,6 @@ class FBXExtractor:
        return options

    def set_options_from_instance(self, instance):
        # type: (Instance) -> None
        """Sets FBX export options from data in the instance.

        Args:
@ -1917,6 +1917,29 @@ def apply_attributes(attributes, nodes_by_id):
        set_attribute(attr, value, node)


def is_valid_reference_node(reference_node):
    """Return whether Maya considers the reference node a valid reference.

    Maya might report an error when using `maya.cmds.referenceQuery`:
    Reference node 'reference_node' is not associated with a reference file.

    Note that this does *not* check whether the reference node points to an
    existing file. Instead it only returns whether maya considers it valid
    and thus is not an unassociated reference node

    Arguments:
        reference_node (str): Reference node name

    Returns:
        bool: Whether reference node is a valid reference

    """
    sel = OpenMaya.MSelectionList()
    sel.add(reference_node)
    depend_node = sel.getDependNode(0)
    return OpenMaya.MFnReference(depend_node).isValidReference()


def get_container_members(container):
    """Returns the members of a container.
    This includes the nodes from any loaded references in the container.

@ -1942,7 +1965,16 @@ def get_container_members(container):
        if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
            continue

-        reference_members = cmds.referenceQuery(ref, nodes=True, dagPath=True)
+        try:
+            reference_members = cmds.referenceQuery(ref,
+                                                    nodes=True,
+                                                    dagPath=True)
+        except RuntimeError:
+            # Ignore reference nodes that are not associated with a
+            # referenced file on which `referenceQuery` command fails
+            if not is_valid_reference_node(ref):
+                continue
+            raise
        reference_members = cmds.ls(reference_members,
                                    long=True,
                                    objectsOnly=True)
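Note: a small usage sketch for the `is_valid_reference_node` helper introduced above, assuming a running Maya session; it lists only reference nodes that Maya considers associated with a reference file:

from maya import cmds


def iter_valid_reference_nodes():
    for ref in cmds.ls(type="reference"):
        # Skip Maya's internal placeholder reference nodes.
        if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
            continue
        # Skip nodes that referenceQuery would reject as unassociated.
        if not is_valid_reference_node(ref):
            continue
        yield ref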
@ -4238,6 +4270,9 @@ def get_reference_node(members, log=None):
        if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
            continue

+        if not is_valid_reference_node(ref):
+            continue
+
        references.add(ref)

    assert references, "No reference node found in container"

@ -4268,15 +4303,19 @@ def get_reference_node_parents(ref):
        list: The upstream parent reference nodes.

    """
-    parent = cmds.referenceQuery(ref,
-                                 referenceNode=True,
-                                 parent=True)
+    def _get_parent(reference_node):
+        """Return parent reference node, but ignore invalid reference nodes"""
+        if not is_valid_reference_node(reference_node):
+            return
+        return cmds.referenceQuery(reference_node,
+                                   referenceNode=True,
+                                   parent=True)
+
+    parent = _get_parent(ref)
    parents = []
    while parent:
        parents.append(parent)
-        parent = cmds.referenceQuery(parent,
-                                     referenceNode=True,
-                                     parent=True)
+        parent = _get_parent(parent)
    return parents
@ -720,7 +720,8 @@ class RenderProductsArnold(ARenderProducts):

        # AOVs > Legacy > Maya Render View > Mode
        aovs_enabled = bool(
-            self._get_attr("defaultArnoldRenderOptions.aovMode")
+            self._get_attr(
+                "defaultArnoldRenderOptions.aovMode", as_string=False)
        )
        if not aovs_enabled:
            return beauty_products
@ -19,7 +19,7 @@ from .lib import pairwise


@contextlib.contextmanager
-def _allow_export_from_render_setup_layer():
+def allow_export_from_render_setup_layer():
    """Context manager to override Maya settings to allow RS layer export"""
    try:

@ -102,7 +102,7 @@ def export_in_rs_layer(path, nodes, export=None):
        cmds.disconnectAttr(src, dest)

    # Export Selected
-    with _allow_export_from_render_setup_layer():
+    with allow_export_from_render_setup_layer():
        cmds.select(nodes, noExpand=True)
        if export:
            export()
@ -331,7 +331,8 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
        if scene_parent:
            cmds.parent(node, scene_parent)
        else:
-            cmds.parent(node, world=True)
+            if cmds.listRelatives(node, parent=True):
+                cmds.parent(node, world=True)

        holding_sets = cmds.listSets(object=placeholder.scene_identifier)
        if not holding_sets:
client/ayon_core/hosts/maya/api/yeti.py (new file, 101 lines)

@ -0,0 +1,101 @@
from typing import List

from maya import cmds


def get_yeti_user_variables(yeti_shape_node: str) -> List[str]:
    """Get user defined yeti user variables for a `pgYetiMaya` shape node.

    Arguments:
        yeti_shape_node (str): The `pgYetiMaya` shape node.

    Returns:
        list: Attribute names (for a vector attribute it only lists the top
            parent attribute, not the attribute per axis)
    """

    attrs = cmds.listAttr(yeti_shape_node,
                          userDefined=True,
                          string=("yetiVariableV_*",
                                  "yetiVariableF_*")) or []
    valid_attrs = []
    for attr in attrs:
        attr_type = cmds.attributeQuery(attr, node=yeti_shape_node,
                                        attributeType=True)
        if attr.startswith("yetiVariableV_") and attr_type == "double3":
            # vector
            valid_attrs.append(attr)
        elif attr.startswith("yetiVariableF_") and attr_type == "double":
            valid_attrs.append(attr)

    return valid_attrs


def create_yeti_variable(yeti_shape_node: str,
                         attr_name: str,
                         value=None,
                         force_value: bool = False) -> bool:
    """Create a user defined yeti variable on a `pgYetiMaya` shape node.

    Arguments:
        yeti_shape_node (str): The `pgYetiMaya` shape node.
        attr_name (str): The fully qualified yeti variable name, e.g.
            "yetiVariableF_myfloat" or "yetiVariableV_myvector"
        value (object): The value to set (must match the type of the
            attribute). When value is None it will be ignored and not be set.
        force_value (bool): Whether to set the value if the attribute already
            exists or not.

    Returns:
        bool: Whether the attribute value was set or not.

    """
    exists = cmds.attributeQuery(attr_name, node=yeti_shape_node, exists=True)
    if not exists:
        if attr_name.startswith("yetiVariableV_"):
            _create_vector_yeti_user_variable(yeti_shape_node, attr_name)
        if attr_name.startswith("yetiVariableF_"):
            _create_float_yeti_user_variable(yeti_shape_node, attr_name)

    if value is not None and (not exists or force_value):
        plug = "{}.{}".format(yeti_shape_node, attr_name)
        if (
            isinstance(value, (list, tuple))
            and attr_name.startswith("yetiVariableV_")
        ):
            cmds.setAttr(plug, *value, type="double3")
        else:
            cmds.setAttr(plug, value)

        return True
    return False


def _create_vector_yeti_user_variable(yeti_shape_node: str, attr_name: str):
    if not attr_name.startswith("yetiVariableV_"):
        raise ValueError("Must start with yetiVariableV_")
    cmds.addAttr(yeti_shape_node,
                 longName=attr_name,
                 attributeType="double3",
                 cachedInternally=True,
                 keyable=True)
    for axis in "XYZ":
        cmds.addAttr(yeti_shape_node,
                     longName="{}{}".format(attr_name, axis),
                     attributeType="double",
                     parent=attr_name,
                     cachedInternally=True,
                     keyable=True)


def _create_float_yeti_user_variable(yeti_node: str, attr_name: str):
    if not attr_name.startswith("yetiVariableF_"):
        raise ValueError("Must start with yetiVariableF_")

    cmds.addAttr(yeti_node,
                 longName=attr_name,
                 attributeType="double",
                 cachedInternally=True,
                 softMinValue=0,
                 softMaxValue=100,
                 keyable=True)
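Note: a usage sketch for the new helpers above, assuming a scene containing a pgYetiMaya shape named "yetiShape1" (the node name is hypothetical):

# Create a float and a vector user variable; existing values are kept
# because force_value defaults to False.
create_yeti_variable("yetiShape1", "yetiVariableF_density", value=2.5)
create_yeti_variable("yetiShape1", "yetiVariableV_tint",
                     value=(1.0, 0.5, 0.25))

# Overwrite an existing value explicitly.
create_yeti_variable("yetiShape1", "yetiVariableF_density",
                     value=10.0, force_value=True)

print(get_yeti_user_variables("yetiShape1"))
# e.g. ['yetiVariableF_density', 'yetiVariableV_tint']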
@ -37,7 +37,7 @@ class ConnectGeometry(InventoryAction):
        repre_id = container["representation"]
        repre_context = repre_contexts_by_id[repre_id]

-        product_type = repre_context["prouct"]["productType"]
+        product_type = repre_context["product"]["productType"]

        containers_by_product_type.setdefault(product_type, [])
        containers_by_product_type[product_type].append(container)

@ -36,7 +36,7 @@ class ConnectXgen(InventoryAction):
        repre_id = container["representation"]
        repre_context = repre_contexts_by_id[repre_id]

-        product_type = repre_context["prouct"]["productType"]
+        product_type = repre_context["product"]["productType"]

        containers_by_product_type.setdefault(product_type, [])
        containers_by_product_type[product_type].append(container)

@ -39,7 +39,7 @@ class ConnectYetiRig(InventoryAction):
        repre_id = container["representation"]
        repre_context = repre_contexts_by_id[repre_id]

-        product_type = repre_context["prouct"]["productType"]
+        product_type = repre_context["product"]["productType"]

        containers_by_product_type.setdefault(product_type, [])
        containers_by_product_type[product_type].append(container)
client/ayon_core/hosts/maya/plugins/load/load_as_template.py (new file, 39 lines)

@ -0,0 +1,39 @@
from ayon_core.lib import (
    BoolDef
)
from ayon_core.pipeline import (
    load,
    registered_host
)
from ayon_core.hosts.maya.api.workfile_template_builder import (
    MayaTemplateBuilder
)


class LoadAsTemplate(load.LoaderPlugin):
    """Load workfile as a template"""

    product_types = {"workfile", "mayaScene"}
    label = "Load as template"
    representations = ["ma", "mb"]
    icon = "wrench"
    color = "#775555"
    order = 10

    options = [
        BoolDef("keep_placeholders",
                label="Keep Placeholders",
                default=False),
        BoolDef("create_first_version",
                label="Create First Version",
                default=False),
    ]

    def load(self, context, name, namespace, data):
        keep_placeholders = data.get("keep_placeholders", False)
        create_first_version = data.get("create_first_version", False)
        path = self.filepath_from_context(context)
        builder = MayaTemplateBuilder(registered_host())
        builder.build_template(template_path=path,
                               keep_placeholders=keep_placeholders,
                               create_first_version=create_first_version)
@ -12,6 +12,7 @@ from ayon_core.pipeline import (
    get_representation_path
)
from ayon_core.hosts.maya.api import lib
+from ayon_core.hosts.maya.api.yeti import create_yeti_variable
from ayon_core.hosts.maya.api.pipeline import containerise
from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type

@ -23,8 +24,19 @@ SKIP_UPDATE_ATTRS = {
    "viewportDensity",
    "viewportWidth",
    "viewportLength",
    "renderDensity",
    "renderWidth",
    "renderLength",
    "increaseRenderBounds"
}

SKIP_ATTR_MESSAGE = (
    "Skipping updating %s.%s to %s because it "
    "is considered a local overridable attribute. "
    "Either set manually or load the cache "
    "anew."
)


def set_attribute(node, attr, value):
    """Wrapper of set attribute which ignores None values"""

@ -209,9 +221,31 @@ class YetiCacheLoader(load.LoaderPlugin):

        for attr, value in node_settings["attrs"].items():
+            if attr in SKIP_UPDATE_ATTRS:
+                self.log.info(
+                    SKIP_ATTR_MESSAGE, yeti_node, attr, value
+                )
+                continue
            set_attribute(attr, value, yeti_node)

+        # Set up user defined attributes
+        user_variables = node_settings.get("user_variables", {})
+        for attr, value in user_variables.items():
+            was_value_set = create_yeti_variable(
+                yeti_shape_node=yeti_node,
+                attr_name=attr,
+                value=value,
+                # We do not want to update the value if it already exists
+                # so that any local overrides that may have been applied
+                # still persist
+                force_value=False
+            )
+            if not was_value_set:
+                self.log.info(
+                    SKIP_ATTR_MESSAGE, yeti_node, attr, value
+                )

        cmds.setAttr("{}.representation".format(container_node),
                     repre_entity["id"],
                     typ="string")

@ -332,6 +366,13 @@ class YetiCacheLoader(load.LoaderPlugin):
        for attr, value in attributes.items():
            set_attribute(attr, value, yeti_node)

+        # Set up user defined attributes
+        user_variables = node_settings.get("user_variables", {})
+        for attr, value in user_variables.items():
+            create_yeti_variable(yeti_shape_node=yeti_node,
+                                 attr_name=attr,
+                                 value=value)
+
        # Connect to the time node
        cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node)
@ -1,8 +1,13 @@
+from typing import List
+
import maya.cmds as cmds

from ayon_core.hosts.maya.api import plugin
from ayon_core.hosts.maya.api import lib

+from ayon_core.pipeline import registered_host
+from ayon_core.pipeline.create import CreateContext


class YetiRigLoader(plugin.ReferenceLoader):
    """This loader will load Yeti rig."""

@ -15,6 +20,9 @@ class YetiRigLoader(plugin.ReferenceLoader):
    icon = "code-fork"
    color = "orange"

+    # From settings
+    create_cache_instance_on_load = True
+
    def process_reference(
        self, context, name=None, namespace=None, options=None
    ):

@ -49,4 +57,41 @@ class YetiRigLoader(plugin.ReferenceLoader):
        )
        self[:] = nodes

        if self.create_cache_instance_on_load:
            # Automatically create an instance to allow publishing the
            # loaded yeti rig into a yeti cache
            self._create_yeti_cache_instance(nodes, variant=namespace)

        return nodes

    def _create_yeti_cache_instance(self, nodes: List[str], variant: str):
        """Create a yeticache product type instance to publish the output.

        This is similar to how loading animation rig will automatically create
        an animation instance for publishing any loaded character rigs, but
        then for yeti rigs.

        Args:
            nodes (List[str]): Nodes generated on load.
            variant (str): Variant for the yeti cache instance to create.

        """

        # Find the roots amongst the loaded nodes
        yeti_nodes = cmds.ls(nodes, type="pgYetiMaya", long=True)
        assert yeti_nodes, "No pgYetiMaya nodes in rig, this is a bug."

        self.log.info("Creating variant: {}".format(variant))

        creator_identifier = "io.openpype.creators.maya.yeticache"

        host = registered_host()
        create_context = CreateContext(host)

        with lib.maintained_selection():
            cmds.select(yeti_nodes, noExpand=True)
            create_context.create(
                creator_identifier=creator_identifier,
                variant=variant,
                pre_create_data={"use_selection": True}
            )
@ -12,7 +12,7 @@ class CollectFileDependencies(pyblish.api.ContextPlugin):
    families = ["renderlayer"]

    @classmethod
-    def apply_settings(cls, project_settings, system_settings):
+    def apply_settings(cls, project_settings):
        # Disable plug-in if not used for deadline submission anyway
        settings = project_settings["deadline"]["publish"]["MayaSubmitDeadline"]  # noqa
        cls.enabled = settings.get("asset_dependencies", True)
@ -3,6 +3,7 @@ from maya import cmds
import pyblish.api

from ayon_core.hosts.maya.api import lib
+from ayon_core.hosts.maya.api.yeti import get_yeti_user_variables


SETTINGS = {

@ -34,7 +35,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
    - "increaseRenderBounds"
    - "imageSearchPath"

-    Other information is the name of the transform and it's Colorbleed ID
+    Other information is the name of the transform and its `cbId`
    """

    order = pyblish.api.CollectorOrder + 0.45

@ -54,6 +55,16 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
        # Get specific node attributes
        attr_data = {}
        for attr in SETTINGS:
+            # Ignore non-existing attributes with a warning, e.g. cbId
+            # if they have not been generated yet
+            if not cmds.attributeQuery(attr, node=shape, exists=True):
+                self.log.warning(
+                    "Attribute '{}' not found on Yeti node: {}".format(
+                        attr, shape
+                    )
+                )
+                continue
+
            current = cmds.getAttr("%s.%s" % (shape, attr))
            # change None to empty string as Maya doesn't support
            # NoneType in attributes

@ -61,6 +72,12 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
                current = ""
            attr_data[attr] = current

+        # Get user variable attributes
+        user_variable_attrs = {
+            attr: lib.get_attribute("{}.{}".format(shape, attr))
+            for attr in get_yeti_user_variables(shape)
+        }
+
        # Get transform data
        parent = cmds.listRelatives(shape, parent=True)[0]
        transform_data = {"name": parent, "cbId": lib.get_id(parent)}

@ -70,6 +87,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
            "name": shape,
            "cbId": lib.get_id(shape),
            "attrs": attr_data,
+            "user_variables": user_variable_attrs
        }

        settings["nodes"].append(shape_data)
@ -299,4 +299,10 @@ def transfer_image_planes(source_cameras, target_cameras,

def _attach_image_plane(camera, image_plane):
    cmds.imagePlane(image_plane, edit=True, detach=True)
+
+    # Attaching to a camera resets it to identity size, so we counter that
+    size_x = cmds.getAttr(f"{image_plane}.sizeX")
+    size_y = cmds.getAttr(f"{image_plane}.sizeY")
    cmds.imagePlane(image_plane, edit=True, camera=camera)
+    cmds.setAttr(f"{image_plane}.sizeX", size_x)
+    cmds.setAttr(f"{image_plane}.sizeY", size_y)
@ -5,7 +5,13 @@ import os
from maya import cmds

from ayon_core.pipeline import publish
-from ayon_core.hosts.maya.api.lib import maintained_selection
+from ayon_core.hosts.maya.api.lib import (
+    maintained_selection,
+    renderlayer
+)
+from ayon_core.hosts.maya.api.render_setup_tools import (
+    allow_export_from_render_setup_layer
+)


class ExtractRedshiftProxy(publish.Extractor):

@ -18,6 +24,9 @@ class ExtractRedshiftProxy(publish.Extractor):
    def process(self, instance):
        """Extractor entry point."""

+        # Make sure Redshift is loaded
+        cmds.loadPlugin("redshift4maya", quiet=True)
+
        staging_dir = self.staging_dir(instance)
        file_name = "{}.rs".format(instance.name)
        file_path = os.path.join(staging_dir, file_name)

@ -60,14 +69,22 @@ class ExtractRedshiftProxy(publish.Extractor):

        # Write out rs file
        self.log.debug("Writing: '%s'" % file_path)

+        # Allow overriding what renderlayer to export from. By default force
+        # it to the default render layer. (Note that the renderlayer isn't
+        # currently exposed as an attribute to artists)
+        layer = instance.data.get("renderLayer", "defaultRenderLayer")
+
        with maintained_selection():
-            cmds.select(instance.data["setMembers"], noExpand=True)
-            cmds.file(file_path,
-                      pr=False,
-                      force=True,
-                      type="Redshift Proxy",
-                      exportSelected=True,
-                      options=rs_options)
+            with renderlayer(layer):
+                with allow_export_from_render_setup_layer():
+                    cmds.select(instance.data["setMembers"], noExpand=True)
+                    cmds.file(file_path,
+                              preserveReferences=False,
+                              force=True,
+                              type="Redshift Proxy",
+                              exportSelected=True,
+                              options=rs_options)

        if "representations" not in instance.data:
            instance.data["representations"] = []
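Note: the three nested `with` blocks above compose outermost-first; an equivalent structure using contextlib.ExitStack, shown only to illustrate the nesting order (this is not the committed code):

import contextlib

with contextlib.ExitStack() as stack:
    stack.enter_context(maintained_selection())
    stack.enter_context(renderlayer(layer))
    stack.enter_context(allow_export_from_render_setup_layer())
    # Selection and export happen with all three contexts active.
    cmds.select(instance.data["setMembers"], noExpand=True)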
@ -2,7 +2,6 @@ from maya import cmds

import pyblish.api
from ayon_core.pipeline.publish import (
    ValidateContentsOrder,
    RepairContextAction,
    PublishValidationError
)

@ -45,6 +45,11 @@ class ValidateMeshNgons(pyblish.api.InstancePlugin,
        # Get all faces
        faces = ['{0}.f[*]'.format(node) for node in meshes]

+        # Skip meshes that for some reason have no faces, e.g. empty meshes
+        faces = cmds.ls(faces)
+        if not faces:
+            return []
+
        # Filter to n-sided polygon faces (ngons)
        invalid = lib.polyConstraint(faces,
                                     t=0x0008,  # type=face
@ -1,3 +1,5 @@
+import inspect
+
from maya import cmds

import pyblish.api

@ -29,8 +31,8 @@ class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin,
    actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction,
               RepairAction]

-    @staticmethod
-    def get_invalid(instance):
+    @classmethod
+    def get_invalid(cls, instance):

        meshes = cmds.ls(instance, type='mesh', long=True)

@ -40,6 +42,11 @@ class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin,
        # Get existing mapping of uv sets by index
        indices = cmds.polyUVSet(mesh, query=True, allUVSetsIndices=True)
        maps = cmds.polyUVSet(mesh, query=True, allUVSets=True)
+        if not indices or not maps:
+            cls.log.warning("Mesh has no UV set: %s", mesh)
+            invalid.append(mesh)
+            continue
+
        mapping = dict(zip(indices, maps))

        # Get the uv set at index zero.

@ -56,8 +63,14 @@ class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin,

        invalid = self.get_invalid(instance)
        if invalid:
+            invalid_list = "\n".join(f"- {node}" for node in invalid)
            raise PublishValidationError(
-                "Meshes found without 'map1' UV set: {0}".format(invalid))
+                "Meshes found without 'map1' UV set:\n"
+                "{0}".format(invalid_list),
+                description=self.get_description()
+            )

    @classmethod
    def repair(cls, instance):

@ -68,6 +81,12 @@ class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin,
        # Get existing mapping of uv sets by index
        indices = cmds.polyUVSet(mesh, query=True, allUVSetsIndices=True)
        maps = cmds.polyUVSet(mesh, query=True, allUVSets=True)
+        if not indices or not maps:
+            # No UV set exist at all, create a `map1` uv set
+            # This may fail silently if the mesh has no geometry at all
+            cmds.polyUVSet(mesh, create=True, uvSet="map1")
+            continue
+
        mapping = dict(zip(indices, maps))

        # Ensure there is no uv set named map1 to avoid

@ -97,3 +116,23 @@ class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin,
                       rename=True,
                       uvSet=original,
                       newUVSet="map1")

    @staticmethod
    def get_description():
        return inspect.cleandoc("""### Mesh found without map1 uv set

            A mesh must have a default UV set named `map1` to adhere to the
            default mesh behavior of Maya meshes.

            There may be meshes that:
            - Have no UV set
            - Have no `map1` uv set but are using a different name
            - Have a `map1` uv set, but it's not the default (first index)

            #### Repair

            Using repair will try to make the first UV set the `map1` uv set.
            If no UV set exists yet it will be created, otherwise the current
            first UV set is renamed to `map1`.
            """)
@ -1,17 +1,27 @@
+import inspect
+import uuid
+from collections import defaultdict
import pyblish.api

import ayon_core.hosts.maya.api.action
from ayon_core.hosts.maya.api import lib
from ayon_core.pipeline.publish import (
    OptionalPyblishPluginMixin, PublishValidationError, ValidatePipelineOrder)
+from ayon_api import get_folders


+def is_valid_uuid(value) -> bool:
+    """Return whether value is a valid UUID"""
+    try:
+        uuid.UUID(value)
+    except ValueError:
+        return False
+    return True


class ValidateNodeIDsRelated(pyblish.api.InstancePlugin,
                             OptionalPyblishPluginMixin):
-    """Validate nodes have a related Colorbleed Id to the
-    instance.data[folderPath]
-
-    """
+    """Validate nodes have a related `cbId` to the instance.data[folderPath]"""

    order = ValidatePipelineOrder
    label = 'Node Ids Related (ID)'

@ -39,21 +49,24 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin,
        # Ensure all nodes have a cbId
        invalid = self.get_invalid(instance)
        if invalid:
+            invalid_list = "\n".join(f"- {node}" for node in sorted(invalid))
            raise PublishValidationError((
-                "Nodes IDs found that are not related to folder '{}' : {}"
-            ).format(
-                instance.data["folderPath"], invalid
-            ))
+                "Nodes IDs found that are not related to folder '{}':\n{}"
+            ).format(instance.data["folderPath"], invalid_list),
+                description=self.get_description()
+            )

    @classmethod
    def get_invalid(cls, instance):
        """Return the member nodes that are invalid"""
-        invalid = list()

        folder_id = instance.data["folderEntity"]["id"]

-        # We do want to check the referenced nodes as we it might be
+        # We do want to check the referenced nodes as it might be
        # part of the end product
        invalid = list()
+        nodes_by_other_folder_ids = defaultdict(set)
        for node in instance:
            _id = lib.get_id(node)
            if not _id:

@ -62,5 +75,48 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin,
            node_folder_id = _id.split(":", 1)[0]
            if node_folder_id != folder_id:
                invalid.append(node)
+                nodes_by_other_folder_ids[node_folder_id].add(node)

+        # Log what other assets were found.
+        if nodes_by_other_folder_ids:
+            project_name = instance.context.data["projectName"]
+            other_folder_ids = set(nodes_by_other_folder_ids.keys())
+
+            # Remove folder ids that are not valid UUID identifiers, these
+            # may be legacy OpenPype ids
+            other_folder_ids = {folder_id for folder_id in other_folder_ids
+                                if is_valid_uuid(folder_id)}
+            if not other_folder_ids:
+                return invalid
+
+            folder_entities = get_folders(project_name=project_name,
+                                          folder_ids=other_folder_ids,
+                                          fields=["path"])
+            if folder_entities:
+                # Log names of other assets detected
+                # We disregard logging nodes/ids for asset ids where no asset
+                # was found in the database because ValidateNodeIdsInDatabase
+                # takes care of that.
+                folder_paths = {entity["path"] for entity in folder_entities}
+                cls.log.error(
+                    "Found nodes related to other folders:\n{}".format(
+                        "\n".join(f"- {path}" for path in sorted(folder_paths))
+                    )
+                )

        return invalid

    @staticmethod
    def get_description():
        return inspect.cleandoc("""### Node IDs must match folder id

            The node ids must match the folder entity id you are publishing to.

            Usually these mismatches occur if you are re-using nodes from
            another folder or project.

            #### How to repair?

            The repair action will regenerate new ids for
            the invalid nodes to match the instance's folder.
            """)
@ -46,6 +46,6 @@ class ValidateSceneSetWorkspace(pyblish.api.ContextPlugin):
        raise PublishValidationError(
            "Maya workspace is not set correctly.\n\n"
            f"Current workfile `{scene_name}` is not inside the "
-            "current Maya project root directory `{root_dir}`.\n\n"
+            f"current Maya project root directory `{root_dir}`.\n\n"
            "Please use Workfile app to re-save."
        )
@ -5,7 +5,7 @@ import sys
import six
import random
import string
-from collections import OrderedDict, defaultdict
+from collections import defaultdict

from ayon_core.settings import get_current_project_settings
from ayon_core.lib import (
@ -586,7 +586,6 @@ def prompt_new_file_with_mesh(mesh_filepath):
    # TODO: find a way to improve the process event to
    # load more complicated mesh
    app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 3000)

    file_dialog.done(file_dialog.Accepted)
    app.processEvents(QtCore.QEventLoop.AllEvents)

@ -606,7 +605,7 @@ def prompt_new_file_with_mesh(mesh_filepath):
    mesh_select.setVisible(False)

    # Ensure UI is visually up-to-date
-    app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents)
+    app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 8000)

    # Trigger the 'select file' dialog to set the path and have the
    # new file dialog to use the path.

@ -623,8 +622,6 @@ def prompt_new_file_with_mesh(mesh_filepath):
        "Failed to set mesh path with the prompt dialog:"
        f"{mesh_filepath}\n\n"
        "Creating new project directly with the mesh path instead.")
-    else:
-        dialog.done(dialog.Accepted)

    new_action = _get_new_project_action()
    if not new_action:
@ -144,7 +144,8 @@ class CreateTextures(Creator):
                9: "512",
                10: "1024",
                11: "2048",
-                12: "4096"
+                12: "4096",
+                13: "8192"
            },
            default=None,
            label="Size"),
@ -1,3 +1,5 @@
+import copy
+
from qtpy import QtWidgets, QtCore
from ayon_core.pipeline import (
    load,
    get_representation_path,

@ -8,10 +10,133 @@ from ayon_core.hosts.substancepainter.api.pipeline import (
    set_container_metadata,
    remove_container_metadata
)
from ayon_core.hosts.substancepainter.api.lib import prompt_new_file_with_mesh

import substance_painter.project
-import qargparse


def _convert(substance_attr):
    """Return Substance Painter Python API Project attribute from string.

    This converts a string like "ProjectWorkflow.Default" to for example
    the Substance Painter Python API equivalent object, like:
    `substance_painter.project.ProjectWorkflow.Default`

    Args:
        substance_attr (str): The `substance_painter.project` attribute,
            for example "ProjectWorkflow.Default"

    Returns:
        Any: Substance Python API object of the project attribute.

    Raises:
        ValueError: If attribute does not exist on the
            `substance_painter.project` python api.
    """
    root = substance_painter.project
    for attr in substance_attr.split("."):
        root = getattr(root, attr, None)
        if root is None:
            raise ValueError(
                "Substance Painter project attribute"
                f" does not exist: {substance_attr}")

    return root


def get_template_by_name(name: str, templates: list[dict]) -> dict:
    return next(
        template for template in templates
        if template["name"] == name
    )


class SubstanceProjectConfigurationWindow(QtWidgets.QDialog):
    """Dialog that lets the user pick a project template and configuration
    options when loading a mesh.
    """
    def __init__(self, project_templates):
        super(SubstanceProjectConfigurationWindow, self).__init__()
        self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint)

        self.configuration = None
        self.template_names = [template["name"] for template
                               in project_templates]
        self.project_templates = project_templates

        self.widgets = {
            "label": QtWidgets.QLabel(
                "Select your template for project configuration"),
            "template_options": QtWidgets.QComboBox(),
            "import_cameras": QtWidgets.QCheckBox("Import Cameras"),
            "preserve_strokes": QtWidgets.QCheckBox("Preserve Strokes"),
            "clickbox": QtWidgets.QWidget(),
            "combobox": QtWidgets.QWidget(),
            "buttons": QtWidgets.QDialogButtonBox(
                QtWidgets.QDialogButtonBox.Ok
                | QtWidgets.QDialogButtonBox.Cancel)
        }

        self.widgets["template_options"].addItems(self.template_names)

        template_name = self.widgets["template_options"].currentText()
        self._update_to_match_template(template_name)
        # Build clickboxes
        layout = QtWidgets.QHBoxLayout(self.widgets["clickbox"])
        layout.addWidget(self.widgets["import_cameras"])
        layout.addWidget(self.widgets["preserve_strokes"])
        # Build combobox
        layout = QtWidgets.QHBoxLayout(self.widgets["combobox"])
        layout.addWidget(self.widgets["template_options"])
        # Build buttons
        layout = QtWidgets.QHBoxLayout(self.widgets["buttons"])
        # Build layout.
        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(self.widgets["label"])
        layout.addWidget(self.widgets["combobox"])
        layout.addWidget(self.widgets["clickbox"])
        layout.addWidget(self.widgets["buttons"])

        self.widgets["template_options"].currentTextChanged.connect(
            self._update_to_match_template)
        self.widgets["buttons"].accepted.connect(self.on_accept)
        self.widgets["buttons"].rejected.connect(self.on_reject)

    def on_accept(self):
        self.configuration = self.get_project_configuration()
        self.close()

    def on_reject(self):
        self.close()

    def _update_to_match_template(self, template_name):
        template = get_template_by_name(template_name, self.project_templates)
        self.widgets["import_cameras"].setChecked(template["import_cameras"])
        self.widgets["preserve_strokes"].setChecked(
            template["preserve_strokes"])

    def get_project_configuration(self):
        templates = self.project_templates
        template_name = self.widgets["template_options"].currentText()
        template = get_template_by_name(template_name, templates)
        template = copy.deepcopy(template)  # do not edit the original
        template["import_cameras"] = self.widgets["import_cameras"].isChecked()
        template["preserve_strokes"] = (
            self.widgets["preserve_strokes"].isChecked()
        )
        for key in ["normal_map_format",
                    "project_workflow",
                    "tangent_space_mode"]:
            template[key] = _convert(template[key])
        return template

    @classmethod
    def prompt(cls, templates):
        dialog = cls(templates)
        dialog.exec_()
        configuration = dialog.configuration
        dialog.deleteLater()
        return configuration


class SubstanceLoadProjectMesh(load.LoaderPlugin):

@ -25,48 +150,35 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
    icon = "code-fork"
    color = "orange"

-    options = [
-        qargparse.Boolean(
-            "preserve_strokes",
-            default=True,
-            help="Preserve strokes positions on mesh.\n"
-                 "(only relevant when loading into existing project)"
-        ),
-        qargparse.Boolean(
-            "import_cameras",
-            default=True,
-            help="Import cameras from the mesh file."
-        )
-    ]
+    # Defined via settings
+    project_templates = []

-    def load(self, context, name, namespace, data):
+    def load(self, context, name, namespace, options=None):

        # Get user inputs
-        import_cameras = data.get("import_cameras", True)
-        preserve_strokes = data.get("preserve_strokes", True)
-        sp_settings = substance_painter.project.Settings(
-            import_cameras=import_cameras
-        )
+        result = SubstanceProjectConfigurationWindow.prompt(
+            self.project_templates)
+        if not result:
+            # cancelling loader action
+            return
        if not substance_painter.project.is_open():
            # Allow to 'initialize' a new project
            path = self.filepath_from_context(context)
            # TODO: improve the prompt dialog function to not
            # only works for simple polygon scene
            result = prompt_new_file_with_mesh(mesh_filepath=path)
            if not result:
                self.log.info("User cancelled new project prompt."
                              "Creating new project directly from"
                              " Substance Painter API Instead.")
-                settings = substance_painter.project.create(
-                    mesh_file_path=path, settings=sp_settings
-                )
+
+            sp_settings = substance_painter.project.Settings(
+                import_cameras=result["import_cameras"],
+                normal_map_format=result["normal_map_format"],
+                project_workflow=result["project_workflow"],
+                tangent_space_mode=result["tangent_space_mode"],
+                default_texture_resolution=result["default_texture_resolution"]
+            )
+            settings = substance_painter.project.create(
+                mesh_file_path=path, settings=sp_settings
+            )
        else:
            # Reload the mesh
            settings = substance_painter.project.MeshReloadingSettings(
-                import_cameras=import_cameras,
-                preserve_strokes=preserve_strokes
-            )
+                import_cameras=result["import_cameras"],
+                preserve_strokes=result["preserve_strokes"])

        def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus):  # noqa
            if status == substance_painter.project.ReloadMeshStatus.SUCCESS:  # noqa

@ -92,7 +204,7 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
        # from the user's original choice. We don't store 'preserve_strokes'
        # as we always preserve strokes on updates.
        container["options"] = {
-            "import_cameras": import_cameras,
+            "import_cameras": result["import_cameras"],
        }

        set_container_metadata(project_mesh_object_name, container)
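Note: a usage sketch for the `_convert` helper introduced above. "ProjectWorkflow.Default" is taken from its docstring; "NormalMapFormat.DirectX" is assumed to exist on the Substance Painter Python API:

workflow = _convert("ProjectWorkflow.Default")
# -> substance_painter.project.ProjectWorkflow.Default
normal_format = _convert("NormalMapFormat.DirectX")
# -> substance_painter.project.NormalMapFormat.DirectX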
@ -1,5 +1,6 @@
import os

+from pathlib import Path
from ayon_core.lib import get_ayon_launcher_args
from ayon_core.lib.execute import run_detached_process
from ayon_core.addon import (

@ -57,3 +58,62 @@ def launch():
    from ayon_core.tools import traypublisher

    traypublisher.main()


@cli_main.command()
@click_wrap.option(
    "--filepath",
    help="Full path to CSV file with data",
    type=str,
    required=True
)
@click_wrap.option(
    "--project",
    help="Project name in which the context will be used",
    type=str,
    required=True
)
@click_wrap.option(
    "--folder-path",
    help="Folder path in which the context will be used",
    type=str,
    required=True
)
@click_wrap.option(
    "--task",
    help="Task name under the folder in which the context will be used",
    type=str,
    required=False
)
@click_wrap.option(
    "--ignore-validators",
    help="Option to ignore validators",
    type=bool,
    is_flag=True,
    required=False
)
def ingestcsv(
    filepath,
    project,
    folder_path,
    task,
    ignore_validators
):
    """Ingest CSV file into project.

    This command will ingest CSV file into project. CSV file must be in
    specific format. See documentation for more information.
    """
    from .csv_publish import csvpublish

    # use Path to check if csv_filepath exists
    if not Path(filepath).exists():
        raise FileNotFoundError(f"File {filepath} does not exist.")

    csvpublish(
        filepath,
        project,
        folder_path,
        task,
        ignore_validators
    )
client/ayon_core/hosts/traypublisher/csv_publish.py (new file, 86 lines)

@ -0,0 +1,86 @@
import os

import pyblish.api
import pyblish.util

from ayon_api import get_folder_by_path, get_task_by_name
from ayon_core.lib.attribute_definitions import FileDefItem
from ayon_core.pipeline import install_host
from ayon_core.pipeline.create import CreateContext

from ayon_core.hosts.traypublisher.api import TrayPublisherHost


def csvpublish(
    filepath,
    project_name,
    folder_path,
    task_name=None,
    ignore_validators=False
):
    """Publish CSV file.

    Args:
        filepath (str): Path to CSV file.
        project_name (str): Project name.
        folder_path (str): Folder path.
        task_name (Optional[str]): Task name.
        ignore_validators (Optional[bool]): Option to ignore validators.
    """

    # initialization of host
    host = TrayPublisherHost()
    install_host(host)

    # setting host context into project
    host.set_project_name(project_name)

    # form precreate data with field values
    file_field = FileDefItem.from_paths([filepath], False).pop().to_dict()
    precreate_data = {
        "csv_filepath_data": file_field,
    }

    # create context initialization
    create_context = CreateContext(host, headless=True)
    folder_entity = get_folder_by_path(
        project_name,
        folder_path=folder_path,
    )

    if not folder_entity:
        raise ValueError(
            f"Folder path '{folder_path}' doesn't "
            f"exist in project '{project_name}'."
        )

    task_entity = get_task_by_name(
        project_name,
        folder_entity["id"],
        task_name,
    )

    if not task_entity:
        raise ValueError(
            f"Task name '{task_name}' doesn't "
            f"exist in folder '{folder_path}'."
        )

    create_context.create(
        "io.ayon.creators.traypublisher.csv_ingest",
        "Main",
        folder_entity=folder_entity,
        task_entity=task_entity,
        pre_create_data=precreate_data,
    )

    # publishing context initialization
    pyblish_context = pyblish.api.Context()
    pyblish_context.data["create_context"] = create_context

    # redefine targets (skip 'local' to disable validators)
    targets = None
    if ignore_validators:
        targets = ["default", "ingest"]

    # publishing
    pyblish.util.publish(context=pyblish_context, targets=targets)
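Note: `csvpublish` is the same entry point the `ingestcsv` CLI command above calls; a usage sketch invoked directly from Python, with placeholder paths and names:

from ayon_core.hosts.traypublisher.csv_publish import csvpublish

csvpublish(
    filepath="/path/to/ingest.csv",   # placeholder CSV path
    project_name="demo_project",      # placeholder project name
    folder_path="/shots/sh010",       # placeholder folder path
    task_name="compositing",          # placeholder task name
    ignore_validators=False,
)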
@ -0,0 +1,741 @@
import os
import re
import csv
import clique
from io import StringIO
from copy import deepcopy, copy

from ayon_api import get_folder_by_path, get_task_by_name
from ayon_core.pipeline.create import get_product_name
from ayon_core.pipeline import CreatedInstance
from ayon_core.lib import FileDef, BoolDef
from ayon_core.lib.transcoding import (
    VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
)
from ayon_core.pipeline.create import CreatorError
from ayon_core.hosts.traypublisher.api.plugin import (
    TrayPublishCreator
)


class IngestCSV(TrayPublishCreator):
    """CSV ingest creator class"""

    icon = "fa.file"

    label = "CSV Ingest"
    product_type = "csv_ingest_file"
    identifier = "io.ayon.creators.traypublisher.csv_ingest"

    default_variants = ["Main"]

    description = "Ingest products' data from CSV file"
    detailed_description = """
Ingest products' data from CSV file following column and representation
configuration in project settings.
"""

    # Position in the list of creators.
    order = 10

    # settings for this creator
    columns_config = {}
    representations_config = {}

    def create(self, subset_name, instance_data, pre_create_data):
        """Create a product from each row found in the CSV.

        Args:
            subset_name (str): The subset name.
            instance_data (dict): The instance data.
            pre_create_data (dict):
        """

        csv_filepath_data = pre_create_data.get("csv_filepath_data", {})

        folder = csv_filepath_data.get("directory", "")
        if not os.path.exists(folder):
            raise CreatorError(
                f"Directory '{folder}' does not exist."
            )
        filename = csv_filepath_data.get("filenames", [])
        self._process_csv_file(subset_name, instance_data, folder, filename[0])

    def _process_csv_file(
            self, subset_name, instance_data, staging_dir, filename):
        """Process CSV file.

        Args:
            subset_name (str): The subset name.
            instance_data (dict): The instance data.
            staging_dir (str): The staging directory.
            filename (str): The filename.
        """

        # create new instance from the csv file via self function
        self._pass_data_to_csv_instance(
            instance_data,
            staging_dir,
            filename
        )

        csv_instance = CreatedInstance(
            self.product_type, subset_name, instance_data, self
        )
        self._store_new_instance(csv_instance)

        csv_instance["csvFileData"] = {
            "filename": filename,
            "staging_dir": staging_dir,
        }

        # from special function get all data from csv file and convert them
        # to new instances
        csv_data_for_instances = self._get_data_from_csv(
            staging_dir, filename)

        # create instances from csv data via self function
        self._create_instances_from_csv_data(
            csv_data_for_instances, staging_dir
        )

    def _create_instances_from_csv_data(
        self,
        csv_data_for_instances,
        staging_dir
    ):
        """Create instances from csv data"""

        for folder_path, prepared_data in csv_data_for_instances.items():
            project_name = self.create_context.get_current_project_name()
            products = prepared_data["products"]

            for instance_name, product_data in products.items():
                # get important instance variables
                task_name = product_data["task_name"]
                task_type = product_data["task_type"]
                variant = product_data["variant"]
                product_type = product_data["product_type"]
                version = product_data["version"]

                # create subset/product name
                product_name = get_product_name(
                    project_name,
                    task_name,
                    task_type,
                    self.host_name,
                    product_type,
                    variant
                )

                # make sure frame start/end is inherited from csv columns
                # expected frame range data are handles excluded
                for _, repre_data in product_data["representations"].items():  # noqa: E501
                    frame_start = repre_data["frameStart"]
                    frame_end = repre_data["frameEnd"]
                    handle_start = repre_data["handleStart"]
                    handle_end = repre_data["handleEnd"]
                    fps = repre_data["fps"]
                    break

                # try to find any version comment in representation data
                version_comment = next(
                    iter(
                        repre_data["comment"]
                        for repre_data in product_data["representations"].values()  # noqa: E501
                        if repre_data["comment"]
                    ),
                    None
                )

                # try to find any slate switch in representation data
                slate_exists = any(
                    repre_data["slate"]
                    for _, repre_data in product_data["representations"].items()  # noqa: E501
                )

                # get representations from product data
                representations = product_data["representations"]
                label = f"{folder_path}_{product_name}_v{version:>03}"

                families = ["csv_ingest"]
                if slate_exists:
                    # adding slate to families mainly for loaders to be able
                    # to filter out slates
                    families.append("slate")

                # make product data
                product_data = {
                    "name": instance_name,
                    "folderPath": folder_path,
                    "families": families,
                    "label": label,
                    "task": task_name,
                    "variant": variant,
                    "source": "csv",
                    "frameStart": frame_start,
                    "frameEnd": frame_end,
                    "handleStart": handle_start,
                    "handleEnd": handle_end,
                    "fps": fps,
                    "version": version,
                    "comment": version_comment,
                }

                # create new instance
                new_instance = CreatedInstance(
                    product_type, product_name, product_data, self
                )
                self._store_new_instance(new_instance)

                if not new_instance.get("prepared_data_for_repres"):
                    new_instance["prepared_data_for_repres"] = []

                base_thumbnail_repre_data = {
                    "name": "thumbnail",
                    "ext": None,
                    "files": None,
                    "stagingDir": None,
                    "stagingDir_persistent": True,
                    "tags": ["thumbnail", "delete"],
                }
                # need to populate all thumbnails for all representations
                # so we can check if unique thumbnail per representation
                # is needed
                thumbnails = [
                    repre_data["thumbnailPath"]
                    for repre_data in representations.values()
                    if repre_data["thumbnailPath"]
                ]
                multiple_thumbnails = len(set(thumbnails)) > 1
                explicit_output_name = None
                thumbnails_processed = False
                for filepath, repre_data in representations.items():
                    # check if any review derivate tag is present
                    reviewable = any(
                        tag for tag in repre_data.get("tags", [])
                        # tag can be `ftrackreview` or `review`
                        if "review" in tag
                    )
                    # since we need to populate multiple thumbnails as
                    # representation with outputName for (Ftrack instance
                    # integrator) pairing with reviewable video representations
                    if (
                        thumbnails
                        and multiple_thumbnails
                        and reviewable
                    ):
                        # multiple unique thumbnails per representation needs
                        # grouping by outputName
                        # mainly used in Ftrack instance integrator
                        explicit_output_name = repre_data["representationName"]
                        relative_thumbnail_path = repre_data["thumbnailPath"]
                        # representation might not have thumbnail path
                        # so ignore this one
                        if not relative_thumbnail_path:
                            continue
                        thumb_dir, thumb_file = \
                            self._get_refactor_thumbnail_path(
                                staging_dir, relative_thumbnail_path)
                        filename, ext = os.path.splitext(thumb_file)
                        thumbnail_repr_data = deepcopy(
                            base_thumbnail_repre_data)
                        thumbnail_repr_data.update({
                            "name": "thumbnail_{}".format(filename),
                            "ext": ext[1:],
                            "files": thumb_file,
                            "stagingDir": thumb_dir,
                            "outputName": explicit_output_name,
                        })
                        new_instance["prepared_data_for_repres"].append({
                            "type": "thumbnail",
                            "colorspace": None,
                            "representation": thumbnail_repr_data,
                        })
                        # also add thumbnailPath for ayon to integrate
                        if not new_instance.get("thumbnailPath"):
                            new_instance["thumbnailPath"] = (
                                os.path.join(thumb_dir, thumb_file)
                            )
                    elif (
                        thumbnails
                        and not multiple_thumbnails
                        and not thumbnails_processed
                        or not reviewable
                    ):
                        """
                        For case where we have only one thumbnail
                        and not reviewable medias. This needs to be processed
                        only once per instance.
                        """
                        if not thumbnails:
                            continue
                        # here we will use only one thumbnail for
                        # all representations
                        relative_thumbnail_path = repre_data["thumbnailPath"]
                        # popping last thumbnail from list since it is only one
                        # and we do not need to iterate again over it
                        if not relative_thumbnail_path:
                            relative_thumbnail_path = thumbnails.pop()
                        thumb_dir, thumb_file = \
                            self._get_refactor_thumbnail_path(
                                staging_dir, relative_thumbnail_path)
                        _, ext = os.path.splitext(thumb_file)
                        thumbnail_repr_data = deepcopy(
                            base_thumbnail_repre_data)
                        thumbnail_repr_data.update({
                            "ext": ext[1:],
                            "files": thumb_file,
                            "stagingDir": thumb_dir
                        })
                        new_instance["prepared_data_for_repres"].append({
                            "type": "thumbnail",
                            "colorspace": None,
                            "representation": thumbnail_repr_data,
                        })
                        # also add thumbnailPath for ayon to integrate
                        if not new_instance.get("thumbnailPath"):
                            new_instance["thumbnailPath"] = (
                                os.path.join(thumb_dir, thumb_file)
                            )

                        thumbnails_processed = True

                    # get representation data
                    representation_data = self._get_representation_data(
                        filepath, repre_data, staging_dir,
                        explicit_output_name
                    )

                    new_instance["prepared_data_for_repres"].append({
                        "type": "media",
                        "colorspace": repre_data["colorspace"],
                        "representation": representation_data,
                    })

    def _get_refactor_thumbnail_path(
            self, staging_dir, relative_thumbnail_path):
        thumbnail_abs_path = os.path.join(
            staging_dir, relative_thumbnail_path)
        return os.path.split(
            thumbnail_abs_path)

    def _get_representation_data(
        self, filepath, repre_data, staging_dir, explicit_output_name=None
    ):
        """Get representation data

        Args:
            filepath (str): Filepath to representation file.
            repre_data (dict): Representation data from CSV file.
            staging_dir (str): Staging directory.
            explicit_output_name (Optional[str]): Explicit output name.
                For grouping purposes with reviewable components.
                Defaults to None.
        """

        # get extension of file
        basename = os.path.basename(filepath)
        extension = os.path.splitext(filepath)[-1].lower()

        # validate filepath is having correct extension based on output
        repre_name = repre_data["representationName"]
        repre_config_data = None
        for repre in self.representations_config["representations"]:
            if repre["name"] == repre_name:
                repre_config_data = repre
                break

        if not repre_config_data:
            raise CreatorError(
                f"Representation '{repre_name}' not found "
                "in config representation data."
            )

        validate_extensions = repre_config_data["extensions"]
        if extension not in validate_extensions:
            raise CreatorError(
                f"File extension '{extension}' not valid for "
                f"output '{validate_extensions}'."
            )

        is_sequence = (extension in IMAGE_EXTENSIONS)
        # convert ### string in file name to %03d
        # this is for correct frame range validation
        # example: file.###.exr -> file.%03d.exr
        if "#" in basename:
            padding = len(basename.split("#")) - 1
            basename = basename.replace("#" * padding, f"%0{padding}d")
            is_sequence = True

        # make absolute path to file
        absfilepath = os.path.normpath(os.path.join(staging_dir, filepath))
        dirname = os.path.dirname(absfilepath)

        # check if dirname exists
        if not os.path.isdir(dirname):
            raise CreatorError(
                f"Directory '{dirname}' does not exist."
            )

        # collect all data from dirname
        paths_for_collection = []
        for file in os.listdir(dirname):
            filepath = os.path.join(dirname, file)
paths_for_collection.append(filepath)
|
||||
|
||||
collections, _ = clique.assemble(paths_for_collection)
|
||||
|
||||
if collections:
|
||||
collections = collections[0]
|
||||
else:
|
||||
if is_sequence:
|
||||
raise CreatorError(
|
||||
f"No collections found in directory '{dirname}'."
|
||||
)
|
||||
|
||||
frame_start = None
|
||||
frame_end = None
|
||||
if is_sequence:
|
||||
files = [os.path.basename(file) for file in collections]
|
||||
frame_start = list(collections.indexes)[0]
|
||||
frame_end = list(collections.indexes)[-1]
|
||||
else:
|
||||
files = basename
|
||||
|
||||
tags = deepcopy(repre_data["tags"])
|
||||
# if slate in repre_data is True then remove one frame from start
|
||||
if repre_data["slate"]:
|
||||
tags.append("has_slate")
|
||||
|
||||
# get representation data
|
||||
representation_data = {
|
||||
"name": repre_name,
|
||||
"ext": extension[1:],
|
||||
"files": files,
|
||||
"stagingDir": dirname,
|
||||
"stagingDir_persistent": True,
|
||||
"tags": tags,
|
||||
}
|
||||
if extension in VIDEO_EXTENSIONS:
|
||||
representation_data.update({
|
||||
"fps": repre_data["fps"],
|
||||
"outputName": repre_name,
|
||||
})
|
||||
|
||||
if explicit_output_name:
|
||||
representation_data["outputName"] = explicit_output_name
|
||||
|
||||
if frame_start:
|
||||
representation_data["frameStart"] = frame_start
|
||||
if frame_end:
|
||||
representation_data["frameEnd"] = frame_end
|
||||
|
||||
return representation_data
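
For reference, a minimal sketch (not part of the commit) of the padding
conversion and the clique-based frame-range detection used above; the file
names are hypothetical:

import clique

basename = "plate.###.exr"
padding = len(basename.split("#")) - 1                       # 3
basename = basename.replace("#" * padding, f"%0{padding}d")  # "plate.%03d.exr"

files = ["plate.1001.exr", "plate.1002.exr", "plate.1003.exr"]
collections, remainders = clique.assemble(files)
collection = collections[0]
frame_start = list(collection.indexes)[0]   # 1001
frame_end = list(collection.indexes)[-1]    # 1003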

    def _get_data_from_csv(
        self, package_dir, filename
    ):
        """Generate instances from the csv file"""
        # get current project name and code from context.data
        project_name = self.create_context.get_current_project_name()

        csv_file_path = os.path.join(
            package_dir, filename
        )

        # make sure csv file contains columns from following list
        required_columns = [
            column["name"] for column in self.columns_config["columns"]
            if column["required_column"]
        ]

        # read csv file
        with open(csv_file_path, "r") as csv_file:
            csv_content = csv_file.read()

        # read csv file with DictReader
        csv_reader = csv.DictReader(
            StringIO(csv_content),
            delimiter=self.columns_config["csv_delimiter"]
        )

        # fix fieldnames
        # sometimes someone can keep extra space at the start or end of
        # the column name
        all_columns = [
            " ".join(column.rsplit()) for column in csv_reader.fieldnames]

        # return back fixed fieldnames
        csv_reader.fieldnames = all_columns

        # check if csv file contains all required columns
        if any(column not in all_columns for column in required_columns):
            raise CreatorError(
                f"Missing required columns: {required_columns}"
            )

        csv_data = {}
        # get data from csv file
        for row in csv_reader:
            # Get required columns first
            # TODO: will need to be folder path in CSV
            # TODO: `context_asset_name` is now `folder_path`
            folder_path = self._get_row_value_with_validation(
                "Folder Path", row)
            task_name = self._get_row_value_with_validation(
                "Task Name", row)
            version = self._get_row_value_with_validation(
                "Version", row)

            # Get optional columns
            variant = self._get_row_value_with_validation(
                "Variant", row)
            product_type = self._get_row_value_with_validation(
                "Product Type", row)

            pre_product_name = (
                f"{task_name}{variant}{product_type}"
                f"{version}".replace(" ", "").lower()
            )

            # get representation data
            filename, representation_data = \
                self._get_representation_row_data(row)

            # TODO: batch query of all folder paths and task names

            # get folder entity from folder path
            folder_entity = get_folder_by_path(
                project_name, folder_path)

            # make sure asset exists
            if not folder_entity:
                raise CreatorError(
                    f"Asset '{folder_path}' not found."
                )

            # first get all tasks on the folder entity and then find
            task_entity = get_task_by_name(
                project_name, folder_entity["id"], task_name)

            # check if task name is valid task in asset doc
            if not task_entity:
                raise CreatorError(
                    f"Task '{task_name}' not found in asset doc."
                )

            # get all csv data into one dict and make sure there are no
            # duplicates data are already validated and sorted under
            # correct existing asset also check if asset exists and if
            # task name is valid task in asset doc and representations
            # are distributed under products following variants
            if folder_path not in csv_data:
                csv_data[folder_path] = {
                    "folder_entity": folder_entity,
                    "products": {
                        pre_product_name: {
                            "task_name": task_name,
                            "task_type": task_entity["taskType"],
                            "variant": variant,
                            "product_type": product_type,
                            "version": version,
                            "representations": {
                                filename: representation_data,
                            },
                        }
                    }
                }
            else:
                csv_products = csv_data[folder_path]["products"]
                if pre_product_name not in csv_products:
                    csv_products[pre_product_name] = {
                        "task_name": task_name,
                        "task_type": task_entity["taskType"],
                        "variant": variant,
                        "product_type": product_type,
                        "version": version,
                        "representations": {
                            filename: representation_data,
                        },
                    }
                else:
                    csv_representations = \
                        csv_products[pre_product_name]["representations"]
                    if filename in csv_representations:
                        raise CreatorError(
                            f"Duplicate filename '{filename}' in csv file."
                        )
                    csv_representations[filename] = representation_data

        return csv_data
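
A small sketch of the DictReader fieldname cleanup above, with a made-up
two-column CSV (note the stray spaces around the first header name):

import csv
from io import StringIO

csv_content = " Folder Path ,Task Name\n/shots/sh010,comp\n"
csv_reader = csv.DictReader(StringIO(csv_content), delimiter=",")
# collapse accidental whitespace in the header names
csv_reader.fieldnames = [
    " ".join(column.rsplit()) for column in csv_reader.fieldnames
]
for row in csv_reader:
    print(row["Folder Path"], row["Task Name"])  # /shots/sh010 comp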

    def _get_representation_row_data(self, row_data):
        """Get representation row data"""
        # Get required columns first
        file_path = self._get_row_value_with_validation(
            "File Path", row_data)
        frame_start = self._get_row_value_with_validation(
            "Frame Start", row_data)
        frame_end = self._get_row_value_with_validation(
            "Frame End", row_data)
        handle_start = self._get_row_value_with_validation(
            "Handle Start", row_data)
        handle_end = self._get_row_value_with_validation(
            "Handle End", row_data)
        fps = self._get_row_value_with_validation(
            "FPS", row_data)

        # Get optional columns
        thumbnail_path = self._get_row_value_with_validation(
            "Version Thumbnail", row_data)
        colorspace = self._get_row_value_with_validation(
            "Representation Colorspace", row_data)
        comment = self._get_row_value_with_validation(
            "Version Comment", row_data)
        repre = self._get_row_value_with_validation(
            "Representation", row_data)
        slate_exists = self._get_row_value_with_validation(
            "Slate Exists", row_data)
        repre_tags = self._get_row_value_with_validation(
            "Representation Tags", row_data)

        # convert tags value to list
        tags_list = copy(self.representations_config["default_tags"])
        if repre_tags:
            tags_list = []
            tags_delimiter = self.representations_config["tags_delimiter"]
            # strip spaces from repre_tags
            if tags_delimiter in repre_tags:
                tags = repre_tags.split(tags_delimiter)
                for _tag in tags:
                    tags_list.append(("".join(_tag.strip())).lower())
            else:
                tags_list.append(repre_tags)

        representation_data = {
            "colorspace": colorspace,
            "comment": comment,
            "representationName": repre,
            "slate": slate_exists,
            "tags": tags_list,
            "thumbnailPath": thumbnail_path,
            "frameStart": int(frame_start),
            "frameEnd": int(frame_end),
            "handleStart": int(handle_start),
            "handleEnd": int(handle_end),
            "fps": float(fps),
        }
        return file_path, representation_data

    def _get_row_value_with_validation(
        self, column_name, row_data, default_value=None
    ):
        """Get row value with validation"""

        # get column data from column config
        column_data = None
        for column in self.columns_config["columns"]:
            if column["name"] == column_name:
                column_data = column
                break

        if not column_data:
            raise CreatorError(
                f"Column '{column_name}' not found in column config."
            )

        # get column value from row
        column_value = row_data.get(column_name)
        column_required = column_data["required_column"]

        # check if column value is not empty string and column is required
        if column_value == "" and column_required:
            raise CreatorError(
                f"Value in column '{column_name}' is required."
            )

        # get column type
        column_type = column_data["type"]
        # get column validation regex
        column_validation = column_data["validation_pattern"]
        # get column default value
        column_default = default_value or column_data["default"]

        if column_type in ["number", "decimal"] and column_default == 0:
            column_default = None

        # check if column value is not empty string
        if column_value == "":
            # set default value if column value is empty string
            column_value = column_default

        # set column value to correct type following column type
        if column_type == "number" and column_value is not None:
            column_value = int(column_value)
        elif column_type == "decimal" and column_value is not None:
            column_value = float(column_value)
        elif column_type == "bool":
            column_value = column_value in ["true", "True"]

        # check if column value matches validation regex
        if (
            column_value is not None and
            not re.match(str(column_validation), str(column_value))
        ):
            raise CreatorError(
                f"Column '{column_name}' value '{column_value}' "
                f"does not match validation regex '{column_validation}' \n"
                f"Row data: {row_data} \n"
                f"Column data: {column_data}"
            )

        return column_value
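
A condensed sketch of the coercion-and-validation flow in
`_get_row_value_with_validation`; the column config entry and row are
hypothetical:

import re

column_data = {
    "name": "Frame Start",
    "type": "number",
    "required_column": True,
    "validation_pattern": r"^\d+$",
    "default": 0,
}
row_data = {"Frame Start": "1001"}

column_value = row_data.get(column_data["name"])
if column_value == "" and column_data["required_column"]:
    raise ValueError("Value in column 'Frame Start' is required.")
if column_data["type"] == "number" and column_value is not None:
    column_value = int(column_value)
if not re.match(str(column_data["validation_pattern"]), str(column_value)):
    raise ValueError("Value does not match validation pattern.")
print(column_value)  # 1001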

    def _pass_data_to_csv_instance(
        self, instance_data, staging_dir, filename
    ):
        """Pass CSV representation file to instance data"""

        representation = {
            "name": "csv",
            "ext": "csv",
            "files": filename,
            "stagingDir": staging_dir,
            "stagingDir_persistent": True,
        }

        instance_data.update({
            "label": f"CSV: {filename}",
            "representations": [representation],
            "stagingDir": staging_dir,
            "stagingDir_persistent": True,
        })

    def get_instance_attr_defs(self):
        return [
            BoolDef(
                "add_review_family",
                default=True,
                label="Review"
            )
        ]

    def get_pre_create_attr_defs(self):
        """Creating pre-create attributes at creator plugin.

        Returns:
            list: list of attribute object instances
        """
        # Use same attributes as for instance attributes
        attr_defs = [
            FileDef(
                "csv_filepath_data",
                folders=False,
                extensions=[".csv"],
                allow_sequences=False,
                single_item=True,
                label="CSV File",
            ),
        ]
        return attr_defs

@ -402,7 +402,7 @@ or updating already created. Publishing will create OTIO file.
            ):
                continue

            instance = self._make_product_instance(
            self._make_product_instance(
                otio_clip,
                product_type_preset,
                deepcopy(base_instance_data),
@ -0,0 +1,47 @@
from pprint import pformat
import pyblish.api
from ayon_core.pipeline import publish


class CollectCSVIngestInstancesData(
    pyblish.api.InstancePlugin,
    publish.AYONPyblishPluginMixin,
    publish.ColormanagedPyblishPluginMixin
):
    """Collect CSV Ingest data from instance."""

    label = "Collect CSV Ingest instances data"
    order = pyblish.api.CollectorOrder + 0.1
    hosts = ["traypublisher"]
    families = ["csv_ingest"]

    def process(self, instance):

        # expecting [(colorspace, repre_data), ...]
        prepared_repres_data_items = instance.data[
            "prepared_data_for_repres"]

        for prep_repre_data in prepared_repres_data_items:
            repre_type = prep_repre_data["type"]
            colorspace = prep_repre_data["colorspace"]
            repre_data = prep_repre_data["representation"]

            if repre_type == "media" and colorspace is None:
                # TODO: implement colorspace file rules file parsing
                self.log.warning(
                    "Colorspace is not defined in csv for following"
                    f" representation: {pformat(repre_data)}"
                )
            elif repre_type == "media":
                # colorspace name is passed from CSV column
                self.set_representation_colorspace(
                    repre_data, instance.context, colorspace
                )
            elif repre_type == "thumbnail":
                # thumbnails should be skipped from color management
                pass

            instance.data["representations"].append(repre_data)

@ -0,0 +1,31 @@
import pyblish.api

from ayon_core.pipeline import publish


class ExtractCSVFile(publish.Extractor):
    """Extractor exporting the source CSV file."""

    label = "Extract CSV file"
    order = pyblish.api.ExtractorOrder - 0.45
    families = ["csv_ingest_file"]
    hosts = ["traypublisher"]

    def process(self, instance):

        csv_file_data = instance.data["csvFileData"]

        representation_csv = {
            "name": "csv_data",
            "ext": "csv",
            "files": csv_file_data["filename"],
            "stagingDir": csv_file_data["staging_dir"],
            "stagingDir_persistent": True
        }

        instance.data["representations"].append(representation_csv)

        self.log.info("Added CSV file representation: {}".format(
            representation_csv))

@ -16,6 +16,7 @@ class ValidateExistingVersion(
    order = ValidateContentsOrder

    hosts = ["traypublisher"]
    targets = ["local"]

    actions = [RepairAction]

@ -16,6 +16,8 @@ class ValidateFrameRange(OptionalPyblishPluginMixin,
    label = "Validate Frame Range"
    hosts = ["traypublisher"]
    families = ["render", "plate"]
    targets = ["local"]

    order = ValidateContentsOrder

    optional = True

@ -1,6 +1,5 @@
import os

from ayon_core.lib import StringTemplate
from ayon_core.pipeline import (
    registered_host,
    get_current_context,

@ -111,8 +110,6 @@ class LoadWorkfile(plugin.Loader):

        data["version"] = version

        filename = StringTemplate.format_strict_template(
            file_template, data
        )
        filename = work_template["file"].format_strict(data)
        path = os.path.join(work_root, filename)
        host.save_workfile(path)

@ -28,9 +28,11 @@ from .pipeline import (
)

__all__ = [
    "UnrealActorCreator",
    "UnrealAssetCreator",
    "Loader",
    "install",
    "uninstall",
    "Loader",
    "ls",
    "publish",
    "containerise",

@ -94,8 +94,12 @@ def prepare_template_data(fill_pairs):
    output = {}
    for item in valid_items:
        keys, value = item
        upper_value = value.upper()
        capitalized_value = _capitalize_value(value)
        # Convert only string values
        if isinstance(value, str):
            upper_value = value.upper()
            capitalized_value = _capitalize_value(value)
        else:
            upper_value = capitalized_value = value

        first_key = keys.pop(0)
        if not keys:

@ -103,17 +103,17 @@ class FusionSubmitDeadline(

        # Collect all saver instances in context that are to be rendered
        saver_instances = []
        for instance in context:
            if instance.data["productType"] != "render":
        for inst in context:
            if inst.data["productType"] != "render":
                # Allow only saver family instances
                continue

            if not instance.data.get("publish", True):
            if not inst.data.get("publish", True):
                # Skip inactive instances
                continue

            self.log.debug(instance.data["name"])
            saver_instances.append(instance)
            self.log.debug(inst.data["name"])
            saver_instances.append(inst)

        if not saver_instances:
            raise RuntimeError("No instances found for Deadline submission")

@ -13,7 +13,7 @@ class LoaderAddon(AYONAddon, ITrayAddon):
        # Add library tool
        self._loader_imported = False
        try:
            from ayon_core.tools.loader.ui import LoaderWindow
            from ayon_core.tools.loader.ui import LoaderWindow  # noqa F401

            self._loader_imported = True
        except Exception:

@ -1,13 +1,14 @@
# -*- coding: utf-8 -*-
"""Wrapper around Royal Render API."""
import sys
import os
import sys

from ayon_core.lib.local_settings import AYONSettingsRegistry
from ayon_core.lib import Logger, run_subprocess
from .rr_job import RRJob, SubmitFile, SubmitterParameter
from ayon_core.lib import Logger, run_subprocess, AYONSettingsRegistry
from ayon_core.lib.vendor_bin_utils import find_tool_in_custom_paths

from .rr_job import SubmitFile
from .rr_job import RRJob, SubmitterParameter  # noqa F401


class Api:

@ -3,7 +3,6 @@
import os
import attr
import json
import re

import pyblish.api

@ -549,7 +549,7 @@ class Anatomy(BaseAnatomy):
            )
        else:
            # Ask sync server to get roots overrides
            roots_overrides = sitesync.get_site_root_overrides(
            roots_overrides = sitesync_addon.get_site_root_overrides(
                project_name, site_name
            )
            site_cache.update_data(roots_overrides)

@ -14,7 +14,6 @@ from .exceptions import (
    TemplateMissingKey,
    AnatomyTemplateUnsolved,
)
from .roots import RootItem

_PLACEHOLDER = object()

@ -1,7 +1,6 @@
"""Core pipeline functionality"""

import os
import types
import logging
import platform
import uuid

@ -21,7 +20,6 @@ from .anatomy import Anatomy
from .template_data import get_template_data_with_names
from .workfile import (
    get_workdir,
    get_workfile_template_key,
    get_custom_workfile_template_by_string_context,
)
from . import (

@ -1790,10 +1790,10 @@ class CreateContext:

            creator_identifier = creator_class.identifier
            if creator_identifier in creators:
                self.log.warning((
                    "Duplicated Creator identifier. "
                    "Using first and skipping following"
                ))
                self.log.warning(
                    "Duplicate Creator identifier: '%s'. Using first Creator "
                    "and skipping: %s", creator_identifier, creator_class
                )
                continue

            # Filter by host name

@ -6,13 +6,11 @@ from copy import deepcopy

import attr
import ayon_api
import pyblish.api
import clique

from ayon_core.pipeline import (
    get_current_project_name,
    get_representation_path,
    Anatomy,
)
from ayon_core.lib import Logger
from ayon_core.pipeline.publish import KnownPublishError

@ -137,7 +135,7 @@ def get_transferable_representations(instance):
        list of dicts: List of transferable representations.

    """
    anatomy = instance.context.data["anatomy"]  # type: Anatomy
    anatomy = instance.context.data["anatomy"]
    to_transfer = []

    for representation in instance.data.get("representations", []):

@ -166,7 +164,6 @@ def get_transferable_representations(instance):

def create_skeleton_instance(
        instance, families_transfer=None, instance_transfer=None):
    # type: (pyblish.api.Instance, list, dict) -> dict
    """Create skeleton instance from original instance data.

    This will create dictionary containing skeleton

@ -191,7 +188,7 @@ def create_skeleton_instance(

    context = instance.context
    data = instance.data.copy()
    anatomy = instance.context.data["anatomy"]  # type: Anatomy
    anatomy = instance.context.data["anatomy"]

    # get time related data from instance (or context)
    time_data = get_time_data_from_instance_or_context(instance)

@ -620,15 +617,32 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
        aov_patterns = aov_filter

        preview = match_aov_pattern(app, aov_patterns, render_file_name)
        # toggle preview on if multipart is on
        if instance.data.get("multipartExr"):
            log.debug("Adding preview tag because its multipartExr")
            preview = True

        new_instance = deepcopy(skeleton)
        new_instance["productName"] = product_name
        new_instance["productGroup"] = group_name

        # toggle preview on if multipart is on
        # Because we can't query the multipartExr data member of each AOV we'll
        # need to have hardcoded rule of excluding any renders with
        # "cryptomatte" in the file name from being a multipart EXR. This issue
        # happens with Redshift that forces Cryptomatte renders to be separate
        # files even when the rest of the AOVs are merged into a single EXR.
        # There might be an edge case where the main instance has cryptomatte
        # in the name even though it's a multipart EXR.
        if instance.data.get("renderer") == "redshift":
            if (
                instance.data.get("multipartExr") and
                "cryptomatte" not in render_file_name.lower()
            ):
                log.debug("Adding preview tag because it's multipartExr")
                preview = True
            else:
                new_instance["multipartExr"] = False
        elif instance.data.get("multipartExr"):
            log.debug("Adding preview tag because it's multipartExr")
            preview = True

        # explicitly disable review by user
        preview = preview and not do_not_add_review
        if preview:

@ -751,7 +765,6 @@ def get_resources(project_name, version_entity, extension=None):


def create_skeleton_instance_cache(instance):
    # type: (pyblish.api.Instance, list, dict) -> dict
    """Create skeleton instance from original instance data.

    This will create dictionary containing skeleton

@ -771,7 +784,7 @@ def create_skeleton_instance_cache(instance):

    context = instance.context
    data = instance.data.copy()
    anatomy = instance.context.data["anatomy"]  # type: Anatomy
    anatomy = instance.context.data["anatomy"]

    # get time related data from instance (or context)
    time_data = get_time_data_from_instance_or_context(instance)

@ -1005,7 +1018,7 @@ def copy_extend_frames(instance, representation):
    start = instance.data.get("frameStart")
    end = instance.data.get("frameEnd")
    project_name = instance.context.data["project"]
    anatomy = instance.context.data["anatomy"]  # type: Anatomy
    anatomy = instance.context.data["anatomy"]

    folder_entity = ayon_api.get_folder_by_path(
        project_name, instance.data.get("folderPath")

@ -81,6 +81,9 @@ class RenderInstance(object):
    outputDir = attr.ib(default=None)
    context = attr.ib(default=None)

    # The source instance the data of this render instance should merge into
    source_instance = attr.ib(default=None, type=pyblish.api.Instance)

    @frameStart.validator
    def check_frame_start(self, _, value):
        """Validate that frame start is not larger than frame end."""

@ -214,8 +217,11 @@ class AbstractCollectRender(pyblish.api.ContextPlugin):
            data = self.add_additional_data(data)
            render_instance_dict = attr.asdict(render_instance)

            instance = context.create_instance(render_instance.name)
            instance.data["label"] = render_instance.label
            # Merge into source instance if provided, otherwise create instance
            instance = render_instance_dict.pop("source_instance", None)
            if instance is None:
                instance = context.create_instance(render_instance.name)

            instance.data.update(render_instance_dict)
            instance.data.update(data)
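
A minimal sketch of the pop-then-merge pattern above, using plain dicts as
stand-ins for pyblish instances; popping "source_instance" keeps it out of
the merged data:

render_instance_dict = {"name": "renderMain", "source_instance": None}
instance = render_instance_dict.pop("source_instance", None)
if instance is None:
    # stand-in for context.create_instance(render_instance.name)
    instance = {"data": {}}
instance["data"].update(render_instance_dict)
print(instance)  # {'data': {'name': 'renderMain'}}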

@ -13,7 +13,6 @@ Resources:
"""

import os
import re
import json
import logging

@ -329,7 +329,7 @@ class AbstractTemplateBuilder(object):
        is good practice to check if the same value is not already stored under
        different key or if the key is not already used for something else.

        Key should be self explanatory to content.
        Key should be self-explanatory to content.
        - wrong: 'folder'
        - good: 'folder_name'

@ -375,7 +375,7 @@ class AbstractTemplateBuilder(object):
        is good practice to check if the same value is not already stored under
        different key or if the key is not already used for something else.

        Key should be self explanatory to content.
        Key should be self-explanatory to content.
        - wrong: 'folder'
        - good: 'folder_path'

@ -395,7 +395,7 @@ class AbstractTemplateBuilder(object):
        is good practice to check if the same value is not already stored under
        different key or if the key is not already used for something else.

        Key should be self explanatory to content.
        Key should be self-explanatory to content.
        - wrong: 'folder'
        - good: 'folder_path'

@ -466,7 +466,7 @@ class AbstractTemplateBuilder(object):

        return list(sorted(
            placeholders,
            key=lambda i: i.order
            key=lambda placeholder: placeholder.order
        ))

    def build_template(

@ -498,15 +498,21 @@ class AbstractTemplateBuilder(object):
            process if version is created

        """
        template_preset = self.get_template_preset()

        if template_path is None:
            template_path = template_preset["path"]

        if keep_placeholders is None:
            keep_placeholders = template_preset["keep_placeholder"]
        if create_first_version is None:
            create_first_version = template_preset["create_first_version"]
        if any(
            value is None
            for value in [
                template_path,
                keep_placeholders,
                create_first_version,
            ]
        ):
            template_preset = self.get_template_preset()
            if template_path is None:
                template_path = template_preset["path"]
            if keep_placeholders is None:
                keep_placeholders = template_preset["keep_placeholder"]
            if create_first_version is None:
                create_first_version = template_preset["create_first_version"]

        # check if first version is created
        created_version_workfile = False

@ -685,7 +691,7 @@ class AbstractTemplateBuilder(object):
            for placeholder in placeholders
        }
        all_processed = len(placeholders) == 0
        # Counter is checked at the ned of a loop so the loop happens at least
        # Counter is checked at the end of a loop so the loop happens at least
        # once.
        iter_counter = 0
        while not all_processed:

@ -772,12 +778,14 @@ class AbstractTemplateBuilder(object):
            - 'project_settings/{host name}/templated_workfile_build/profiles'

        Returns:
            str: Path to a template file with placeholders.
            dict: Dictionary with `path`, `keep_placeholder` and
                `create_first_version` settings from the template preset
                for current context.

        Raises:
            TemplateProfileNotFound: When profiles are not filled.
            TemplateLoadFailed: Profile was found but path is not set.
            TemplateNotFound: Path was set but file does not exists.
            TemplateNotFound: Path was set but file does not exist.
        """

        host_name = self.host_name

@ -1045,7 +1053,7 @@ class PlaceholderPlugin(object):

        Using shared data from builder but stored under plugin identifier.

        Key should be self explanatory to content.
        Key should be self-explanatory to content.
        - wrong: 'folder'
        - good: 'folder_path'

@ -1085,7 +1093,7 @@ class PlaceholderPlugin(object):

        Using shared data from builder but stored under plugin identifier.

        Key should be self explanatory to content.
        Key should be self-explanatory to content.
        - wrong: 'folder'
        - good: 'folder_path'

@ -1107,10 +1115,10 @@ class PlaceholderItem(object):
    """Item representing single item in scene that is a placeholder to process.

    Items are always created and updated by their plugins. Each plugin can use
    modified class of 'PlacehoderItem' but only to add more options instead of
    modified class of 'PlaceholderItem' but only to add more options instead of
    new other.

    Scene identifier is used to avoid processing of the palceholder item
    Scene identifier is used to avoid processing of the placeholder item
    multiple times so must be unique across whole workfile builder.

    Args:

@ -1162,7 +1170,7 @@ class PlaceholderItem(object):
    """Placeholder data which can modify how placeholder is processed.

    Possible general keys
    - order: Can define the order in which is palceholder processed.
    - order: Can define the order in which is placeholder processed.
        Lower == earlier.

    Other keys are defined by placeholder and should validate them on item

@ -1264,11 +1272,9 @@ class PlaceholderLoadMixin(object):
        """Unified attribute definitions for load placeholder.

        Common function for placeholder plugins used for loading of
        repsentations. Use it in 'get_placeholder_options'.
        representations. Use it in 'get_placeholder_options'.

        Args:
            plugin (PlaceholderPlugin): Plugin used for loading of
                representations.
            options (Dict[str, Any]): Already available options which are used
                as defaults for attributes.

@ -1468,7 +1474,9 @@ class PlaceholderLoadMixin(object):
        product_name_regex = None
        if product_name_regex_value:
            product_name_regex = re.compile(product_name_regex_value)
        product_type = placeholder.data["family"]
        product_type = placeholder.data.get("product_type")
        if product_type is None:
            product_type = placeholder.data["family"]

        builder_type = placeholder.data["builder_type"]
        folder_ids = []

@ -1529,35 +1537,22 @@ class PlaceholderLoadMixin(object):

        pass

    def _reduce_last_version_repre_entities(self, representations):
        """Reduce representations to last verison."""
    def _reduce_last_version_repre_entities(self, repre_contexts):
        """Reduce representations to last version."""

        mapping = {}
        # TODO use representation context with entities
        # - using 'folder', 'subset' and 'version' from context on
        #   representation is danger
        for repre_entity in representations:
            repre_context = repre_entity["context"]

            folder_name = repre_context["asset"]
            product_name = repre_context["subset"]
            version = repre_context.get("version", -1)

            if folder_name not in mapping:
                mapping[folder_name] = {}

            product_mapping = mapping[folder_name]
            if product_name not in product_mapping:
                product_mapping[product_name] = collections.defaultdict(list)

            version_mapping = product_mapping[product_name]
            version_mapping[version].append(repre_entity)
        version_mapping_by_product_id = {}
        for repre_context in repre_contexts:
            product_id = repre_context["product"]["id"]
            version = repre_context["version"]["version"]
            version_mapping = version_mapping_by_product_id.setdefault(
                product_id, {}
            )
            version_mapping.setdefault(version, []).append(repre_context)

        output = []
        for product_mapping in mapping.values():
            for version_mapping in product_mapping.values():
                last_version = tuple(sorted(version_mapping.keys()))[-1]
                output.extend(version_mapping[last_version])
        for version_mapping in version_mapping_by_product_id.values():
            last_version = max(version_mapping.keys())
            output.extend(version_mapping[last_version])
        return output
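
A sketch of the new reduction with hypothetical representation contexts;
only the highest version per product id survives:

repre_contexts = [
    {"product": {"id": "p1"}, "version": {"version": 1}, "name": "exr_v1"},
    {"product": {"id": "p1"}, "version": {"version": 2}, "name": "exr_v2"},
    {"product": {"id": "p2"}, "version": {"version": 5}, "name": "mov_v5"},
]
version_mapping_by_product_id = {}
for repre_context in repre_contexts:
    product_id = repre_context["product"]["id"]
    version = repre_context["version"]["version"]
    version_mapping = version_mapping_by_product_id.setdefault(product_id, {})
    version_mapping.setdefault(version, []).append(repre_context)

output = []
for version_mapping in version_mapping_by_product_id.values():
    output.extend(version_mapping[max(version_mapping)])
print([repre["name"] for repre in output])  # ['exr_v2', 'mov_v5']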

    def populate_load_placeholder(self, placeholder, ignore_repre_ids=None):

@ -1585,32 +1580,33 @@ class PlaceholderLoadMixin(object):
        loader_name = placeholder.data["loader"]
        loader_args = self.parse_loader_args(placeholder.data["loader_args"])

        placeholder_representations = self._get_representations(placeholder)
        placeholder_representations = [
            repre_entity
            for repre_entity in self._get_representations(placeholder)
            if repre_entity["id"] not in ignore_repre_ids
        ]

        filtered_representations = []
        for representation in self._reduce_last_version_repre_entities(
            placeholder_representations
        ):
            repre_id = representation["id"]
            if repre_id not in ignore_repre_ids:
                filtered_representations.append(representation)

        if not filtered_representations:
        repre_load_contexts = get_representation_contexts(
            self.project_name, placeholder_representations
        )
        filtered_repre_contexts = self._reduce_last_version_repre_entities(
            repre_load_contexts.values()
        )
        if not filtered_repre_contexts:
            self.log.info((
                "There's no representation for this placeholder: {}"
            ).format(placeholder.scene_identifier))
            if not placeholder.data.get("keep_placeholder", True):
                self.delete_placeholder(placeholder)
            return

        repre_load_contexts = get_representation_contexts(
            self.project_name, filtered_representations
        )
        loaders_by_name = self.builder.get_loaders_by_name()
        self._before_placeholder_load(
            placeholder
        )

        failed = False
        for repre_load_context in repre_load_contexts.values():
        for repre_load_context in filtered_repre_contexts:
            folder_path = repre_load_context["folder"]["path"]
            product_name = repre_load_context["product"]["name"]
            representation = repre_load_context["representation"]

@ -1695,8 +1691,6 @@ class PlaceholderCreateMixin(object):
        publishable instances. Use it with 'get_placeholder_options'.

        Args:
            plugin (PlaceholderPlugin): Plugin used for creating of
                publish instances.
            options (Dict[str, Any]): Already available options which are used
                as defaults for attributes.

@ -3,8 +3,6 @@ import platform
import subprocess
from string import Formatter

import ayon_api

from ayon_core.pipeline import (
    Anatomy,
    LauncherAction,

@ -1,501 +1,426 @@
# TODO This plugin is not converted for AYON
#
# import collections
# import os
# import uuid
#
# import clique
# import ayon_api
# from pymongo import UpdateOne
# import qargparse
# from qtpy import QtWidgets, QtCore
#
# from ayon_core import style
# from ayon_core.addon import AddonsManager
# from ayon_core.lib import format_file_size
# from ayon_core.pipeline import load, Anatomy
# from ayon_core.pipeline.load import (
#     get_representation_path_with_anatomy,
#     InvalidRepresentationContext,
# )
#
#
# class DeleteOldVersions(load.ProductLoaderPlugin):
#     """Deletes specific number of old version"""
#
#     is_multiple_contexts_compatible = True
#     sequence_splitter = "__sequence_splitter__"
#
#     representations = {"*"}
#     product_types = {"*"}
#     tool_names = ["library_loader"]
#
#     label = "Delete Old Versions"
#     order = 35
#     icon = "trash"
#     color = "#d8d8d8"
#
#     options = [
#         qargparse.Integer(
#             "versions_to_keep", default=2, min=0, help="Versions to keep:"
#         ),
#         qargparse.Boolean(
#             "remove_publish_folder", help="Remove publish folder:"
#         )
#     ]
#
#     def delete_whole_dir_paths(self, dir_paths, delete=True):
#         size = 0
#
#         for dir_path in dir_paths:
#             # Delete all files and fodlers in dir path
#             for root, dirs, files in os.walk(dir_path, topdown=False):
#                 for name in files:
#                     file_path = os.path.join(root, name)
#                     size += os.path.getsize(file_path)
#                     if delete:
#                         os.remove(file_path)
#                         self.log.debug("Removed file: {}".format(file_path))
#
#                 for name in dirs:
#                     if delete:
#                         os.rmdir(os.path.join(root, name))
#
#             if not delete:
#                 continue
#
#             # Delete even the folder and it's parents folders if they are empty
#             while True:
#                 if not os.path.exists(dir_path):
#                     dir_path = os.path.dirname(dir_path)
#                     continue
#
#                 if len(os.listdir(dir_path)) != 0:
#                     break
#
#                 os.rmdir(os.path.join(dir_path))
#
#         return size
#
#     def path_from_representation(self, representation, anatomy):
#         try:
#             context = representation["context"]
#         except KeyError:
#             return (None, None)
#
#         try:
#             path = get_representation_path_with_anatomy(
#                 representation, anatomy
#             )
#         except InvalidRepresentationContext:
#             return (None, None)
#
#         sequence_path = None
#         if "frame" in context:
#             context["frame"] = self.sequence_splitter
#             sequence_path = get_representation_path_with_anatomy(
#                 representation, anatomy
#             )
#
#         if sequence_path:
#             sequence_path = sequence_path.normalized()
#
#         return (path.normalized(), sequence_path)
#
#     def delete_only_repre_files(self, dir_paths, file_paths, delete=True):
#         size = 0
#
#         for dir_id, dir_path in dir_paths.items():
#             dir_files = os.listdir(dir_path)
#             collections, remainders = clique.assemble(dir_files)
#             for file_path, seq_path in file_paths[dir_id]:
#                 file_path_base = os.path.split(file_path)[1]
#                 # Just remove file if `frame` key was not in context or
#                 # filled path is in remainders (single file sequence)
#                 if not seq_path or file_path_base in remainders:
#                     if not os.path.exists(file_path):
#                         self.log.debug(
#                             "File was not found: {}".format(file_path)
#                         )
#                         continue
#
#                     size += os.path.getsize(file_path)
#
#                     if delete:
#                         os.remove(file_path)
#                         self.log.debug("Removed file: {}".format(file_path))
#
#                     if file_path_base in remainders:
#                         remainders.remove(file_path_base)
#                     continue
#
#                 seq_path_base = os.path.split(seq_path)[1]
#                 head, tail = seq_path_base.split(self.sequence_splitter)
#
#                 final_col = None
#                 for collection in collections:
#                     if head != collection.head or tail != collection.tail:
#                         continue
#                     final_col = collection
#                     break
#
#                 if final_col is not None:
#                     # Fill full path to head
#                     final_col.head = os.path.join(dir_path, final_col.head)
#                     for _file_path in final_col:
#                         if os.path.exists(_file_path):
#
#                             size += os.path.getsize(_file_path)
#
#                             if delete:
#                                 os.remove(_file_path)
#                                 self.log.debug(
#                                     "Removed file: {}".format(_file_path)
#                                 )
#
#                     _seq_path = final_col.format("{head}{padding}{tail}")
#                     self.log.debug("Removed files: {}".format(_seq_path))
#                     collections.remove(final_col)
#
#                 elif os.path.exists(file_path):
#                     size += os.path.getsize(file_path)
#
#                     if delete:
#                         os.remove(file_path)
#                         self.log.debug("Removed file: {}".format(file_path))
#                 else:
#                     self.log.debug(
#                         "File was not found: {}".format(file_path)
#                     )
#
#         # Delete as much as possible parent folders
#         if not delete:
#             return size
#
#         for dir_path in dir_paths.values():
#             while True:
#                 if not os.path.exists(dir_path):
#                     dir_path = os.path.dirname(dir_path)
#                     continue
#
#                 if len(os.listdir(dir_path)) != 0:
#                     break
#
#                 self.log.debug("Removed folder: {}".format(dir_path))
#                 os.rmdir(dir_path)
#
#         return size
#
#     def message(self, text):
#         msgBox = QtWidgets.QMessageBox()
#         msgBox.setText(text)
#         msgBox.setStyleSheet(style.load_stylesheet())
#         msgBox.setWindowFlags(
#             msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint
#         )
#         msgBox.exec_()
#
#     def get_data(self, context, versions_count):
#         product_entity = context["product"]
#         folder_entity = context["folder"]
#         project_name = context["project"]["name"]
#         anatomy = Anatomy(project_name)
#
#         versions = list(ayon_api.get_versions(
#             project_name, product_ids=[product_entity["id"]]
#         ))
#
#         versions_by_parent = collections.defaultdict(list)
#         for ent in versions:
#             versions_by_parent[ent["productId"]].append(ent)
#
#         def sort_func(ent):
#             return int(ent["version"])
#
#         all_last_versions = []
#         for _parent_id, _versions in versions_by_parent.items():
#             for idx, version in enumerate(
#                 sorted(_versions, key=sort_func, reverse=True)
#             ):
#                 if idx >= versions_count:
#                     break
#                 all_last_versions.append(version)
#
#         self.log.debug("Collected versions ({})".format(len(versions)))
#
#         # Filter latest versions
#         for version in all_last_versions:
#             versions.remove(version)
#
#         # Update versions_by_parent without filtered versions
#         versions_by_parent = collections.defaultdict(list)
#         for ent in versions:
#             versions_by_parent[ent["productId"]].append(ent)
#
#         # Filter already deleted versions
#         versions_to_pop = []
#         for version in versions:
#             version_tags = version["data"].get("tags")
#             if version_tags and "deleted" in version_tags:
#                 versions_to_pop.append(version)
#
#         for version in versions_to_pop:
#             msg = "Folder: \"{}\" | Product: \"{}\" | Version: \"{}\"".format(
#                 folder_entity["path"],
#                 product_entity["name"],
#                 version["version"]
#             )
#             self.log.debug((
#                 "Skipping version. Already tagged as `deleted`. < {} >"
#             ).format(msg))
#             versions.remove(version)
#
#         version_ids = [ent["id"] for ent in versions]
#
#         self.log.debug(
#             "Filtered versions to delete ({})".format(len(version_ids))
#         )
#
#         if not version_ids:
#             msg = "Skipping processing. Nothing to delete on {}/{}".format(
#                 folder_entity["path"], product_entity["name"]
#             )
#             self.log.info(msg)
#             print(msg)
#             return
#
#         repres = list(ayon_api.get_representations(
#             project_name, version_ids=version_ids
#         ))
#
#         self.log.debug(
#             "Collected representations to remove ({})".format(len(repres))
#         )
#
#         dir_paths = {}
#         file_paths_by_dir = collections.defaultdict(list)
#         for repre in repres:
#             file_path, seq_path = self.path_from_representation(
#                 repre, anatomy
#             )
#             if file_path is None:
#                 self.log.debug((
#                     "Could not format path for represenation \"{}\""
#                 ).format(str(repre)))
#                 continue
#
#             dir_path = os.path.dirname(file_path)
#             dir_id = None
#             for _dir_id, _dir_path in dir_paths.items():
#                 if _dir_path == dir_path:
#                     dir_id = _dir_id
#                     break
#
#             if dir_id is None:
#                 dir_id = uuid.uuid4()
#                 dir_paths[dir_id] = dir_path
#
#             file_paths_by_dir[dir_id].append([file_path, seq_path])
#
#         dir_ids_to_pop = []
#         for dir_id, dir_path in dir_paths.items():
#             if os.path.exists(dir_path):
#                 continue
#
#             dir_ids_to_pop.append(dir_id)
#
#         # Pop dirs from both dictionaries
#         for dir_id in dir_ids_to_pop:
#             dir_paths.pop(dir_id)
#             paths = file_paths_by_dir.pop(dir_id)
#             # TODO report of missing directories?
#             paths_msg = ", ".join([
#                 "'{}'".format(path[0].replace("\\", "/")) for path in paths
#             ])
#             self.log.debug((
#                 "Folder does not exist. Deleting it's files skipped: {}"
#             ).format(paths_msg))
#
#         return {
#             "dir_paths": dir_paths,
#             "file_paths_by_dir": file_paths_by_dir,
#             "versions": versions,
#             "folder": folder_entity,
#             "product": product_entity,
#             "archive_product": versions_count == 0
#         }
#
#     def main(self, project_name, data, remove_publish_folder):
#         # Size of files.
#         size = 0
#         if not data:
#             return size
#
#         if remove_publish_folder:
#             size = self.delete_whole_dir_paths(data["dir_paths"].values())
#         else:
#             size = self.delete_only_repre_files(
#                 data["dir_paths"], data["file_paths_by_dir"]
#             )
#
#         mongo_changes_bulk = []
#         for version in data["versions"]:
#             orig_version_tags = version["data"].get("tags") or []
#             version_tags = [tag for tag in orig_version_tags]
#             if "deleted" not in version_tags:
#                 version_tags.append("deleted")
#
#             if version_tags == orig_version_tags:
#                 continue
#
#             update_query = {"id": version["id"]}
#             update_data = {"$set": {"data.tags": version_tags}}
#             mongo_changes_bulk.append(UpdateOne(update_query, update_data))
#
#         if data["archive_product"]:
#             mongo_changes_bulk.append(UpdateOne(
#                 {
#                     "id": data["product"]["id"],
#                     "type": "subset"
#                 },
#                 {"$set": {"type": "archived_subset"}}
#             ))
#
#         if mongo_changes_bulk:
#             dbcon = AvalonMongoDB()
#             dbcon.Session["AYON_PROJECT_NAME"] = project_name
#             dbcon.install()
#             dbcon.bulk_write(mongo_changes_bulk)
#             dbcon.uninstall()
#
#         self._ftrack_delete_versions(data)
#
#         return size
#
#     def _ftrack_delete_versions(self, data):
#         """Delete version on ftrack.
#
#         Handling of ftrack logic in this plugin is not ideal. But in OP3 it is
#         almost impossible to solve the issue other way.
#
#         Note:
#             Asset versions on ftrack are not deleted but marked as
#             "not published" which cause that they're invisible.
#
#         Args:
#             data (dict): Data sent to product loader with full context.
#         """
#
#         # First check for ftrack id on folder entity
#         # - skip if ther is none
#         ftrack_id = data["folder"]["attrib"].get("ftrackId")
#         if not ftrack_id:
#             self.log.info((
#                 "Folder does not have filled ftrack id. Skipped delete"
#                 " of ftrack version."
#             ))
#             return
#
#         # Check if ftrack module is enabled
#         addons_manager = AddonsManager()
#         ftrack_addon = addons_manager.get("ftrack")
#         if not ftrack_addon or not ftrack_addon.enabled:
#             return
#
#         import ftrack_api
#
#         session = ftrack_api.Session()
#         product_name = data["product"]["name"]
#         versions = {
#             '"{}"'.format(version_doc["name"])
#             for version_doc in data["versions"]
#         }
#         asset_versions = session.query(
#             (
#                 "select id, is_published from AssetVersion where"
#                 " asset.parent.id is \"{}\""
#                 " and asset.name is \"{}\""
#                 " and version in ({})"
#             ).format(
#                 ftrack_id,
#                 product_name,
#                 ",".join(versions)
#             )
#         ).all()
#
#         # Set attribute `is_published` to `False` on ftrack AssetVersions
#         for asset_version in asset_versions:
#             asset_version["is_published"] = False
#
#         try:
#             session.commit()
#
#         except Exception:
#             msg = (
#                 "Could not set `is_published` attribute to `False`"
#                 " for selected AssetVersions."
#             )
#             self.log.error(msg)
#             self.message(msg)
#
#     def load(self, contexts, name=None, namespace=None, options=None):
#         try:
#             size = 0
#             for count, context in enumerate(contexts):
#                 versions_to_keep = 2
#                 remove_publish_folder = False
#                 if options:
#                     versions_to_keep = options.get(
#                         "versions_to_keep", versions_to_keep
#                     )
#                     remove_publish_folder = options.get(
#                         "remove_publish_folder", remove_publish_folder
#                     )
#
#                 data = self.get_data(context, versions_to_keep)
#                 if not data:
#                     continue
#
#                 project_name = context["project"]["name"]
#                 size += self.main(project_name, data, remove_publish_folder)
#                 print("Progressing {}/{}".format(count + 1, len(contexts)))
#
#             msg = "Total size of files: {}".format(format_file_size(size))
#             self.log.info(msg)
#             self.message(msg)
#
#         except Exception:
#             self.log.error("Failed to delete versions.", exc_info=True)
#
#
# class CalculateOldVersions(DeleteOldVersions):
#     """Calculate file size of old versions"""
#     label = "Calculate Old Versions"
#     order = 30
#     tool_names = ["library_loader"]
#
#     options = [
#         qargparse.Integer(
#             "versions_to_keep", default=2, min=0, help="Versions to keep:"
#         ),
#         qargparse.Boolean(
#             "remove_publish_folder", help="Remove publish folder:"
#         )
#     ]
#
#     def main(self, project_name, data, remove_publish_folder):
#         size = 0
#
#         if not data:
#             return size
#
#         if remove_publish_folder:
#             size = self.delete_whole_dir_paths(
#                 data["dir_paths"].values(), delete=False
#             )
#         else:
#             size = self.delete_only_repre_files(
#                 data["dir_paths"], data["file_paths_by_dir"], delete=False
#             )
#
#         return size
import collections
|
||||
import os
|
||||
import uuid
|
||||
|
||||
import clique
|
||||
import ayon_api
|
||||
from ayon_api.operations import OperationsSession
|
||||
import qargparse
|
||||
from qtpy import QtWidgets, QtCore
|
||||
|
||||
from ayon_core import style
|
||||
from ayon_core.lib import format_file_size
|
||||
from ayon_core.pipeline import load, Anatomy
|
||||
from ayon_core.pipeline.load import (
|
||||
get_representation_path_with_anatomy,
|
||||
InvalidRepresentationContext,
|
||||
)
|
||||
|
||||
|
||||
class DeleteOldVersions(load.ProductLoaderPlugin):
|
||||
"""Deletes specific number of old version"""
|
||||

    is_multiple_contexts_compatible = True
    sequence_splitter = "__sequence_splitter__"

    representations = ["*"]
    product_types = {"*"}
    tool_names = ["library_loader"]

    label = "Delete Old Versions"
    order = 35
    icon = "trash"
    color = "#d8d8d8"

    options = [
        qargparse.Integer(
            "versions_to_keep", default=2, min=0, help="Versions to keep:"
        ),
        qargparse.Boolean(
            "remove_publish_folder", help="Remove publish folder:"
        )
    ]

    def delete_whole_dir_paths(self, dir_paths, delete=True):
        size = 0

        for dir_path in dir_paths:
            # Delete all files and folders in dir path
            for root, dirs, files in os.walk(dir_path, topdown=False):
                for name in files:
                    file_path = os.path.join(root, name)
                    size += os.path.getsize(file_path)
                    if delete:
                        os.remove(file_path)
                        self.log.debug("Removed file: {}".format(file_path))

                for name in dirs:
                    if delete:
                        os.rmdir(os.path.join(root, name))

            if not delete:
                continue

            # Delete the folder and its parent folders if they are empty
            while True:
                if not os.path.exists(dir_path):
                    dir_path = os.path.dirname(dir_path)
                    continue

                if len(os.listdir(dir_path)) != 0:
                    break

                os.rmdir(os.path.join(dir_path))

        return size

    def path_from_representation(self, representation, anatomy):
        try:
            context = representation["context"]
        except KeyError:
            return (None, None)

        try:
            path = get_representation_path_with_anatomy(
                representation, anatomy
            )
        except InvalidRepresentationContext:
            return (None, None)

        sequence_path = None
        if "frame" in context:
            context["frame"] = self.sequence_splitter
            sequence_path = get_representation_path_with_anatomy(
                representation, anatomy
            )

        if sequence_path:
            sequence_path = sequence_path.normalized()

        return (path.normalized(), sequence_path)

    def delete_only_repre_files(self, dir_paths, file_paths, delete=True):
        size = 0

        for dir_id, dir_path in dir_paths.items():
            dir_files = os.listdir(dir_path)
            collections, remainders = clique.assemble(dir_files)
            for file_path, seq_path in file_paths[dir_id]:
                file_path_base = os.path.split(file_path)[1]
                # Just remove file if `frame` key was not in context or
                # filled path is in remainders (single file sequence)
                if not seq_path or file_path_base in remainders:
                    if not os.path.exists(file_path):
                        self.log.debug(
                            "File was not found: {}".format(file_path)
                        )
                        continue

                    size += os.path.getsize(file_path)

                    if delete:
                        os.remove(file_path)
                        self.log.debug("Removed file: {}".format(file_path))

                    if file_path_base in remainders:
                        remainders.remove(file_path_base)
                    continue

                seq_path_base = os.path.split(seq_path)[1]
                head, tail = seq_path_base.split(self.sequence_splitter)

                final_col = None
                for collection in collections:
                    if head != collection.head or tail != collection.tail:
                        continue
                    final_col = collection
                    break

                if final_col is not None:
                    # Fill full path to head
                    final_col.head = os.path.join(dir_path, final_col.head)
                    for _file_path in final_col:
                        if os.path.exists(_file_path):

                            size += os.path.getsize(_file_path)

                            if delete:
                                os.remove(_file_path)
                                self.log.debug(
                                    "Removed file: {}".format(_file_path)
                                )

                    _seq_path = final_col.format("{head}{padding}{tail}")
                    self.log.debug("Removed files: {}".format(_seq_path))
                    collections.remove(final_col)

                elif os.path.exists(file_path):
                    size += os.path.getsize(file_path)

                    if delete:
                        os.remove(file_path)
                        self.log.debug("Removed file: {}".format(file_path))
                else:
                    self.log.debug(
                        "File was not found: {}".format(file_path)
                    )

        # Delete as many parent folders as possible
        if not delete:
            return size

        for dir_path in dir_paths.values():
            while True:
                if not os.path.exists(dir_path):
                    dir_path = os.path.dirname(dir_path)
                    continue

                if len(os.listdir(dir_path)) != 0:
                    break

                self.log.debug("Removed folder: {}".format(dir_path))
                os.rmdir(dir_path)

        return size

    def message(self, text):
        msgBox = QtWidgets.QMessageBox()
        msgBox.setText(text)
        msgBox.setStyleSheet(style.load_stylesheet())
        msgBox.setWindowFlags(
            msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint
        )
        msgBox.exec_()

    def get_data(self, context, versions_count):
        product_entity = context["product"]
        folder_entity = context["folder"]
        project_name = context["project"]["name"]
        anatomy = Anatomy(project_name, project_entity=context["project"])

        version_fields = ayon_api.get_default_fields_for_type("version")
        version_fields.add("tags")
        versions = list(ayon_api.get_versions(
            project_name,
            product_ids=[product_entity["id"]],
            active=None,
            hero=False,
            fields=version_fields
        ))
        self.log.debug(
            "Version Number ({})".format(len(versions))
        )
        versions_by_parent = collections.defaultdict(list)
        for ent in versions:
            versions_by_parent[ent["productId"]].append(ent)

        def sort_func(ent):
            return int(ent["version"])

        all_last_versions = []
        for _parent_id, _versions in versions_by_parent.items():
            for idx, version in enumerate(
                sorted(_versions, key=sort_func, reverse=True)
            ):
                if idx >= versions_count:
                    break
                all_last_versions.append(version)

        self.log.debug("Collected versions ({})".format(len(versions)))

        # Filter latest versions
        for version in all_last_versions:
            versions.remove(version)

        # Update versions_by_parent without filtered versions
        versions_by_parent = collections.defaultdict(list)
        for ent in versions:
            versions_by_parent[ent["productId"]].append(ent)

        # Filter already deleted versions
        versions_to_pop = []
        for version in versions:
            if "deleted" in version["tags"]:
                versions_to_pop.append(version)

        for version in versions_to_pop:
            msg = "Folder: \"{}\" | Product: \"{}\" | Version: \"{}\"".format(
                folder_entity["path"],
                product_entity["name"],
                version["version"]
            )
            self.log.debug((
                "Skipping version. Already tagged as inactive. < {} >"
            ).format(msg))
            versions.remove(version)

        version_ids = [ent["id"] for ent in versions]

        self.log.debug(
            "Filtered versions to delete ({})".format(len(version_ids))
        )

        if not version_ids:
            msg = "Skipping processing. Nothing to delete on {}/{}".format(
                folder_entity["path"], product_entity["name"]
            )
            self.log.info(msg)
            print(msg)
            return

        repres = list(ayon_api.get_representations(
            project_name, version_ids=version_ids
        ))

        self.log.debug(
            "Collected representations to remove ({})".format(len(repres))
        )

        dir_paths = {}
        file_paths_by_dir = collections.defaultdict(list)
        for repre in repres:
            file_path, seq_path = self.path_from_representation(
                repre, anatomy
            )
            if file_path is None:
                self.log.debug((
"Could not format path for represenation \"{}\""
|
||||
                ).format(str(repre)))
                continue

            dir_path = os.path.dirname(file_path)
            dir_id = None
            for _dir_id, _dir_path in dir_paths.items():
                if _dir_path == dir_path:
                    dir_id = _dir_id
                    break

            if dir_id is None:
                dir_id = uuid.uuid4()
                dir_paths[dir_id] = dir_path

            file_paths_by_dir[dir_id].append([file_path, seq_path])

        dir_ids_to_pop = []
        for dir_id, dir_path in dir_paths.items():
            if os.path.exists(dir_path):
                continue

            dir_ids_to_pop.append(dir_id)

        # Pop dirs from both dictionaries
        for dir_id in dir_ids_to_pop:
            dir_paths.pop(dir_id)
            paths = file_paths_by_dir.pop(dir_id)
            # TODO report of missing directories?
            paths_msg = ", ".join([
                "'{}'".format(path[0].replace("\\", "/")) for path in paths
            ])
            self.log.debug((
                "Folder does not exist. Deleting its files skipped: {}"
            ).format(paths_msg))

        return {
            "dir_paths": dir_paths,
            "file_paths_by_dir": file_paths_by_dir,
            "versions": versions,
            "folder": folder_entity,
            "product": product_entity,
            "archive_product": versions_count == 0
        }

    def main(self, project_name, data, remove_publish_folder):
        # Size of files.
        size = 0
        if not data:
            return size

        if remove_publish_folder:
            size = self.delete_whole_dir_paths(data["dir_paths"].values())
        else:
            size = self.delete_only_repre_files(
                data["dir_paths"], data["file_paths_by_dir"]
            )

        op_session = OperationsSession()
        for version in data["versions"]:
            orig_version_tags = version["tags"]
            version_tags = list(orig_version_tags)
            changes = {}
            if "deleted" not in version_tags:
                version_tags.append("deleted")
                changes["tags"] = version_tags

            if version["active"]:
                changes["active"] = False

            if not changes:
                continue
            op_session.update_entity(
                project_name, "version", version["id"], changes
            )

        op_session.commit()

        return size

    def load(self, contexts, name=None, namespace=None, options=None):
        try:
            size = 0
            for count, context in enumerate(contexts):
                versions_to_keep = 2
                remove_publish_folder = False
                if options:
                    versions_to_keep = options.get(
                        "versions_to_keep", versions_to_keep
                    )
                    remove_publish_folder = options.get(
                        "remove_publish_folder", remove_publish_folder
                    )

                data = self.get_data(context, versions_to_keep)
                if not data:
                    continue
                project_name = context["project"]["name"]
                size += self.main(project_name, data, remove_publish_folder)
                print("Progressing {}/{}".format(count + 1, len(contexts)))

            msg = "Total size of files: {}".format(format_file_size(size))
            self.log.info(msg)
            self.message(msg)

        except Exception:
            self.log.error("Failed to delete versions.", exc_info=True)


class CalculateOldVersions(DeleteOldVersions):
    """Calculate file size of old versions"""
    label = "Calculate Old Versions"
    order = 30
    tool_names = ["library_loader"]

    options = [
        qargparse.Integer(
            "versions_to_keep", default=2, min=0, help="Versions to keep:"
        ),
        qargparse.Boolean(
            "remove_publish_folder", help="Remove publish folder:"
        )
    ]

    def main(self, project_name, data, remove_publish_folder):
        size = 0

        if not data:
            return size

        if remove_publish_folder:
            size = self.delete_whole_dir_paths(
                data["dir_paths"].values(), delete=False
            )
        else:
            size = self.delete_only_repre_files(
                data["dir_paths"], data["file_paths_by_dir"], delete=False
            )

        return size
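
Both plugins funnel through the same helpers: main() returns the number of bytes touched, and delete=False turns the destructive walk into a dry run, which is all CalculateOldVersions changes. A minimal standalone sketch of that size-accounting pattern (illustrative paths and names, not part of the plugin API):

import os


def measure_or_delete(dir_path, delete=False):
    """Walk bottom-up, sum file sizes and optionally remove as we go."""
    size = 0
    for root, dirs, files in os.walk(dir_path, topdown=False):
        for name in files:
            file_path = os.path.join(root, name)
            size += os.path.getsize(file_path)
            if delete:
                os.remove(file_path)
        if delete:
            # Children are already gone because the walk is bottom-up
            for name in dirs:
                os.rmdir(os.path.join(root, name))
    return size


# Dry run first, then the real pass reuses the exact same code path
print(measure_or_delete("/tmp/publish/v001"))
print(measure_or_delete("/tmp/publish/v001", delete=True))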
@@ -167,7 +167,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
         "uasset",
         "blendScene",
         "yeticacheUE",
-        "tycache"
+        "tycache",
+        "csv_ingest_file",
     ]

     default_template_name = "publish"

@@ -82,20 +82,6 @@ class BaseObj:
     def main_style(self):
         return load_default_style()

-    def height(self):
-        raise NotImplementedError(
-            "Attribute `height` is not implemented for <{}>".format(
-                self.__clas__.__name__
-            )
-        )
-
-    def width(self):
-        raise NotImplementedError(
-            "Attribute `width` is not implemented for <{}>".format(
-                self.__clas__.__name__
-            )
-        )
-
     def collect_data(self):
         return None

@@ -284,7 +284,13 @@ class ProductsModel(QtGui.QStandardItemModel):
         model_item.setData(label, QtCore.Qt.DisplayRole)
         return model_item

-    def _set_version_data_to_product_item(self, model_item, version_item):
+    def _set_version_data_to_product_item(
+        self,
+        model_item,
+        version_item,
+        repre_count_by_version_id=None,
+        sync_availability_by_version_id=None,
+    ):
         """

         Args:
@@ -292,6 +298,10 @@ class ProductsModel(QtGui.QStandardItemModel):
                 from version item.
             version_item (VersionItem): Item from entities model with
                 information about version.
+            repre_count_by_version_id (Optional[Dict[str, int]]): Mapping of
+                representation count by version id.
+            sync_availability_by_version_id (Optional[Dict[str, Tuple[int, int]]]):
+                Mapping of sync availability by version id.
         """

         model_item.setData(version_item.version_id, VERSION_ID_ROLE)
@@ -312,12 +322,20 @@ class ProductsModel(QtGui.QStandardItemModel):
         # TODO call site sync methods for all versions at once
         project_name = self._last_project_name
         version_id = version_item.version_id
-        repre_count = self._controller.get_versions_representation_count(
-            project_name, [version_id]
-        )[version_id]
-        active, remote = self._controller.get_version_sync_availability(
-            project_name, [version_id]
-        )[version_id]
+        if repre_count_by_version_id is None:
+            repre_count_by_version_id = (
+                self._controller.get_versions_representation_count(
+                    project_name, [version_id]
+                )
+            )
+        if sync_availability_by_version_id is None:
+            sync_availability_by_version_id = (
+                self._controller.get_version_sync_availability(
+                    project_name, [version_id]
+                )
+            )
+        repre_count = repre_count_by_version_id[version_id]
+        active, remote = sync_availability_by_version_id[version_id]

         model_item.setData(repre_count, REPRESENTATIONS_COUNT_ROLE)
         model_item.setData(active, SYNC_ACTIVE_SITE_AVAILABILITY)
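
The hunk above swaps two per-item controller queries for optional precomputed mappings, so a caller that has already fetched data for many versions is not forced into one round-trip per row. The shape of that batching pattern, sketched with a hypothetical fetcher (fetch_repre_counts is illustrative, not a real controller method):

def fetch_repre_counts(version_ids):
    # Hypothetical stand-in for one batched controller/server query.
    return {version_id: 1 for version_id in version_ids}


def fill_rows(version_ids):
    # One round-trip up front...
    counts = fetch_repre_counts(version_ids)
    # ...then O(1) dictionary lookups while filling each row.
    for version_id in version_ids:
        print(version_id, counts[version_id])


fill_rows(["a1", "b2", "c3"])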
@@ -327,7 +345,9 @@ class ProductsModel(QtGui.QStandardItemModel):
         self,
         product_item,
         active_site_icon,
-        remote_site_icon
+        remote_site_icon,
+        repre_count_by_version_id,
+        sync_availability_by_version_id,
     ):
         model_item = self._items_by_id.get(product_item.product_id)
         versions = list(product_item.version_items.values())
@@ -357,7 +377,12 @@ class ProductsModel(QtGui.QStandardItemModel):
         model_item.setData(active_site_icon, ACTIVE_SITE_ICON_ROLE)
         model_item.setData(remote_site_icon, REMOTE_SITE_ICON_ROLE)

-        self._set_version_data_to_product_item(model_item, last_version)
+        self._set_version_data_to_product_item(
+            model_item,
+            last_version,
+            repre_count_by_version_id,
+            sync_availability_by_version_id,
+        )
         return model_item

     def get_last_project_name(self):
@@ -387,6 +412,24 @@ class ProductsModel(QtGui.QStandardItemModel):
             product_item.product_id: product_item
             for product_item in product_items
         }
+        last_version_id_by_product_id = {}
+        for product_item in product_items:
+            versions = list(product_item.version_items.values())
+            versions.sort()
+            last_version = versions[-1]
+            last_version_id_by_product_id[product_item.product_id] = (
+                last_version.version_id
+            )
+
+        version_ids = set(last_version_id_by_product_id.values())
+        repre_count_by_version_id = self._controller.get_versions_representation_count(
+            project_name, version_ids
+        )
+        sync_availability_by_version_id = (
+            self._controller.get_version_sync_availability(
+                project_name, version_ids
+            )
+        )

         # Prepare product groups
         product_name_matches_by_group = collections.defaultdict(dict)
@@ -443,6 +486,8 @@ class ProductsModel(QtGui.QStandardItemModel):
                 product_item,
                 active_site_icon,
                 remote_site_icon,
+                repre_count_by_version_id,
+                sync_availability_by_version_id,
             )
             new_items.append(item)

@@ -463,6 +508,8 @@ class ProductsModel(QtGui.QStandardItemModel):
                 product_item,
                 active_site_icon,
                 remote_site_icon,
+                repre_count_by_version_id,
+                sync_availability_by_version_id,
             )
             new_merged_items.append(item)
             merged_product_types.add(product_item.product_type)

@@ -343,8 +343,9 @@ class QtRemotePublishController(BasePublisherController):

     @abstractmethod
     def _send_instance_changes_to_client(self):
-        instance_changes = self._get_instance_changes_for_client()
-        # Implement to send 'instance_changes' value to client
+        # TODO Implement to send 'instance_changes' value to client
+        # instance_changes = self._get_instance_changes_for_client()
+        pass

     @abstractmethod
     def save_changes(self):

@@ -552,7 +552,7 @@ class TrayStarter(QtCore.QObject):
 def main():
     app = get_ayon_qt_app()

-    starter = TrayStarter(app)
+    starter = TrayStarter(app) # noqa F841

     if not is_running_from_build() and os.name == "nt":
         import ctypes

@@ -562,11 +562,11 @@ class HSLInputs(QtWidgets.QWidget):
             return

         self._block_changes = True
-        h, s, l, _ = self.color.getHsl()
+        hue, sat, lum, _ = self.color.getHsl()

-        self.input_hue.setValue(h)
-        self.input_sat.setValue(s)
-        self.input_light.setValue(l)
+        self.input_hue.setValue(hue)
+        self.input_sat.setValue(sat)
+        self.input_light.setValue(lum)

         self._block_changes = False

@@ -578,7 +578,8 @@ class OptionalAction(QtWidgets.QWidgetAction):
     def set_option_tip(self, options):
         sep = "\n\n"
         if not options or not isinstance(options[0], AbstractAttrDef):
-            mak = (lambda opt: opt["name"] + " :\n " + opt["help"])
+            def mak(opt):
+                return opt["name"] + " :\n " + opt["help"]
             self.option_tip = sep.join(mak(opt) for opt in options)
             return

@@ -8,12 +8,12 @@ from ayon_core.tools.utils.dialogs import show_message_dialog
 def open_template_ui(builder, main_window):
     """Open template from `builder`

-    Asks user about overwriting current scene and feedsback exceptions.
+    Asks user about overwriting current scene and feedback exceptions.
     """
     result = QtWidgets.QMessageBox.question(
         main_window,
         "Opening template",
-        "Caution! You will loose unsaved changes.\nDo you want to continue?",
+        "Caution! You will lose unsaved changes.\nDo you want to continue?",
         QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
     )
     if result == QtWidgets.QMessageBox.Yes:

@@ -20,6 +20,8 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel):
         controller (AbstractWorkfilesFrontend): The control object.
     """

+    refreshed = QtCore.Signal()
+
     def __init__(self, controller):
         super(WorkAreaFilesModel, self).__init__()

@@ -163,6 +165,12 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel):
         self._fill_items()

     def _fill_items(self):
+        try:
+            self._fill_items_impl()
+        finally:
+            self.refreshed.emit()
+
+    def _fill_items_impl(self):
         folder_id = self._selected_folder_id
         task_id = self._selected_task_id
         if not folder_id or not task_id:
@@ -285,6 +293,7 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
         selection_model.selectionChanged.connect(self._on_selection_change)
         view.double_clicked.connect(self._on_mouse_double_click)
         view.customContextMenuRequested.connect(self._on_context_menu)
+        model.refreshed.connect(self._on_model_refresh)

         controller.register_event_callback(
             "expected_selection_changed",
@@ -298,6 +307,7 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
         self._controller = controller

         self._published_mode = False
+        self._change_selection_on_refresh = True

     def set_published_mode(self, published_mode):
         """Set the published mode.
@@ -379,7 +389,9 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
         if not workfile_info["current"]:
             return

+        self._change_selection_on_refresh = False
         self._model.refresh()
+        self._change_selection_on_refresh = True

         workfile_name = workfile_info["name"]
         if (
@@ -394,3 +406,30 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
         self._controller.expected_workfile_selected(
             event["folder"]["id"], event["task"]["name"], workfile_name
         )
+
+    def _on_model_refresh(self):
+        if (
+            not self._change_selection_on_refresh
+            or self._proxy_model.rowCount() < 1
+        ):
+            return
+
+        # Find the row with latest date modified
+        latest_index = max(
+            (
+                self._proxy_model.index(idx, 0)
+                for idx in range(self._proxy_model.rowCount())
+            ),
+            key=lambda model_index: model_index.data(DATE_MODIFIED_ROLE)
+        )
+
+        # Select row of latest modified
+        selection_model = self._view.selectionModel()
+        selection_model.select(
+            latest_index,
+            (
+                QtCore.QItemSelectionModel.ClearAndSelect
+                | QtCore.QItemSelectionModel.Current
+                | QtCore.QItemSelectionModel.Rows
+            )
+        )

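The added _on_model_refresh picks the row to select by comparing one data role across all proxy rows; the core of it is just max() with a key. A runnable sketch of that pattern against a plain item model (assumes a qtpy environment; DATE_MODIFIED_ROLE here stands in for the tool's custom role):

from qtpy import QtCore, QtGui, QtWidgets

DATE_MODIFIED_ROLE = QtCore.Qt.UserRole + 1  # assumed custom role

app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])

model = QtGui.QStandardItemModel()
for name, mtime in (("a.ma", 10.0), ("b.ma", 30.0), ("c.ma", 20.0)):
    item = QtGui.QStandardItem(name)
    item.setData(mtime, DATE_MODIFIED_ROLE)
    model.appendRow(item)

# Same pattern as the diff: the newest row wins
latest_index = max(
    (model.index(idx, 0) for idx in range(model.rowCount())),
    key=lambda model_index: model_index.data(DATE_MODIFIED_ROLE)
)
print(latest_index.data())  # -> "b.ma"
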
@@ -118,11 +118,11 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
         overlay_invalid_host = InvalidHostOverlay(self)
         overlay_invalid_host.setVisible(False)

-        first_show_timer = QtCore.QTimer()
-        first_show_timer.setSingleShot(True)
-        first_show_timer.setInterval(50)
+        show_timer = QtCore.QTimer()
+        show_timer.setSingleShot(True)
+        show_timer.setInterval(50)

-        first_show_timer.timeout.connect(self._on_first_show)
+        show_timer.timeout.connect(self._on_show)

         controller.register_event_callback(
             "save_as.finished",
@@ -159,7 +159,7 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
         self._tasks_widget = tasks_widget
         self._side_panel = side_panel

-        self._first_show_timer = first_show_timer
+        self._show_timer = show_timer

         self._post_init()

@@ -287,9 +287,9 @@ class WorkfilesToolWindow(QtWidgets.QWidget):

     def showEvent(self, event):
         super(WorkfilesToolWindow, self).showEvent(event)
+        self._show_timer.start()
         if self._first_show:
             self._first_show = False
-            self._first_show_timer.start()
             self.setStyleSheet(style.load_stylesheet())

     def keyPressEvent(self, event):
@@ -303,9 +303,8 @@ class WorkfilesToolWindow(QtWidgets.QWidget):

         pass

-    def _on_first_show(self):
-        if not self._controller_refreshed:
-            self.refresh()
+    def _on_show(self):
+        self.refresh()

     def _on_file_text_filter_change(self, text):
         self._files_widget.set_text_filter(text)
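
The workfiles-window hunks replace the one-shot _on_first_show refresh with a refresh on every show, still routed through a 50 ms single-shot timer so the window paints before the heavier refresh work runs. The pattern reduced to a minimal widget (a sketch assuming a qtpy environment; names are illustrative):

import sys

from qtpy import QtCore, QtWidgets


class Window(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        show_timer = QtCore.QTimer(self)
        show_timer.setSingleShot(True)
        show_timer.setInterval(50)
        show_timer.timeout.connect(self._on_show)
        self._show_timer = show_timer

    def showEvent(self, event):
        super().showEvent(event)
        # (Re)start on every show; the timeout fires only after the
        # event loop has had a chance to paint the window.
        self._show_timer.start()

    def _on_show(self):
        print("refresh")


app = QtWidgets.QApplication(sys.argv)
window = Window()
window.show()
app.exec_()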