Merge branch 'develop' into feature/AY-1261_Running-DCC-from-CLI-in-Ayon-context

# Conflicts:
#	server_addon/applications/package.py
Jakub Trllo 2024-04-30 10:17:25 +02:00
commit f6b8b7c3da
126 changed files with 4214 additions and 2188 deletions

View file

@ -8,14 +8,11 @@ from ayon_core.lib import Logger, register_event_callback
from ayon_core.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
register_workfile_build_plugin_path,
AVALON_CONTAINER_ID,
AVALON_INSTANCE_ID,
AYON_INSTANCE_ID,
)
from ayon_core.hosts.aftereffects.api.workfile_template_builder import (
AEPlaceholderLoadPlugin,
AEPlaceholderCreatePlugin
)
from ayon_core.pipeline.load import any_outdated_containers
import ayon_core.hosts.aftereffects
@ -40,6 +37,7 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
@ -76,6 +74,7 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
register_event_callback("application.launched", application_launch)
@ -118,12 +117,6 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
item["id"] = "publish_context"
self.stub.imprint(item["id"], item)
def get_workfile_build_placeholder_plugins(self):
return [
AEPlaceholderLoadPlugin,
AEPlaceholderCreatePlugin
]
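Across hosts, this commit replaces the host-level `get_workfile_build_placeholder_plugins` list (removed above) with discovery through registered plugin paths. A minimal sketch of the new pattern, assuming an illustrative plugins directory:

from ayon_core.pipeline import register_workfile_build_plugin_path

# Placeholder plugins in this directory are now discovered automatically
# by the template builder; the path below is illustrative.
register_workfile_build_plugin_path("/path/to/plugins/workfile_build")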
# created instances section
def list_instances(self):
"""List all created instances from current workfile which

View file

@ -1,6 +1,7 @@
import os.path
import uuid
import shutil
from abc import abstractmethod
from ayon_core.pipeline import registered_host
from ayon_core.tools.workfile_template_build import (
@ -9,13 +10,9 @@ from ayon_core.tools.workfile_template_build import (
from ayon_core.pipeline.workfile.workfile_template_builder import (
AbstractTemplateBuilder,
PlaceholderPlugin,
LoadPlaceholderItem,
CreatePlaceholderItem,
PlaceholderLoadMixin,
PlaceholderCreateMixin
PlaceholderItem
)
from ayon_core.hosts.aftereffects.api import get_stub
from ayon_core.hosts.aftereffects.api.lib import set_settings
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
PLACEHOLDER_ID = "openpype.placeholder"
@ -51,6 +48,10 @@ class AETemplateBuilder(AbstractTemplateBuilder):
class AEPlaceholderPlugin(PlaceholderPlugin):
"""Contains generic methods for all PlaceholderPlugins."""
@abstractmethod
def _create_placeholder_item(self, item_data: dict) -> PlaceholderItem:
pass
def collect_placeholders(self):
"""Collect info from file metadata about created placeholders.
@ -63,17 +64,7 @@ class AEPlaceholderPlugin(PlaceholderPlugin):
if item.get("plugin_identifier") != self.identifier:
continue
if isinstance(self, AEPlaceholderLoadPlugin):
item = LoadPlaceholderItem(item["uuid"],
item["data"],
self)
elif isinstance(self, AEPlaceholderCreatePlugin):
item = CreatePlaceholderItem(item["uuid"],
item["data"],
self)
else:
raise NotImplementedError(f"Not implemented for {type(self)}")
item = self._create_placeholder_item(item)
output.append(item)
return output
@ -135,87 +126,6 @@ class AEPlaceholderPlugin(PlaceholderPlugin):
stub.imprint(item_id, container_data)
class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin):
"""Adds Create placeholder.
This adds composition and runs Create
"""
identifier = "aftereffects.create"
label = "AfterEffects create"
def create_placeholder(self, placeholder_data):
stub = get_stub()
name = "CREATEPLACEHOLDER"
item_id = stub.add_item(name, "COMP")
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Replace 'placeholder' with publishable instance.
Renames prepared composition name, creates publishable instance, sets
frame/duration settings according to DB.
"""
pre_create_data = {"use_selection": True}
item_id, item = self._get_item(placeholder)
get_stub().select_items([item_id])
self.populate_create_placeholder(placeholder, pre_create_data)
# apply settings for populated composition
item_id, metadata_item = self._get_item(placeholder)
set_settings(True, True, [item_id])
def get_placeholder_options(self, options=None):
return self.get_create_plugin_options(options)
class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin):
identifier = "aftereffects.load"
label = "AfterEffects load"
def create_placeholder(self, placeholder_data):
"""Creates AE's Placeholder item in Project items list.
Sets dummy resolution/duration/fps settings, will be replaced when
populated.
"""
stub = get_stub()
name = "LOADERPLACEHOLDER"
item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Use Openpype Loader from `placeholder` to create new FootageItems
New FootageItems are created, files are imported.
"""
self.populate_load_placeholder(placeholder)
errors = placeholder.get_errors()
stub = get_stub()
if errors:
stub.print_msg("\n".join(errors))
else:
if not placeholder.data["keep_placeholder"]:
metadata = stub.get_metadata()
for item in metadata:
if not item.get("is_placeholder"):
continue
scene_identifier = item.get("uuid")
if (scene_identifier and
scene_identifier == placeholder.scene_identifier):
stub.delete_item(item["members"][0])
stub.remove_instance(placeholder.scene_identifier, metadata)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def load_succeed(self, placeholder, container):
placeholder_item_id, _ = self._get_item(placeholder)
item_id = container.id
get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)
def build_workfile_template(*args, **kwargs):
builder = AETemplateBuilder(registered_host())
builder.build_template(*args, **kwargs)
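A hedged usage sketch of the entry point above; the keyword arguments are assumptions mirroring how `build_template` is called elsewhere in this commit:

from ayon_core.hosts.aftereffects.api.workfile_template_builder import (
    build_workfile_template
)

# args/kwargs are forwarded verbatim to AETemplateBuilder.build_template()
build_workfile_template(keep_placeholders=True, create_first_version=False)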

View file

@ -0,0 +1,49 @@
from ayon_core.pipeline.workfile.workfile_template_builder import (
CreatePlaceholderItem,
PlaceholderCreateMixin
)
from ayon_core.hosts.aftereffects.api import get_stub
from ayon_core.hosts.aftereffects.api.lib import set_settings
import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb
class AEPlaceholderCreatePlugin(wtb.AEPlaceholderPlugin,
PlaceholderCreateMixin):
"""Adds Create placeholder.
This adds a composition and runs Create
"""
identifier = "aftereffects.create"
label = "AfterEffects create"
def _create_placeholder_item(self, item_data) -> CreatePlaceholderItem:
return CreatePlaceholderItem(
scene_identifier=item_data["uuid"],
data=item_data["data"],
plugin=self
)
def create_placeholder(self, placeholder_data):
stub = get_stub()
name = "CREATEPLACEHOLDER"
item_id = stub.add_item(name, "COMP")
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Replace 'placeholder' with publishable instance.
Renames prepared composition name, creates publishable instance, sets
frame/duration settings according to DB.
"""
pre_create_data = {"use_selection": True}
item_id, item = self._get_item(placeholder)
get_stub().select_items([item_id])
self.populate_create_placeholder(placeholder, pre_create_data)
# apply settings for populated composition
item_id, metadata_item = self._get_item(placeholder)
set_settings(True, True, [item_id])
def get_placeholder_options(self, options=None):
return self.get_create_plugin_options(options)

View file

@ -0,0 +1,60 @@
from ayon_core.pipeline.workfile.workfile_template_builder import (
LoadPlaceholderItem,
PlaceholderLoadMixin
)
from ayon_core.hosts.aftereffects.api import get_stub
import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb
class AEPlaceholderLoadPlugin(wtb.AEPlaceholderPlugin, PlaceholderLoadMixin):
identifier = "aftereffects.load"
label = "AfterEffects load"
def _create_placeholder_item(self, item_data) -> LoadPlaceholderItem:
return LoadPlaceholderItem(
scene_identifier=item_data["uuid"],
data=item_data["data"],
plugin=self
)
def create_placeholder(self, placeholder_data):
"""Creates AE's Placeholder item in Project items list.
Sets dummy resolution/duration/fps settings, will be replaced when
populated.
"""
stub = get_stub()
name = "LOADERPLACEHOLDER"
item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Use Openpype Loader from `placeholder` to create new FootageItems
New FootageItems are created, files are imported.
"""
self.populate_load_placeholder(placeholder)
errors = placeholder.get_errors()
stub = get_stub()
if errors:
stub.print_msg("\n".join(errors))
else:
if not placeholder.data["keep_placeholder"]:
metadata = stub.get_metadata()
for item in metadata:
if not item.get("is_placeholder"):
continue
scene_identifier = item.get("uuid")
if (scene_identifier and
scene_identifier == placeholder.scene_identifier):
stub.delete_item(item["members"][0])
stub.remove_instance(placeholder.scene_identifier, metadata)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def load_succeed(self, placeholder, container):
placeholder_item_id, _ = self._get_item(placeholder)
item_id = container.id
get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)

View file

@ -2,6 +2,7 @@ import os
import bpy
from ayon_core.lib import BoolDef
from ayon_core.pipeline import publish
from ayon_core.hosts.blender.api import plugin
@ -17,6 +18,8 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
if not self.is_active(instance.data):
return
attr_values = self.get_attr_values_from_data(instance.data)
# Define extract output file path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["folderEntity"]["name"]
@ -46,7 +49,8 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
bpy.ops.wm.alembic_export(
filepath=filepath,
selected=True,
flatten=False
flatten=False,
subdiv_schema=attr_values.get("subdiv_schema", False)
)
plugin.deselect_all()
@ -65,6 +69,21 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
self.log.debug("Extracted instance '%s' to: %s",
instance.name, representation)
@classmethod
def get_attribute_defs(cls):
return [
BoolDef(
"subdiv_schema",
label="Alembic Mesh Subdiv Schema",
tooltip="Export Meshes using Alembic's subdivision schema.\n"
"Enabling this includes creases with the export but "
"excludes the mesh's normals.\n"
"Enabling this usually result in smaller file size "
"due to lack of normals.",
default=False
)
]
class ExtractModelABC(ExtractABC):
"""Extract model as ABC."""

View file

@ -811,6 +811,43 @@ def get_current_context_template_data_with_folder_attrs():
return template_data
def set_review_color_space(opengl_node, review_color_space="", log=None):
"""Set ociocolorspace parameter for the given OpenGL node.
Set the `ociocolorspace` parameter of the given OpenGL node
to the given review_color_space value.
If review_color_space is empty, a default colorspace corresponding to
the display & view of the current Houdini session will be used.
Args:
opengl_node (hou.Node): ROP node to set its ociocolorspace parm.
review_color_space (str): Colorspace value for ociocolorspace parm.
log (logging.Logger): Logger to log to.
"""
if log is None:
# module-level function, so there is no `self` here; assumes a
# `logging` import at the top of this file
log = logging.getLogger(__name__)
# Set Color Correction parameter to OpenColorIO
colorcorrect_parm = opengl_node.parm("colorcorrect")
if colorcorrect_parm.eval() != 2:
colorcorrect_parm.set(2)
log.debug(
"'Color Correction' parm on '{}' has been set to"
" 'OpenColorIO'".format(opengl_node.path())
)
opengl_node.setParms(
{"ociocolorspace": review_color_space}
)
log.debug(
"'OCIO Colorspace' parm on '{}' has been set to "
"the view color space '{}'"
.format(opengl_node, review_color_space)
)
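A short usage sketch of the helper above; the ROP path and colorspace name are assumptions:

import hou

from ayon_core.hosts.houdini.api.lib import set_review_color_space

opengl_node = hou.node("/out/opengl1")  # illustrative OpenGL ROP path
set_review_color_space(opengl_node, "ACES - ACEScg")  # assumed OCIO space name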
def get_context_var_changes():
"""get context var changes."""

View file

@ -0,0 +1,64 @@
from ayon_applications import PreLaunchHook, LaunchTypes
class SetDefaultDisplayView(PreLaunchHook):
"""Set default view and default display for houdini via OpenColorIO.
Houdini's defaultDisplay and defaultView are set by
setting 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS'
environment variables respectively.
More info: https://www.sidefx.com/docs/houdini/io/ocio.html#set-up
"""
app_groups = {"houdini"}
launch_types = {LaunchTypes.local}
def execute(self):
OCIO = self.launch_context.env.get("OCIO")
# This is a cheap way to skip this hook if either global color
# management or houdini color management was disabled because the
# OCIO var would be set by the global OCIOEnvHook
if not OCIO:
return
# workfile settings added in '0.2.13'
houdini_color_settings = \
self.data["project_settings"]["houdini"]["imageio"].get("workfile")
if not houdini_color_settings:
self.log.info("Hook 'SetDefaultDisplayView' requires Houdini "
"addon version >= '0.2.13'")
return
if not houdini_color_settings["enabled"]:
self.log.info(
"Houdini workfile color management is disabled."
)
return
# 'OCIO_ACTIVE_DISPLAYS', 'OCIO_ACTIVE_VIEWS' are checked
# as Admins can add them in Ayon env vars or Ayon tools.
default_display = houdini_color_settings["default_display"]
if default_display:
# get 'OCIO_ACTIVE_DISPLAYS' value if exists.
self._set_context_env("OCIO_ACTIVE_DISPLAYS", default_display)
default_view = houdini_color_settings["default_view"]
if default_view:
# get 'OCIO_ACTIVE_VIEWS' value if exists.
self._set_context_env("OCIO_ACTIVE_VIEWS", default_view)
def _set_context_env(self, env_var, default_value):
env_value = self.launch_context.env.get(env_var, "")
new_value = ":".join(
key for key in [default_value, env_value] if key
)
self.log.info(
"Setting {} environment to: {}"
.format(env_var, new_value)
)
self.launch_context.env[env_var] = new_value
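The prepend semantics of `_set_context_env` in isolation, as a worked sketch with assumed values:

env_value = "sRGB"      # pre-existing OCIO_ACTIVE_DISPLAYS value
default_value = "ACES"  # from the Houdini addon settings
new_value = ":".join(key for key in [default_value, env_value] if key)
assert new_value == "ACES:sRGB"  # Houdini now treats ACES as the default display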

View file

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating openGL reviews."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.hosts.houdini.api import lib, plugin
from ayon_core.lib import EnumDef, BoolDef, NumberDef
import os
@ -14,6 +14,16 @@ class CreateReview(plugin.HoudiniCreator):
label = "Review"
product_type = "review"
icon = "video-camera"
review_color_space = ""
def apply_settings(self, project_settings):
super(CreateReview, self).apply_settings(project_settings)
# workfile settings added in '0.2.13'
color_settings = project_settings["houdini"]["imageio"].get(
"workfile", {}
)
if color_settings.get("enabled"):
self.review_color_space = color_settings.get("review_color_space")
def create(self, product_name, instance_data, pre_create_data):
@ -85,10 +95,20 @@ class CreateReview(plugin.HoudiniCreator):
instance_node.setParms(parms)
# Set OCIO Colorspace to the default output colorspace
# Set OCIO Colorspace to the default colorspace
# if there's OCIO
if os.getenv("OCIO"):
self.set_colorcorrect_to_default_view_space(instance_node)
# Fall to the default value if cls.review_color_space is empty.
if not self.review_color_space:
# cls.review_color_space is an empty string
# when the imageio/workfile setting is disabled or
# when the Review colorspace setting is empty.
from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
self.review_color_space = get_default_display_view_colorspace()
lib.set_review_color_space(instance_node,
self.review_color_space,
self.log)
to_lock = ["id", "productType"]
@ -131,23 +151,3 @@ class CreateReview(plugin.HoudiniCreator):
minimum=0.0001,
decimals=3)
]
def set_colorcorrect_to_default_view_space(self,
instance_node):
"""Set ociocolorspace to the default output space."""
from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
# set Color Correction parameter to OpenColorIO
instance_node.setParms({"colorcorrect": 2})
# Get default view space for ociocolorspace parm.
default_view_space = get_default_display_view_colorspace()
instance_node.setParms(
{"ociocolorspace": default_view_space}
)
self.log.debug(
"'OCIO Colorspace' parm on '{}' has been set to "
"the default view color space '{}'"
.format(instance_node, default_view_space)
)

View file

@ -45,33 +45,11 @@ class AbcLoader(load.LoaderPlugin):
alembic = container.createNode("alembic", node_name=node_name)
alembic.setParms({"fileName": file_path})
# Add unpack node
unpack_name = "unpack_{}".format(name)
unpack = container.createNode("unpack", node_name=unpack_name)
unpack.setInput(0, alembic)
unpack.setParms({"transfer_attributes": "path"})
# Position nodes nicely
container.moveToGoodPosition()
container.layoutChildren()
# Add normal to points
# Order of menu ['point', 'vertex', 'prim', 'detail']
normal_name = "normal_{}".format(name)
normal_node = container.createNode("normal", node_name=normal_name)
normal_node.setParms({"type": 0})
normal_node.setInput(0, unpack)
null = container.createNode("null", node_name="OUT")
null.setInput(0, normal_node)
# Ensure display flag is on the Alembic input node and not on the OUT
# node to optimize "debug" displaying in the viewport.
alembic.setDisplayFlag(True)
# Set new position for unpack node else it gets cluttered
nodes = [container, alembic, unpack, normal_node, null]
for nr, node in enumerate(nodes):
node.setPosition([0, (0 - nr)])
self[:] = nodes
nodes = [container, alembic]
return pipeline.containerise(
node_name,

View file

@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop, splitext
import hou
class ExtractComposite(publish.Extractor):
class ExtractComposite(publish.Extractor,
publish.ColormanagedPyblishPluginMixin):
order = pyblish.api.ExtractorOrder
label = "Extract Composite (Image Sequence)"
@ -45,8 +46,14 @@ class ExtractComposite(publish.Extractor):
"frameEnd": instance.data["frameEndHandle"],
}
from pprint import pformat
self.log.info(pformat(representation))
if ext.lower() == "exr":
# Inject colorspace with 'scene_linear' as that's the
# default Houdini working colorspace and all extracted
# OpenEXR images should be in that colorspace.
# https://www.sidefx.com/docs/houdini/render/linear.html#image-formats
self.set_representation_colorspace(
representation, instance.context,
colorspace="scene_linear"
)
instance.data["representations"].append(representation)

View file

@ -8,7 +8,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop
import hou
class ExtractOpenGL(publish.Extractor):
class ExtractOpenGL(publish.Extractor,
publish.ColormanagedPyblishPluginMixin):
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract OpenGL"
@ -46,6 +47,14 @@ class ExtractOpenGL(publish.Extractor):
"camera_name": instance.data.get("review_camera")
}
if ropnode.evalParm("colorcorrect") == 2: # OpenColorIO enabled
colorspace = ropnode.evalParm("ociocolorspace")
# inject colorspace data
self.set_representation_colorspace(
representation, instance.context,
colorspace=colorspace
)
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(representation)

View file

@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import sys
import hou
import pyblish.api
import six
from ayon_core.pipeline import PublishValidationError
@ -26,28 +25,21 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
invalid = self.get_invalid(instance)
if invalid:
raise PublishValidationError(
("Output node(s) `{}` are incorrect. "
"See plug-in log for details.").format(invalid),
title=self.label
"Output node '{}' is incorrect. "
"See plug-in log for details.".format(invalid),
title=self.label,
description=(
"### Invalid COP output node\n\n"
"The output node path for the instance must be set to a "
"valid COP node path.\n\nSee the log for more details."
)
)
@classmethod
def get_invalid(cls, instance):
output_node = instance.data.get("output_node")
import hou
try:
output_node = instance.data["output_node"]
except KeyError:
six.reraise(
PublishValidationError,
PublishValidationError(
"Can't determine COP output node.",
title=cls.__name__),
sys.exc_info()[2]
)
if output_node is None:
if not output_node:
node = hou.node(instance.data.get("instance_node"))
cls.log.error(
"COP Output node in '%s' does not exist. "
@ -61,8 +53,8 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
cls.log.error(
"Output node %s is not a COP node. "
"COP Path must point to a COP node, "
"instead found category type: %s"
% (output_node.path(), output_node.type().category().name())
"instead found category type: %s",
output_node.path(), output_node.type().category().name()
)
return [output_node.path()]
@ -70,9 +62,7 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
# is Cop2 to avoid potential edge case scenarios even though
# the isinstance check above should be stricter than this category
if output_node.type().category().name() != "Cop2":
raise PublishValidationError(
(
"Output node {} is not of category Cop2."
" This is a bug..."
).format(output_node.path()),
title=cls.label)
cls.log.error(
"Output node %s is not of category Cop2.", output_node.path()
)
return [output_node.path()]

View file

@ -4,15 +4,19 @@ from ayon_core.pipeline import (
PublishValidationError,
OptionalPyblishPluginMixin
)
from ayon_core.pipeline.publish import RepairAction
from ayon_core.pipeline.publish import (
RepairAction,
get_plugin_settings,
apply_plugin_settings_automatically
)
from ayon_core.hosts.houdini.api.action import SelectROPAction
import os
import hou
class SetDefaultViewSpaceAction(RepairAction):
label = "Set default view colorspace"
class ResetViewSpaceAction(RepairAction):
label = "Reset OCIO colorspace parm"
icon = "mdi.monitor"
@ -27,9 +31,28 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
families = ["review"]
hosts = ["houdini"]
label = "Validate Review Colorspace"
actions = [SetDefaultViewSpaceAction, SelectROPAction]
actions = [ResetViewSpaceAction, SelectROPAction]
optional = True
review_color_space = ""
@classmethod
def apply_settings(cls, project_settings):
# Preserve automatic settings applying logic
settings = get_plugin_settings(plugin=cls,
project_settings=project_settings,
log=cls.log,
category="houdini")
apply_plugin_settings_automatically(cls, settings, logger=cls.log)
# workfile settings added in '0.2.13'
color_settings = project_settings["houdini"]["imageio"].get(
"workfile", {}
)
# Add review color settings
if color_settings.get("enabled"):
cls.review_color_space = color_settings.get("review_color_space")
def process(self, instance):
@ -52,39 +75,54 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
" 'OpenColorIO'".format(rop_node.path())
)
if rop_node.evalParm("ociocolorspace") not in \
hou.Color.ocio_spaces():
current_color_space = rop_node.evalParm("ociocolorspace")
if current_color_space not in hou.Color.ocio_spaces():
raise PublishValidationError(
"Invalid value: Colorspace name doesn't exist.\n"
"Check 'OCIO Colorspace' parameter on '{}' ROP"
.format(rop_node.path())
)
# if houdini/imageio/workfile is enabled and
# Review colorspace setting is empty then this check should
# actually check if the current_color_space setting equals
# the default colorspace value.
# However, it will make the black cmd screen show up more often
# which is very annoying.
if self.review_color_space and \
self.review_color_space != current_color_space:
raise PublishValidationError(
"Invalid value: Colorspace name doesn't match "
"the Colorspace specified in settings."
)
@classmethod
def repair(cls, instance):
"""Set Default View Space Action.
It is a helper action more than a repair action,
used to set colorspace on opengl node to the default view.
"""
from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
rop_node = hou.node(instance.data["instance_node"])
if rop_node.evalParm("colorcorrect") != 2:
rop_node.setParms({"colorcorrect": 2})
cls.log.debug(
"'Color Correction' parm on '{}' has been set to"
" 'OpenColorIO'".format(rop_node.path())
)
# Get default view colorspace name
default_view_space = get_default_display_view_colorspace()
rop_node.setParms({"ociocolorspace": default_view_space})
cls.log.info(
"'OCIO Colorspace' parm on '{}' has been set to "
"the default view color space '{}'"
.format(rop_node, default_view_space)
)
@classmethod
def repair(cls, instance):
"""Reset view colorspace.
It is used to set colorspace on opengl node.
It uses the colorspace value specified in the Houdini addon settings.
If the value in the Houdini addon settings is empty,
it will fall to the default colorspace.
Note:
This repair action assumes that OCIO is enabled.
As if OCIO is disabled the whole validation is skipped
and this repair action won't show up.
"""
from ayon_core.hosts.houdini.api.lib import set_review_color_space
# Fall to the default value if cls.review_color_space is empty.
if not cls.review_color_space:
# cls.review_color_space is an empty string
# when the imageio/workfile setting is disabled or
# when the Review colorspace setting is empty.
from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
cls.review_color_space = get_default_display_view_colorspace()
rop_node = hou.node(instance.data["instance_node"])
set_review_color_space(rop_node,
cls.review_color_space,
cls.log)

View file

@ -8,10 +8,15 @@ from typing import Any, Dict, Union
import six
import ayon_api
from ayon_core.pipeline import get_current_project_name, colorspace
from ayon_core.pipeline import (
get_current_project_name,
get_current_folder_path,
get_current_task_name,
colorspace
)
from ayon_core.settings import get_project_settings
from ayon_core.pipeline.context_tools import (
get_current_folder_entity,
get_current_task_entity
)
from ayon_core.style import load_stylesheet
from pymxs import runtime as rt
@ -221,41 +226,30 @@ def reset_scene_resolution():
scene resolution can be overwritten by a task if the task.attrib
contains any information regarding scene resolution.
"""
folder_entity = get_current_folder_entity(
fields={"attrib.resolutionWidth", "attrib.resolutionHeight"}
)
folder_attributes = folder_entity["attrib"]
width = int(folder_attributes["resolutionWidth"])
height = int(folder_attributes["resolutionHeight"])
task_attributes = get_current_task_entity(fields={"attrib"})["attrib"]
width = int(task_attributes["resolutionWidth"])
height = int(task_attributes["resolutionHeight"])
set_scene_resolution(width, height)
def get_frame_range(folder_entiy=None) -> Union[Dict[str, Any], None]:
"""Get the current folder frame range and handles.
def get_frame_range(task_entity=None) -> Union[Dict[str, Any], None]:
"""Get the current task frame range and handles
Args:
folder_entiy (dict): Folder eneity.
task_entity (dict): Task Entity.
Returns:
dict: with frame start, frame end, handle start, handle end.
"""
# Set frame start/end
if folder_entiy is None:
folder_entiy = get_current_folder_entity()
folder_attributes = folder_entiy["attrib"]
frame_start = folder_attributes.get("frameStart")
frame_end = folder_attributes.get("frameEnd")
if frame_start is None or frame_end is None:
return {}
frame_start = int(frame_start)
frame_end = int(frame_end)
handle_start = int(folder_attributes.get("handleStart", 0))
handle_end = int(folder_attributes.get("handleEnd", 0))
if task_entity is None:
task_entity = get_current_task_entity(fields={"attrib"})
task_attributes = task_entity["attrib"]
frame_start = int(task_attributes["frameStart"])
frame_end = int(task_attributes["frameEnd"])
handle_start = int(task_attributes["handleStart"])
handle_end = int(task_attributes["handleEnd"])
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_end + handle_end
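A worked example of the handle arithmetic above, with assumed task attributes:

# assumed attribs: frameStart=1001, frameEnd=1100, handleStart=8, handleEnd=8
frame_start_handle = 1001 - 8  # 993
frame_end_handle = 1100 + 8    # 1108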
@ -281,9 +275,9 @@ def reset_frame_range(fps: bool = True):
scene frame rate in frames-per-second.
"""
if fps:
project_name = get_current_project_name()
project_entity = ayon_api.get_project(project_name)
fps_number = float(project_entity["attrib"].get("fps"))
task_entity = get_current_task_entity()
task_attributes = task_entity["attrib"]
fps_number = float(task_attributes["fps"])
rt.frameRate = fps_number
frame_range = get_frame_range()
@ -502,9 +496,9 @@ def object_transform_set(container_children):
"""
transform_set = {}
for node in container_children:
name = f"{node.name}.transform"
name = f"{node}.transform"
transform_set[name] = node.pos
name = f"{node.name}.scale"
name = f"{node}.scale"
transform_set[name] = node.scale
return transform_set
@ -525,6 +519,36 @@ def get_plugins() -> list:
return plugin_info_list
def update_modifier_node_names(event, node):
"""Update the name of the nodes after renaming
Args:
event (pymxs.MXSWrapperBase): Event Name (
Mandatory argument for rt.NodeEventCallback)
node (list): Event Number (
Mandatory argument for rt.NodeEventCallback)
"""
containers = [
obj
for obj in rt.Objects
if (
rt.ClassOf(obj) == rt.Container
and rt.getUserProp(obj, "id") == "pyblish.avalon.instance"
and rt.getUserProp(obj, "productType") not in {
"workfile", "tyflow"
}
)
]
if not containers:
return
for container in containers:
ayon_data = container.modifiers[0].openPypeData
updated_node_names = [str(node.node) for node
in ayon_data.all_handles]
rt.setProperty(ayon_data, "sel_list", updated_node_names)
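The callback above is hooked up at host install time via `rt.NodeEventCallback`, as the MaxHost hunk below shows; in isolation:

from pymxs import runtime as rt

# re-run update_modifier_node_names whenever any node is renamed
rt.NodeEventCallback(nameChanged=update_modifier_node_names)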
@contextlib.contextmanager
def render_resolution(width, height):
"""Set render resolution option during context

View file

@ -63,6 +63,8 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
rt.callbacks.addScript(rt.Name('postWorkspaceChange'),
self._deferred_menu_creation)
rt.NodeEventCallback(
nameChanged=lib.update_modifier_node_names)
def workfile_has_unsaved_changes(self):
return rt.getSaveRequired()

View file

@ -117,7 +117,7 @@ class MaxSceneLoader(load.LoaderPlugin):
)
for max_obj, obj_name in zip(max_objects, max_object_names):
max_obj.name = f"{namespace}:{obj_name}"
max_container.append(rt.getNodeByName(max_obj.name))
max_container.append(max_obj)
return containerise(
name, max_container, context,
namespace, loader=self.__class__.__name__)
@ -158,11 +158,11 @@ class MaxSceneLoader(load.LoaderPlugin):
current_max_object_names):
max_obj.name = f"{namespace}:{obj_name}"
max_objects.append(max_obj)
max_transform = f"{max_obj.name}.transform"
max_transform = f"{max_obj}.transform"
if max_transform in transform_data.keys():
max_obj.pos = transform_data[max_transform] or 0
max_obj.scale = transform_data[
f"{max_obj.name}.scale"] or 0
f"{max_obj}.scale"] or 0
update_custom_attribute_data(node, max_objects)
lib.imprint(container["instance_node"], {

View file

@ -53,6 +53,7 @@ class ExtractAlembic(publish.Extractor,
hosts = ["max"]
families = ["pointcache"]
optional = True
active = True
def process(self, instance):
if not self.is_active(instance.data):
@ -102,24 +103,27 @@ class ExtractAlembic(publish.Extractor,
@classmethod
def get_attribute_defs(cls):
return [
defs = super(ExtractAlembic, cls).get_attribute_defs()
defs.extend([
BoolDef("custom_attrs",
label="Custom Attributes",
default=False),
]
])
return defs
class ExtractCameraAlembic(ExtractAlembic):
"""Extract Camera with AlembicExport."""
label = "Extract Alembic Camera"
families = ["camera"]
optional = True
class ExtractModel(ExtractAlembic):
class ExtractModelAlembic(ExtractAlembic):
"""Extract Geometry in Alembic Format"""
label = "Extract Geometry (Alembic)"
families = ["model"]
optional = True
def _set_abc_attributes(self, instance):
attr_values = self.get_attr_values_from_data(instance.data)

View file

@ -42,7 +42,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin,
return
frame_range = get_frame_range(
instance.data["folderEntity"])
instance.data["taskEntity"])
inst_frame_start = instance.data.get("frameStartHandle")
inst_frame_end = instance.data.get("frameEndHandle")

View file

@ -38,7 +38,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin,
context_label = "{} > {}".format(*context)
instance_label = "{} > {}".format(folderPath, task)
message = (
"Instance '{}' publishes to different folder or task "
"Instance '{}' publishes to different context(folder or task) "
"than current context: {}. Current context: {}".format(
instance.name, instance_label, context_label
)
@ -46,7 +46,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin,
raise PublishValidationError(
message=message,
description=(
"## Publishing to a different context folder or task\n"
"## Publishing to a different context data(folder or task)\n"
"There are publish instances present which are publishing "
"into a different folder path or task than your current context.\n\n"
"Usually this is not what you want but there can be cases "

View file

@ -7,7 +7,10 @@ from ayon_core.pipeline.publish import (
RepairAction,
PublishValidationError
)
from ayon_core.hosts.max.api.lib import reset_scene_resolution
from ayon_core.hosts.max.api.lib import (
reset_scene_resolution,
imprint
)
class ValidateResolutionSetting(pyblish.api.InstancePlugin,
@ -25,8 +28,10 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
if not self.is_active(instance.data):
return
width, height = self.get_folder_resolution(instance)
current_width = rt.renderWidth
current_height = rt.renderHeight
current_width, current_height = (
self.get_current_resolution(instance)
)
if current_width != width and current_height != height:
raise PublishValidationError("Resolution Setting "
"not matching resolution "
@ -41,12 +46,16 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
"not matching resolution set "
"on asset or shot.")
def get_folder_resolution(self, instance):
folder_entity = instance.data["folderEntity"]
if folder_entity:
folder_attributes = folder_entity["attrib"]
width = folder_attributes["resolutionWidth"]
height = folder_attributes["resolutionHeight"]
def get_current_resolution(self, instance):
return rt.renderWidth, rt.renderHeight
@classmethod
def get_folder_resolution(cls, instance):
task_entity = instance.data.get("taskEntity")
if task_entity:
task_attributes = task_entity["attrib"]
width = task_attributes["resolutionWidth"]
height = task_attributes["resolutionHeight"]
return int(width), int(height)
# Defaults if not found in folder entity
@ -55,3 +64,29 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
@classmethod
def repair(cls, instance):
reset_scene_resolution()
class ValidateReviewResolutionSetting(ValidateResolutionSetting):
families = ["review"]
optional = True
actions = [RepairAction]
def get_current_resolution(self, instance):
current_width = instance.data["review_width"]
current_height = instance.data["review_height"]
return current_width, current_height
@classmethod
def repair(cls, instance):
context_width, context_height = (
cls.get_folder_resolution(instance)
)
creator_attrs = instance.data["creator_attributes"]
creator_attrs["review_width"] = context_width
creator_attrs["review_height"] = context_height
creator_attrs_data = {
"creator_attributes": creator_attrs
}
# update the width and height of review
# data in creator_attributes
imprint(instance.data["instance_node"], creator_attrs_data)

View file

@ -720,7 +720,8 @@ class RenderProductsArnold(ARenderProducts):
# AOVs > Legacy > Maya Render View > Mode
aovs_enabled = bool(
self._get_attr("defaultArnoldRenderOptions.aovMode")
self._get_attr(
"defaultArnoldRenderOptions.aovMode", as_string=False)
)
if not aovs_enabled:
return beauty_products

View file

@ -30,9 +30,11 @@ from ayon_core.pipeline import (
register_loader_plugin_path,
register_inventory_action_path,
register_creator_plugin_path,
register_workfile_build_plugin_path,
deregister_loader_plugin_path,
deregister_inventory_action_path,
deregister_creator_plugin_path,
deregister_workfile_build_plugin_path,
AYON_CONTAINER_ID,
AVALON_CONTAINER_ID,
)
@ -47,7 +49,6 @@ from ayon_core.hosts.maya import MAYA_ROOT_DIR
from ayon_core.hosts.maya.lib import create_workspace_mel
from . import menu, lib
from .workfile_template_builder import MayaPlaceholderLoadPlugin
from .workio import (
open_file,
save_file,
@ -64,6 +65,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
@ -93,7 +95,7 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
self.log.info(PUBLISH_PATH)
register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
self.log.info("Installing callbacks ... ")
register_event_callback("init", on_init)
@ -148,11 +150,6 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
def get_containers(self):
return ls()
def get_workfile_build_placeholder_plugins(self):
return [
MayaPlaceholderLoadPlugin
]
@contextlib.contextmanager
def maintained_selection(self):
with lib.maintained_selection():
@ -338,6 +335,7 @@ def uninstall():
deregister_loader_plugin_path(LOAD_PATH)
deregister_creator_plugin_path(CREATE_PATH)
deregister_inventory_action_path(INVENTORY_PATH)
deregister_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
menu.uninstall()

View file

@ -19,7 +19,7 @@ from .lib import pairwise
@contextlib.contextmanager
def _allow_export_from_render_setup_layer():
def allow_export_from_render_setup_layer():
"""Context manager to override Maya settings to allow RS layer export"""
try:
@ -102,7 +102,7 @@ def export_in_rs_layer(path, nodes, export=None):
cmds.disconnectAttr(src, dest)
# Export Selected
with _allow_export_from_render_setup_layer():
with allow_export_from_render_setup_layer():
cmds.select(nodes, noExpand=True)
if export:
export()

View file

@ -1,5 +1,3 @@
import json
from maya import cmds
from ayon_core.pipeline import (
@ -10,16 +8,13 @@ from ayon_core.pipeline import (
)
from ayon_core.pipeline.workfile.workfile_template_builder import (
TemplateAlreadyImported,
AbstractTemplateBuilder,
PlaceholderPlugin,
LoadPlaceholderItem,
PlaceholderLoadMixin,
AbstractTemplateBuilder
)
from ayon_core.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from .lib import read, imprint, get_reference_node, get_main_window
from .lib import get_main_window
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
@ -91,255 +86,6 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
return True
class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
identifier = "maya.load"
label = "Maya load"
def _collect_scene_placeholders(self):
# Cache placeholder data to shared data
placeholder_nodes = self.builder.get_shared_populate_data(
"placeholder_nodes"
)
if placeholder_nodes is None:
attributes = cmds.ls("*.plugin_identifier", long=True)
placeholder_nodes = {}
for attribute in attributes:
node_name = attribute.rpartition(".")[0]
placeholder_nodes[node_name] = (
self._parse_placeholder_node_data(node_name)
)
self.builder.set_shared_populate_data(
"placeholder_nodes", placeholder_nodes
)
return placeholder_nodes
def _parse_placeholder_node_data(self, node_name):
placeholder_data = read(node_name)
parent_name = (
cmds.getAttr(node_name + ".parent", asString=True)
or node_name.rpartition("|")[0]
or ""
)
if parent_name:
siblings = cmds.listRelatives(parent_name, children=True)
else:
siblings = cmds.ls(assemblies=True)
node_shortname = node_name.rpartition("|")[2]
current_index = cmds.getAttr(node_name + ".index", asString=True)
if current_index < 0:
current_index = siblings.index(node_shortname)
placeholder_data.update({
"parent": parent_name,
"index": current_index
})
return placeholder_data
def _create_placeholder_name(self, placeholder_data):
placeholder_name_parts = placeholder_data["builder_type"].split("_")
pos = 1
placeholder_product_type = placeholder_data.get("product_type")
if placeholder_product_type is None:
placeholder_product_type = placeholder_data.get("family")
if placeholder_product_type:
placeholder_name_parts.insert(pos, placeholder_product_type)
pos += 1
# add loader arguments if any
loader_args = placeholder_data["loader_args"]
if loader_args:
loader_args = json.loads(loader_args.replace('\'', '\"'))
values = [v for v in loader_args.values()]
for value in values:
placeholder_name_parts.insert(pos, value)
pos += 1
placeholder_name = "_".join(placeholder_name_parts)
return placeholder_name.capitalize()
def _get_loaded_repre_ids(self):
loaded_representation_ids = self.builder.get_shared_populate_data(
"loaded_representation_ids"
)
if loaded_representation_ids is None:
try:
containers = cmds.sets("AVALON_CONTAINERS", q=True)
except ValueError:
containers = []
loaded_representation_ids = {
cmds.getAttr(container + ".representation")
for container in containers
}
self.builder.set_shared_populate_data(
"loaded_representation_ids", loaded_representation_ids
)
return loaded_representation_ids
def create_placeholder(self, placeholder_data):
selection = cmds.ls(selection=True)
if len(selection) > 1:
raise ValueError("More then one item are selected")
parent = selection[0] if selection else None
placeholder_data["plugin_identifier"] = self.identifier
placeholder_name = self._create_placeholder_name(placeholder_data)
placeholder = cmds.spaceLocator(name=placeholder_name)[0]
if parent:
placeholder = cmds.parent(placeholder, selection[0])[0]
imprint(placeholder, placeholder_data)
# Add helper attributes to keep placeholder info
cmds.addAttr(
placeholder,
longName="parent",
hidden=True,
dataType="string"
)
cmds.addAttr(
placeholder,
longName="index",
hidden=True,
attributeType="short",
defaultValue=-1
)
cmds.setAttr(placeholder + ".parent", "", type="string")
def update_placeholder(self, placeholder_item, placeholder_data):
node_name = placeholder_item.scene_identifier
new_values = {}
for key, value in placeholder_data.items():
placeholder_value = placeholder_item.data.get(key)
if value != placeholder_value:
new_values[key] = value
placeholder_item.data[key] = value
for key in new_values.keys():
cmds.deleteAttr(node_name + "." + key)
imprint(node_name, new_values)
def collect_placeholders(self):
output = []
scene_placeholders = self._collect_scene_placeholders()
for node_name, placeholder_data in scene_placeholders.items():
if placeholder_data.get("plugin_identifier") != self.identifier:
continue
# TODO do data validations and maybe upgrades if they are invalid
output.append(
LoadPlaceholderItem(node_name, placeholder_data, self)
)
return output
def populate_placeholder(self, placeholder):
self.populate_load_placeholder(placeholder)
def repopulate_placeholder(self, placeholder):
repre_ids = self._get_loaded_repre_ids()
self.populate_load_placeholder(placeholder, repre_ids)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def post_placeholder_process(self, placeholder, failed):
"""Cleanup placeholder after load of its corresponding representations.
Args:
placeholder (PlaceholderItem): Item which was just used to load
representation.
failed (bool): Loading of representation failed.
"""
# Hide placeholder and add them to placeholder set
node = placeholder.scene_identifier
cmds.sets(node, addElement=PLACEHOLDER_SET)
cmds.hide(node)
cmds.setAttr(node + ".hiddenInOutliner", True)
def delete_placeholder(self, placeholder):
"""Remove placeholder if building was successful"""
cmds.delete(placeholder.scene_identifier)
def load_succeed(self, placeholder, container):
self._parent_in_hierarchy(placeholder, container)
def _parent_in_hierarchy(self, placeholder, container):
"""Parent loaded container to placeholder's parent.
ie : Set loaded content as placeholder's sibling
Args:
container (str): Placeholder loaded containers
"""
if not container:
return
roots = cmds.sets(container, q=True) or []
ref_node = None
try:
ref_node = get_reference_node(roots)
except AssertionError as e:
self.log.info(e.args[0])
nodes_to_parent = []
for root in roots:
if ref_node:
ref_root = cmds.referenceQuery(root, nodes=True)[0]
ref_root = (
cmds.listRelatives(ref_root, parent=True, path=True) or
[ref_root]
)
nodes_to_parent.extend(ref_root)
continue
if root.endswith("_RN"):
# Backwards compatibility for hardcoded reference names.
refRoot = cmds.referenceQuery(root, n=True)[0]
refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot]
nodes_to_parent.extend(refRoot)
elif root not in cmds.listSets(allSets=True):
nodes_to_parent.append(root)
elif not cmds.sets(root, q=True):
return
# Move loaded nodes to correct index in outliner hierarchy
placeholder_form = cmds.xform(
placeholder.scene_identifier,
q=True,
matrix=True,
worldSpace=True
)
scene_parent = cmds.listRelatives(
placeholder.scene_identifier, parent=True, fullPath=True
)
for node in set(nodes_to_parent):
cmds.reorder(node, front=True)
cmds.reorder(node, relative=placeholder.data["index"])
cmds.xform(node, matrix=placeholder_form, ws=True)
if scene_parent:
cmds.parent(node, scene_parent)
else:
cmds.parent(node, world=True)
holding_sets = cmds.listSets(object=placeholder.scene_identifier)
if not holding_sets:
return
for holding_set in holding_sets:
cmds.sets(roots, forceElement=holding_set)
def build_workfile_template(*args):
builder = MayaTemplateBuilder(registered_host())
builder.build_template()

View file

@ -0,0 +1,101 @@
from typing import List
from maya import cmds
def get_yeti_user_variables(yeti_shape_node: str) -> List[str]:
"""Get user defined yeti user variables for a `pgYetiMaya` shape node.
Arguments:
yeti_shape_node (str): The `pgYetiMaya` shape node.
Returns:
list: Attribute names (for a vector attribute it only lists the top
parent attribute, not the attribute per axis)
"""
attrs = cmds.listAttr(yeti_shape_node,
userDefined=True,
string=("yetiVariableV_*",
"yetiVariableF_*")) or []
valid_attrs = []
for attr in attrs:
attr_type = cmds.attributeQuery(attr, node=yeti_shape_node,
attributeType=True)
if attr.startswith("yetiVariableV_") and attr_type == "double3":
# vector
valid_attrs.append(attr)
elif attr.startswith("yetiVariableF_") and attr_type == "double":
valid_attrs.append(attr)
return valid_attrs
def create_yeti_variable(yeti_shape_node: str,
attr_name: str,
value=None,
force_value: bool = False) -> bool:
"""Get user defined yeti user variables for a `pgYetiMaya` shape node.
Arguments:
yeti_shape_node (str): The `pgYetiMaya` shape node.
attr_name (str): The fully qualified yeti variable name, e.g.
"yetiVariableF_myfloat" or "yetiVariableV_myvector"
value (object): The value to set (must match the type of the attribute)
When value is None it will ignored and not be set.
force_value (bool): Whether to set the value if the attribute already
exists or not.
Returns:
bool: Whether the attribute value was set or not.
"""
exists = cmds.attributeQuery(attr_name, node=yeti_shape_node, exists=True)
if not exists:
if attr_name.startswith("yetiVariableV_"):
_create_vector_yeti_user_variable(yeti_shape_node, attr_name)
if attr_name.startswith("yetiVariableF_"):
_create_float_yeti_user_variable(yeti_shape_node, attr_name)
if value is not None and (not exists or force_value):
plug = "{}.{}".format(yeti_shape_node, attr_name)
if (
isinstance(value, (list, tuple))
and attr_name.startswith("yetiVariableV_")
):
cmds.setAttr(plug, *value, type="double3")
else:
cmds.setAttr(plug, value)
return True
return False
def _create_vector_yeti_user_variable(yeti_shape_node: str, attr_name: str):
if not attr_name.startswith("yetiVariableV_"):
raise ValueError("Must start with yetiVariableV_")
cmds.addAttr(yeti_shape_node,
longName=attr_name,
attributeType="double3",
cachedInternally=True,
keyable=True)
for axis in "XYZ":
cmds.addAttr(yeti_shape_node,
longName="{}{}".format(attr_name, axis),
attributeType="double",
parent=attr_name,
cachedInternally=True,
keyable=True)
def _create_float_yeti_user_variable(yeti_node: str, attr_name: str):
if not attr_name.startswith("yetiVariableF_"):
raise ValueError("Must start with yetiVariableF_")
cmds.addAttr(yeti_node,
longName=attr_name,
attributeType="double",
cachedInternally=True,
softMinValue=0,
softMaxValue=100,
keyable=True)
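A usage sketch of the new Yeti helpers; the shape node name and values are assumptions:

from ayon_core.hosts.maya.api.yeti import (
    create_yeti_variable,
    get_yeti_user_variables,
)

shape = "pgYetiMayaShape1"  # illustrative pgYetiMaya shape node
create_yeti_variable(shape, "yetiVariableF_density", value=2.5)
create_yeti_variable(shape, "yetiVariableV_tint", value=(1.0, 0.5, 0.25))
print(get_yeti_user_variables(shape))  # the two attributes just created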

View file

@ -0,0 +1,39 @@
from ayon_core.lib import (
BoolDef
)
from ayon_core.pipeline import (
load,
registered_host
)
from ayon_core.hosts.maya.api.workfile_template_builder import (
MayaTemplateBuilder
)
class LoadAsTemplate(load.LoaderPlugin):
"""Load workfile as a template """
product_types = {"workfile", "mayaScene"}
label = "Load as template"
representations = ["ma", "mb"]
icon = "wrench"
color = "#775555"
order = 10
options = [
BoolDef("keep_placeholders",
label="Keep Placeholders",
default=False),
BoolDef("create_first_version",
label="Create First Version",
default=False),
]
def load(self, context, name, namespace, data):
keep_placeholders = data.get("keep_placeholders", False)
create_first_version = data.get("create_first_version", False)
path = self.filepath_from_context(context)
builder = MayaTemplateBuilder(registered_host())
builder.build_template(template_path=path,
keep_placeholders=keep_placeholders,
create_first_version=create_first_version)

View file

@ -12,6 +12,7 @@ from ayon_core.pipeline import (
get_representation_path
)
from ayon_core.hosts.maya.api import lib
from ayon_core.hosts.maya.api.yeti import create_yeti_variable
from ayon_core.hosts.maya.api.pipeline import containerise
from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type
@ -23,8 +24,19 @@ SKIP_UPDATE_ATTRS = {
"viewportDensity",
"viewportWidth",
"viewportLength",
"renderDensity",
"renderWidth",
"renderLength",
"increaseRenderBounds"
}
SKIP_ATTR_MESSAGE = (
"Skipping updating %s.%s to %s because it "
"is considered a local overridable attribute. "
"Either set manually or the load the cache "
"anew."
)
def set_attribute(node, attr, value):
"""Wrapper of set attribute which ignores None values"""
@ -209,9 +221,31 @@ class YetiCacheLoader(load.LoaderPlugin):
for attr, value in node_settings["attrs"].items():
if attr in SKIP_UPDATE_ATTRS:
self.log.info(
SKIP_ATTR_MESSAGE, yeti_node, attr, value
)
continue
set_attribute(attr, value, yeti_node)
# Set up user defined attributes
user_variables = node_settings.get("user_variables", {})
for attr, value in user_variables.items():
was_value_set = create_yeti_variable(
yeti_shape_node=yeti_node,
attr_name=attr,
value=value,
# We do not want to update the
# value if it already exists so
# that any local overrides that
# may have been applied still
# persist
force_value=False
)
if not was_value_set:
self.log.info(
SKIP_ATTR_MESSAGE, yeti_node, attr, value
)
cmds.setAttr("{}.representation".format(container_node),
repre_entity["id"],
typ="string")
@ -332,6 +366,13 @@ class YetiCacheLoader(load.LoaderPlugin):
for attr, value in attributes.items():
set_attribute(attr, value, yeti_node)
# Set up user defined attributes
user_variables = node_settings.get("user_variables", {})
for attr, value in user_variables.items():
create_yeti_variable(yeti_shape_node=yeti_node,
attr_name=attr,
value=value)
# Connect to the time node
cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node)

View file

@ -1,8 +1,13 @@
from typing import List
import maya.cmds as cmds
from ayon_core.hosts.maya.api import plugin
from ayon_core.hosts.maya.api import lib
from ayon_core.pipeline import registered_host
from ayon_core.pipeline.create import CreateContext
class YetiRigLoader(plugin.ReferenceLoader):
"""This loader will load Yeti rig."""
@ -15,6 +20,9 @@ class YetiRigLoader(plugin.ReferenceLoader):
icon = "code-fork"
color = "orange"
# From settings
create_cache_instance_on_load = True
def process_reference(
self, context, name=None, namespace=None, options=None
):
@ -49,4 +57,41 @@ class YetiRigLoader(plugin.ReferenceLoader):
)
self[:] = nodes
if self.create_cache_instance_on_load:
# Automatically create an instance to allow publishing the loaded
# yeti rig into a yeti cache
self._create_yeti_cache_instance(nodes, variant=namespace)
return nodes
def _create_yeti_cache_instance(self, nodes: List[str], variant: str):
"""Create a yeticache product type instance to publish the output.
This is similar to how loading an animation rig automatically creates
an animation instance for publishing the loaded character rig, but
here for yeti rigs.
Args:
nodes (List[str]): Nodes generated on load.
variant (str): Variant for the yeti cache instance to create.
"""
# Find the roots amongst the loaded nodes
yeti_nodes = cmds.ls(nodes, type="pgYetiMaya", long=True)
assert yeti_nodes, "No pgYetiMaya nodes in rig, this is a bug."
self.log.info("Creating variant: {}".format(variant))
creator_identifier = "io.openpype.creators.maya.yeticache"
host = registered_host()
create_context = CreateContext(host)
with lib.maintained_selection():
cmds.select(yeti_nodes, noExpand=True)
create_context.create(
creator_identifier=creator_identifier,
variant=variant,
pre_create_data={"use_selection": True}
)

View file

@ -12,7 +12,7 @@ class CollectFileDependencies(pyblish.api.ContextPlugin):
families = ["renderlayer"]
@classmethod
def apply_settings(cls, project_settings, system_settings):
def apply_settings(cls, project_settings):
# Disable plug-in if not used for deadline submission anyway
settings = project_settings["deadline"]["publish"]["MayaSubmitDeadline"] # noqa
cls.enabled = settings.get("asset_dependencies", True)

View file

@ -3,6 +3,7 @@ from maya import cmds
import pyblish.api
from ayon_core.hosts.maya.api import lib
from ayon_core.hosts.maya.api.yeti import get_yeti_user_variables
SETTINGS = {
@ -34,7 +35,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
- "increaseRenderBounds"
- "imageSearchPath"
Other information is the name of the transform and it's Colorbleed ID
Other information is the name of the transform and its `cbId`
"""
order = pyblish.api.CollectorOrder + 0.45
@ -54,6 +55,16 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
# Get specific node attributes
attr_data = {}
for attr in SETTINGS:
# Ignore non-existing attributes with a warning, e.g. cbId
# if they have not been generated yet
if not cmds.attributeQuery(attr, node=shape, exists=True):
self.log.warning(
"Attribute '{}' not found on Yeti node: {}".format(
attr, shape
)
)
continue
current = cmds.getAttr("%s.%s" % (shape, attr))
# change None to empty string as Maya doesn't support
# NoneType in attributes
@ -61,6 +72,12 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
current = ""
attr_data[attr] = current
# Get user variable attributes
user_variable_attrs = {
attr: lib.get_attribute("{}.{}".format(shape, attr))
for attr in get_yeti_user_variables(shape)
}
# Get transform data
parent = cmds.listRelatives(shape, parent=True)[0]
transform_data = {"name": parent, "cbId": lib.get_id(parent)}
@ -70,6 +87,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
"name": shape,
"cbId": lib.get_id(shape),
"attrs": attr_data,
"user_variables": user_variable_attrs
}
settings["nodes"].append(shape_data)

View file

@ -5,7 +5,13 @@ import os
from maya import cmds
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.lib import maintained_selection
from ayon_core.hosts.maya.api.lib import (
maintained_selection,
renderlayer
)
from ayon_core.hosts.maya.api.render_setup_tools import (
allow_export_from_render_setup_layer
)
class ExtractRedshiftProxy(publish.Extractor):
@ -18,6 +24,9 @@ class ExtractRedshiftProxy(publish.Extractor):
def process(self, instance):
"""Extractor entry point."""
# Make sure Redshift is loaded
cmds.loadPlugin("redshift4maya", quiet=True)
staging_dir = self.staging_dir(instance)
file_name = "{}.rs".format(instance.name)
file_path = os.path.join(staging_dir, file_name)
@ -60,14 +69,22 @@ class ExtractRedshiftProxy(publish.Extractor):
# Write out rs file
self.log.debug("Writing: '%s'" % file_path)
# Allow overriding what renderlayer to export from. By default force
# it to the default render layer. (Note that the renderlayer isn't
# currently exposed as an attribute to artists)
layer = instance.data.get("renderLayer", "defaultRenderLayer")
with maintained_selection():
cmds.select(instance.data["setMembers"], noExpand=True)
cmds.file(file_path,
pr=False,
force=True,
type="Redshift Proxy",
exportSelected=True,
options=rs_options)
with renderlayer(layer):
with allow_export_from_render_setup_layer():
cmds.select(instance.data["setMembers"], noExpand=True)
cmds.file(file_path,
preserveReferences=False,
force=True,
type="Redshift Proxy",
exportSelected=True,
options=rs_options)
if "representations" not in instance.data:
instance.data["representations"] = []

View file

@ -0,0 +1,265 @@
import json
from maya import cmds
from ayon_core.pipeline.workfile.workfile_template_builder import (
PlaceholderPlugin,
LoadPlaceholderItem,
PlaceholderLoadMixin,
)
from ayon_core.hosts.maya.api.lib import (
read,
imprint,
get_reference_node
)
from ayon_core.hosts.maya.api.workfile_template_builder import PLACEHOLDER_SET
class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
identifier = "maya.load"
label = "Maya load"
def _collect_scene_placeholders(self):
# Cache placeholder data to shared data
placeholder_nodes = self.builder.get_shared_populate_data(
"placeholder_nodes"
)
if placeholder_nodes is None:
attributes = cmds.ls("*.plugin_identifier", long=True)
placeholder_nodes = {}
for attribute in attributes:
node_name = attribute.rpartition(".")[0]
placeholder_nodes[node_name] = (
self._parse_placeholder_node_data(node_name)
)
self.builder.set_shared_populate_data(
"placeholder_nodes", placeholder_nodes
)
return placeholder_nodes
def _parse_placeholder_node_data(self, node_name):
placeholder_data = read(node_name)
parent_name = (
cmds.getAttr(node_name + ".parent", asString=True)
or node_name.rpartition("|")[0]
or ""
)
if parent_name:
siblings = cmds.listRelatives(parent_name, children=True)
else:
siblings = cmds.ls(assemblies=True)
node_shortname = node_name.rpartition("|")[2]
current_index = cmds.getAttr(node_name + ".index", asString=True)
if current_index < 0:
current_index = siblings.index(node_shortname)
placeholder_data.update({
"parent": parent_name,
"index": current_index
})
return placeholder_data
def _create_placeholder_name(self, placeholder_data):
placeholder_name_parts = placeholder_data["builder_type"].split("_")
pos = 1
placeholder_product_type = placeholder_data.get("product_type")
if placeholder_product_type is None:
placeholder_product_type = placeholder_data.get("family")
if placeholder_product_type:
placeholder_name_parts.insert(pos, placeholder_product_type)
pos += 1
# add loader arguments if any
loader_args = placeholder_data["loader_args"]
if loader_args:
loader_args = json.loads(loader_args.replace('\'', '\"'))
values = [v for v in loader_args.values()]
for value in values:
placeholder_name_parts.insert(pos, value)
pos += 1
placeholder_name = "_".join(placeholder_name_parts)
return placeholder_name.capitalize()
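    # Example with hypothetical data: builder_type "context_folder",
    # product_type "model" and loader_args "{'namespace': 'CHAR'}" build
    # the parts ["context", "model", "CHAR", "folder"], joined and
    # returned as "Context_model_char_folder" (str.capitalize()
    # lowercases everything after the first character).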
def _get_loaded_repre_ids(self):
loaded_representation_ids = self.builder.get_shared_populate_data(
"loaded_representation_ids"
)
if loaded_representation_ids is None:
try:
containers = cmds.sets("AVALON_CONTAINERS", q=True)
except ValueError:
containers = []
loaded_representation_ids = {
cmds.getAttr(container + ".representation")
for container in containers
}
self.builder.set_shared_populate_data(
"loaded_representation_ids", loaded_representation_ids
)
return loaded_representation_ids
def create_placeholder(self, placeholder_data):
selection = cmds.ls(selection=True)
if len(selection) > 1:
raise ValueError("More then one item are selected")
parent = selection[0] if selection else None
placeholder_data["plugin_identifier"] = self.identifier
placeholder_name = self._create_placeholder_name(placeholder_data)
placeholder = cmds.spaceLocator(name=placeholder_name)[0]
if parent:
placeholder = cmds.parent(placeholder, selection[0])[0]
imprint(placeholder, placeholder_data)
# Add helper attributes to keep placeholder info
cmds.addAttr(
placeholder,
longName="parent",
hidden=True,
dataType="string"
)
cmds.addAttr(
placeholder,
longName="index",
hidden=True,
attributeType="short",
defaultValue=-1
)
cmds.setAttr(placeholder + ".parent", "", type="string")
def update_placeholder(self, placeholder_item, placeholder_data):
node_name = placeholder_item.scene_identifier
new_values = {}
for key, value in placeholder_data.items():
placeholder_value = placeholder_item.data.get(key)
if value != placeholder_value:
new_values[key] = value
placeholder_item.data[key] = value
for key in new_values.keys():
cmds.deleteAttr(node_name + "." + key)
imprint(node_name, new_values)
def collect_placeholders(self):
output = []
scene_placeholders = self._collect_scene_placeholders()
for node_name, placeholder_data in scene_placeholders.items():
if placeholder_data.get("plugin_identifier") != self.identifier:
continue
# TODO do data validations and maybe upgrades if they are invalid
output.append(
LoadPlaceholderItem(node_name, placeholder_data, self)
)
return output
def populate_placeholder(self, placeholder):
self.populate_load_placeholder(placeholder)
def repopulate_placeholder(self, placeholder):
repre_ids = self._get_loaded_repre_ids()
self.populate_load_placeholder(placeholder, repre_ids)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def post_placeholder_process(self, placeholder, failed):
"""Cleanup placeholder after load of its corresponding representations.
Args:
placeholder (PlaceholderItem): Item which was just used to load
representation.
failed (bool): Loading of representation failed.
"""
# Hide placeholder and add them to placeholder set
node = placeholder.scene_identifier
cmds.sets(node, addElement=PLACEHOLDER_SET)
cmds.hide(node)
cmds.setAttr(node + ".hiddenInOutliner", True)
def delete_placeholder(self, placeholder):
"""Remove placeholder if building was successful"""
cmds.delete(placeholder.scene_identifier)
def load_succeed(self, placeholder, container):
self._parent_in_hierarchy(placeholder, container)
def _parent_in_hierarchy(self, placeholder, container):
"""Parent loaded container to placeholder's parent.
i.e. set loaded content as the placeholder's sibling.
Args:
container (str): Container loaded by the placeholder.
"""
if not container:
return
roots = cmds.sets(container, q=True) or []
ref_node = None
try:
ref_node = get_reference_node(roots)
except AssertionError as e:
self.log.info(e.args[0])
nodes_to_parent = []
for root in roots:
if ref_node:
ref_root = cmds.referenceQuery(root, nodes=True)[0]
ref_root = (
cmds.listRelatives(ref_root, parent=True, path=True) or
[ref_root]
)
nodes_to_parent.extend(ref_root)
continue
if root.endswith("_RN"):
# Backwards compatibility for hardcoded reference names.
refRoot = cmds.referenceQuery(root, n=True)[0]
refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot]
nodes_to_parent.extend(refRoot)
elif root not in cmds.listSets(allSets=True):
nodes_to_parent.append(root)
elif not cmds.sets(root, q=True):
return
# Move loaded nodes to correct index in outliner hierarchy
placeholder_form = cmds.xform(
placeholder.scene_identifier,
q=True,
matrix=True,
worldSpace=True
)
scene_parent = cmds.listRelatives(
placeholder.scene_identifier, parent=True, fullPath=True
)
for node in set(nodes_to_parent):
cmds.reorder(node, front=True)
cmds.reorder(node, relative=placeholder.data["index"])
cmds.xform(node, matrix=placeholder_form, ws=True)
if scene_parent:
cmds.parent(node, scene_parent)
else:
if cmds.listRelatives(node, parent=True):
cmds.parent(node, world=True)
holding_sets = cmds.listSets(object=placeholder.scene_identifier)
if not holding_sets:
return
for holding_set in holding_sets:
cmds.sets(roots, forceElement=holding_set)
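As with the Nuke variant later in this diff, this plugin is discovered from the host's workfile_build plugin directory instead of being returned by the host class. A minimal sketch of triggering a build that would pick it up, assuming the Maya API module exposes a build_workfile_template helper analogous to the Nuke one shown below:

# Hypothetical invocation from a running Maya session; the helper name
# mirrors the Nuke variant and is an assumption here.
from ayon_core.hosts.maya.api.workfile_template_builder import (
    build_workfile_template,
)

build_workfile_template()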

View file

@ -18,6 +18,7 @@ from ayon_core.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
register_inventory_action_path,
register_workfile_build_plugin_path,
AYON_INSTANCE_ID,
AVALON_INSTANCE_ID,
AVALON_CONTAINER_ID,
@ -52,8 +53,6 @@ from .lib import (
MENU_LABEL,
)
from .workfile_template_builder import (
NukePlaceholderLoadPlugin,
NukePlaceholderCreatePlugin,
build_workfile_template,
create_placeholder,
update_placeholder,
@ -76,6 +75,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
# registering pyblish gui regarding settings in presets
if os.getenv("PYBLISH_GUI", None):
@ -105,18 +105,11 @@ class NukeHost(
def get_workfile_extensions(self):
return file_extensions()
def get_workfile_build_placeholder_plugins(self):
return [
NukePlaceholderLoadPlugin,
NukePlaceholderCreatePlugin
]
def get_containers(self):
return ls()
def install(self):
''' Installing all requarements for Nuke host
'''
"""Installing all requirements for Nuke host"""
pyblish.api.register_host("nuke")
@ -125,6 +118,7 @@ class NukeHost(
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
# Register AYON event for workfiles loading.
register_event_callback("workio.open_file", check_inventory_versions)
@ -178,7 +172,6 @@ def add_nuke_callbacks():
# set apply all workfile settings on script load and save
nuke.addOnScriptLoad(WorkfileSettings().set_context_settings)
if nuke_settings["dirmap"]["enabled"]:
log.info("Added Nuke's dir-mapping callback ...")
# Add dirmap for file paths.

View file

@ -1,30 +1,17 @@
import collections
import nuke
from ayon_core.pipeline import registered_host
from ayon_core.pipeline.workfile.workfile_template_builder import (
AbstractTemplateBuilder,
PlaceholderPlugin,
LoadPlaceholderItem,
CreatePlaceholderItem,
PlaceholderLoadMixin,
PlaceholderCreateMixin,
)
from ayon_core.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from .lib import (
find_free_space_to_paste_nodes,
get_extreme_positions,
get_group_io_nodes,
imprint,
refresh_node,
refresh_nodes,
reset_selection,
get_names_from_nodes,
get_nodes_by_names,
select_nodes,
duplicate_node,
node_tempfile,
get_main_window,
WorkfileSettings,
)
@ -54,6 +41,7 @@ class NukeTemplateBuilder(AbstractTemplateBuilder):
return True
class NukePlaceholderPlugin(PlaceholderPlugin):
node_color = 4278190335
@ -120,843 +108,6 @@ class NukePlaceholderPlugin(PlaceholderPlugin):
nuke.delete(placeholder_node)
class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
identifier = "nuke.load"
label = "Nuke load"
def _parse_placeholder_node_data(self, node):
placeholder_data = super(
NukePlaceholderLoadPlugin, self
)._parse_placeholder_node_data(node)
node_knobs = node.knobs()
nb_children = 0
if "nb_children" in node_knobs:
nb_children = int(node_knobs["nb_children"].getValue())
placeholder_data["nb_children"] = nb_children
siblings = []
if "siblings" in node_knobs:
siblings = node_knobs["siblings"].values()
placeholder_data["siblings"] = siblings
node_full_name = node.fullName()
placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
placeholder_data["last_loaded"] = []
placeholder_data["delete"] = False
return placeholder_data
def _get_loaded_repre_ids(self):
loaded_representation_ids = self.builder.get_shared_populate_data(
"loaded_representation_ids"
)
if loaded_representation_ids is None:
loaded_representation_ids = set()
for node in nuke.allNodes():
if "repre_id" in node.knobs():
loaded_representation_ids.add(
node.knob("repre_id").getValue()
)
self.builder.set_shared_populate_data(
"loaded_representation_ids", loaded_representation_ids
)
return loaded_representation_ids
def _before_placeholder_load(self, placeholder):
placeholder.data["nodes_init"] = nuke.allNodes()
def _before_repre_load(self, placeholder, representation):
placeholder.data["last_repre_id"] = representation["id"]
def collect_placeholders(self):
output = []
scene_placeholders = self._collect_scene_placeholders()
for node_name, node in scene_placeholders.items():
plugin_identifier_knob = node.knob("plugin_identifier")
if (
plugin_identifier_knob is None
or plugin_identifier_knob.getValue() != self.identifier
):
continue
placeholder_data = self._parse_placeholder_node_data(node)
# TODO do data validations and maybe upgrades if they are invalid
output.append(
LoadPlaceholderItem(node_name, placeholder_data, self)
)
return output
def populate_placeholder(self, placeholder):
self.populate_load_placeholder(placeholder)
def repopulate_placeholder(self, placeholder):
repre_ids = self._get_loaded_repre_ids()
self.populate_load_placeholder(placeholder, repre_ids)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def post_placeholder_process(self, placeholder, failed):
"""Cleanup placeholder after load of its corresponding representations.
Args:
placeholder (PlaceholderItem): Item which was just used to load
representation.
failed (bool): Loading of representation failed.
"""
# deselect all selected nodes
placeholder_node = nuke.toNode(placeholder.scene_identifier)
# getting the latest nodes added
# TODO get from shared populate data!
nodes_init = placeholder.data["nodes_init"]
nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init))
self.log.debug("Loaded nodes: {}".format(nodes_loaded))
if not nodes_loaded:
return
placeholder.data["delete"] = True
nodes_loaded = self._move_to_placeholder_group(
placeholder, nodes_loaded
)
placeholder.data["last_loaded"] = nodes_loaded
refresh_nodes(nodes_loaded)
# positioning of the loaded nodes
min_x, min_y, _, _ = get_extreme_positions(nodes_loaded)
for node in nodes_loaded:
xpos = (node.xpos() - min_x) + placeholder_node.xpos()
ypos = (node.ypos() - min_y) + placeholder_node.ypos()
node.setXYpos(xpos, ypos)
refresh_nodes(nodes_loaded)
# fix the problem of z_order for backdrops
self._fix_z_order(placeholder)
if placeholder.data.get("keep_placeholder"):
self._imprint_siblings(placeholder)
if placeholder.data["nb_children"] == 0:
# save initial nodes positions and dimensions, update them
# and set inputs and outputs of loaded nodes
if placeholder.data.get("keep_placeholder"):
self._imprint_inits()
self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded)
self._set_loaded_connections(placeholder)
elif placeholder.data["siblings"]:
# create copies of placeholder siblings for the new loaded nodes,
# set their inputs and outputs and update all nodes positions and
# dimensions and siblings names
siblings = get_nodes_by_names(placeholder.data["siblings"])
refresh_nodes(siblings)
copies = self._create_sib_copies(placeholder)
new_nodes = list(copies.values()) # copies nodes
self._update_nodes(placeholder, new_nodes, nodes_loaded)
placeholder_node.removeKnob(placeholder_node.knob("siblings"))
new_nodes_name = get_names_from_nodes(new_nodes)
imprint(placeholder_node, {"siblings": new_nodes_name})
self._set_copies_connections(placeholder, copies)
self._update_nodes(
placeholder,
nuke.allNodes(),
new_nodes + nodes_loaded,
20
)
new_siblings = get_names_from_nodes(new_nodes)
placeholder.data["siblings"] = new_siblings
else:
# if the placeholder doesn't have siblings, the loaded
# nodes will be placed in a free space
xpointer, ypointer = find_free_space_to_paste_nodes(
nodes_loaded, direction="bottom", offset=200
)
node = nuke.createNode("NoOp")
reset_selection()
nuke.delete(node)
for node in nodes_loaded:
xpos = (node.xpos() - min_x) + xpointer
ypos = (node.ypos() - min_y) + ypointer
node.setXYpos(xpos, ypos)
placeholder.data["nb_children"] += 1
reset_selection()
# go back to root group
nuke.root().begin()
def _move_to_placeholder_group(self, placeholder, nodes_loaded):
"""
opening the placeholder's group and copying loaded nodes in it.
Returns :
nodes_loaded (list): the new list of pasted nodes
"""
groups_name = placeholder.data["group_name"]
reset_selection()
select_nodes(nodes_loaded)
if groups_name:
with node_tempfile() as filepath:
nuke.nodeCopy(filepath)
for node in nuke.selectedNodes():
nuke.delete(node)
group = nuke.toNode(groups_name)
group.begin()
nuke.nodePaste(filepath)
nodes_loaded = nuke.selectedNodes()
return nodes_loaded
def _fix_z_order(self, placeholder):
"""Fix the problem of z_order when a backdrop is loaded."""
nodes_loaded = placeholder.data["last_loaded"]
loaded_backdrops = []
bd_orders = set()
for node in nodes_loaded:
if isinstance(node, nuke.BackdropNode):
loaded_backdrops.append(node)
bd_orders.add(node.knob("z_order").getValue())
if not bd_orders:
return
sib_orders = set()
for node_name in placeholder.data["siblings"]:
node = nuke.toNode(node_name)
if isinstance(node, nuke.BackdropNode):
sib_orders.add(node.knob("z_order").getValue())
if not sib_orders:
return
min_order = min(bd_orders)
max_order = max(sib_orders)
for backdrop_node in loaded_backdrops:
z_order = backdrop_node.knob("z_order").getValue()
backdrop_node.knob("z_order").setValue(
z_order + max_order - min_order + 1)
def _imprint_siblings(self, placeholder):
"""
- add sibling names to placeholder attributes (nodes loaded with it)
- add the representation id to the attributes of all other nodes
"""
loaded_nodes = placeholder.data["last_loaded"]
loaded_nodes_set = set(loaded_nodes)
data = {"repre_id": str(placeholder.data["last_repre_id"])}
for node in loaded_nodes:
node_knobs = node.knobs()
if "builder_type" not in node_knobs:
# save the id of representation for all imported nodes
imprint(node, data)
node.knob("repre_id").setVisible(False)
refresh_node(node)
continue
if (
"is_placeholder" not in node_knobs
or (
"is_placeholder" in node_knobs
and node.knob("is_placeholder").value()
)
):
siblings = list(loaded_nodes_set - {node})
siblings_name = get_names_from_nodes(siblings)
siblings = {"siblings": siblings_name}
imprint(node, siblings)
def _imprint_inits(self):
"""Add initial positions and dimensions to the attributes"""
for node in nuke.allNodes():
refresh_node(node)
imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
node.knob("x_init").setVisible(False)
node.knob("y_init").setVisible(False)
width = node.screenWidth()
height = node.screenHeight()
if "bdwidth" in node.knobs():
imprint(node, {"w_init": width, "h_init": height})
node.knob("w_init").setVisible(False)
node.knob("h_init").setVisible(False)
refresh_node(node)
def _update_nodes(
self, placeholder, nodes, considered_nodes, offset_y=None
):
"""Adjust backdrop nodes dimensions and positions.
Considering some nodes sizes.
Args:
nodes (list): list of nodes to update
considered_nodes (list): list of nodes to consider while updating
positions and dimensions
offset (int): distance between copies
"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
diff_x = diff_y = 0
contained_nodes = [] # for backdrops
if offset_y is None:
width_ph = placeholder_node.screenWidth()
height_ph = placeholder_node.screenHeight()
diff_y = max_y - min_y - height_ph
diff_x = max_x - min_x - width_ph
contained_nodes = [placeholder_node]
min_x = placeholder_node.xpos()
min_y = placeholder_node.ypos()
else:
siblings = get_nodes_by_names(placeholder.data["siblings"])
minX, _, maxX, _ = get_extreme_positions(siblings)
diff_y = max_y - min_y + 20
diff_x = abs(max_x - min_x - maxX + minX)
contained_nodes = considered_nodes
if diff_y <= 0 and diff_x <= 0:
return
for node in nodes:
refresh_node(node)
if (
node == placeholder_node
or node in considered_nodes
):
continue
if (
not isinstance(node, nuke.BackdropNode)
or (
isinstance(node, nuke.BackdropNode)
and not set(contained_nodes) <= set(node.getNodes())
)
):
if offset_y is None and node.xpos() >= min_x:
node.setXpos(node.xpos() + diff_x)
if node.ypos() >= min_y:
node.setYpos(node.ypos() + diff_y)
else:
width = node.screenWidth()
height = node.screenHeight()
node.knob("bdwidth").setValue(width + diff_x)
node.knob("bdheight").setValue(height + diff_y)
refresh_node(node)
def _set_loaded_connections(self, placeholder):
"""
set inputs and outputs of loaded nodes"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
input_node, output_node = get_group_io_nodes(
placeholder.data["last_loaded"]
)
for node in placeholder_node.dependent():
for idx in range(node.inputs()):
if node.input(idx) == placeholder_node and output_node:
node.setInput(idx, output_node)
for node in placeholder_node.dependencies():
for idx in range(placeholder_node.inputs()):
if placeholder_node.input(idx) == node and input_node:
input_node.setInput(0, node)
def _create_sib_copies(self, placeholder):
""" creating copies of the palce_holder siblings (the ones who were
loaded with it) for the new nodes added
Returns :
copies (dict) : with copied nodes names and their copies
"""
copies = {}
siblings = get_nodes_by_names(placeholder.data["siblings"])
for node in siblings:
new_node = duplicate_node(node)
x_init = int(new_node.knob("x_init").getValue())
y_init = int(new_node.knob("y_init").getValue())
new_node.setXYpos(x_init, y_init)
if isinstance(new_node, nuke.BackdropNode):
w_init = new_node.knob("w_init").getValue()
h_init = new_node.knob("h_init").getValue()
new_node.knob("bdwidth").setValue(w_init)
new_node.knob("bdheight").setValue(h_init)
refresh_node(node)
if "repre_id" in node.knobs().keys():
node.removeKnob(node.knob("repre_id"))
copies[node.name()] = new_node
return copies
def _set_copies_connections(self, placeholder, copies):
"""Set inputs and outputs of the copies.
Args:
copies (dict): Copied nodes by their names.
"""
last_input, last_output = get_group_io_nodes(
placeholder.data["last_loaded"]
)
siblings = get_nodes_by_names(placeholder.data["siblings"])
siblings_input, siblings_output = get_group_io_nodes(siblings)
copy_input = copies[siblings_input.name()]
copy_output = copies[siblings_output.name()]
for node_init in siblings:
if node_init == siblings_output:
continue
node_copy = copies[node_init.name()]
for node in node_init.dependent():
for idx in range(node.inputs()):
if node.input(idx) != node_init:
continue
if node in siblings:
copies[node.name()].setInput(idx, node_copy)
else:
last_input.setInput(0, node_copy)
for node in node_init.dependencies():
for idx in range(node_init.inputs()):
if node_init.input(idx) != node:
continue
if node_init == siblings_input:
copy_input.setInput(idx, node)
elif node in siblings:
node_copy.setInput(idx, copies[node.name()])
else:
node_copy.setInput(idx, last_output)
siblings_input.setInput(0, copy_output)
class NukePlaceholderCreatePlugin(
NukePlaceholderPlugin, PlaceholderCreateMixin
):
identifier = "nuke.create"
label = "Nuke create"
def _parse_placeholder_node_data(self, node):
placeholder_data = super(
NukePlaceholderCreatePlugin, self
)._parse_placeholder_node_data(node)
node_knobs = node.knobs()
nb_children = 0
if "nb_children" in node_knobs:
nb_children = int(node_knobs["nb_children"].getValue())
placeholder_data["nb_children"] = nb_children
siblings = []
if "siblings" in node_knobs:
siblings = node_knobs["siblings"].values()
placeholder_data["siblings"] = siblings
node_full_name = node.fullName()
placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
placeholder_data["last_loaded"] = []
placeholder_data["delete"] = False
return placeholder_data
def _before_instance_create(self, placeholder):
placeholder.data["nodes_init"] = nuke.allNodes()
def collect_placeholders(self):
output = []
scene_placeholders = self._collect_scene_placeholders()
for node_name, node in scene_placeholders.items():
plugin_identifier_knob = node.knob("plugin_identifier")
if (
plugin_identifier_knob is None
or plugin_identifier_knob.getValue() != self.identifier
):
continue
placeholder_data = self._parse_placeholder_node_data(node)
output.append(
CreatePlaceholderItem(node_name, placeholder_data, self)
)
return output
def populate_placeholder(self, placeholder):
self.populate_create_placeholder(placeholder)
def repopulate_placeholder(self, placeholder):
self.populate_create_placeholder(placeholder)
def get_placeholder_options(self, options=None):
return self.get_create_plugin_options(options)
def post_placeholder_process(self, placeholder, failed):
"""Cleanup placeholder after load of its corresponding representations.
Args:
placeholder (PlaceholderItem): Item which was just used to load
representation.
failed (bool): Loading of representation failed.
"""
# deselect all selected nodes
placeholder_node = nuke.toNode(placeholder.scene_identifier)
# getting the latest nodes added
nodes_init = placeholder.data["nodes_init"]
nodes_created = list(set(nuke.allNodes()) - set(nodes_init))
self.log.debug("Created nodes: {}".format(nodes_created))
if not nodes_created:
return
placeholder.data["delete"] = True
nodes_created = self._move_to_placeholder_group(
placeholder, nodes_created
)
placeholder.data["last_created"] = nodes_created
refresh_nodes(nodes_created)
# positioning of the created nodes
min_x, min_y, _, _ = get_extreme_positions(nodes_created)
for node in nodes_created:
xpos = (node.xpos() - min_x) + placeholder_node.xpos()
ypos = (node.ypos() - min_y) + placeholder_node.ypos()
node.setXYpos(xpos, ypos)
refresh_nodes(nodes_created)
# fix the problem of z_order for backdrops
self._fix_z_order(placeholder)
if placeholder.data.get("keep_placeholder"):
self._imprint_siblings(placeholder)
if placeholder.data["nb_children"] == 0:
# save initial nodes positions and dimensions, update them
# and set inputs and outputs of created nodes
if placeholder.data.get("keep_placeholder"):
self._imprint_inits()
self._update_nodes(placeholder, nuke.allNodes(), nodes_created)
self._set_created_connections(placeholder)
elif placeholder.data["siblings"]:
# create copies of placeholder siblings for the new created nodes,
# set their inputs and outputs and update all nodes positions and
# dimensions and siblings names
siblings = get_nodes_by_names(placeholder.data["siblings"])
refresh_nodes(siblings)
copies = self._create_sib_copies(placeholder)
new_nodes = list(copies.values()) # copies nodes
self._update_nodes(placeholder, new_nodes, nodes_created)
placeholder_node.removeKnob(placeholder_node.knob("siblings"))
new_nodes_name = get_names_from_nodes(new_nodes)
imprint(placeholder_node, {"siblings": new_nodes_name})
self._set_copies_connections(placeholder, copies)
self._update_nodes(
placeholder,
nuke.allNodes(),
new_nodes + nodes_created,
20
)
new_siblings = get_names_from_nodes(new_nodes)
placeholder.data["siblings"] = new_siblings
else:
# if the placeholder doesn't have siblings, the created
# nodes will be placed in a free space
xpointer, ypointer = find_free_space_to_paste_nodes(
nodes_created, direction="bottom", offset=200
)
node = nuke.createNode("NoOp")
reset_selection()
nuke.delete(node)
for node in nodes_created:
xpos = (node.xpos() - min_x) + xpointer
ypos = (node.ypos() - min_y) + ypointer
node.setXYpos(xpos, ypos)
placeholder.data["nb_children"] += 1
reset_selection()
# go back to root group
nuke.root().begin()
def _move_to_placeholder_group(self, placeholder, nodes_created):
"""
opening the placeholder's group and copying created nodes in it.
Returns :
nodes_created (list): the new list of pasted nodes
"""
groups_name = placeholder.data["group_name"]
reset_selection()
select_nodes(nodes_created)
if groups_name:
with node_tempfile() as filepath:
nuke.nodeCopy(filepath)
for node in nuke.selectedNodes():
nuke.delete(node)
group = nuke.toNode(groups_name)
group.begin()
nuke.nodePaste(filepath)
nodes_created = nuke.selectedNodes()
return nodes_created
def _fix_z_order(self, placeholder):
"""Fix the problem of z_order when a backdrop is create."""
nodes_created = placeholder.data["last_created"]
created_backdrops = []
bd_orders = set()
for node in nodes_created:
if isinstance(node, nuke.BackdropNode):
created_backdrops.append(node)
bd_orders.add(node.knob("z_order").getValue())
if not bd_orders:
return
sib_orders = set()
for node_name in placeholder.data["siblings"]:
node = nuke.toNode(node_name)
if isinstance(node, nuke.BackdropNode):
sib_orders.add(node.knob("z_order").getValue())
if not sib_orders:
return
min_order = min(bd_orders)
max_order = max(sib_orders)
for backdrop_node in created_backdrops:
z_order = backdrop_node.knob("z_order").getValue()
backdrop_node.knob("z_order").setValue(
z_order + max_order - min_order + 1)
def _imprint_siblings(self, placeholder):
"""
Add sibling names to placeholder attributes (nodes created with it).
"""
created_nodes = placeholder.data["last_created"]
created_nodes_set = set(created_nodes)
for node in created_nodes:
node_knobs = node.knobs()
if (
"is_placeholder" not in node_knobs
or (
"is_placeholder" in node_knobs
and node.knob("is_placeholder").value()
)
):
siblings = list(created_nodes_set - {node})
siblings_name = get_names_from_nodes(siblings)
siblings = {"siblings": siblings_name}
imprint(node, siblings)
def _imprint_inits(self):
"""Add initial positions and dimensions to the attributes"""
for node in nuke.allNodes():
refresh_node(node)
imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
node.knob("x_init").setVisible(False)
node.knob("y_init").setVisible(False)
width = node.screenWidth()
height = node.screenHeight()
if "bdwidth" in node.knobs():
imprint(node, {"w_init": width, "h_init": height})
node.knob("w_init").setVisible(False)
node.knob("h_init").setVisible(False)
refresh_node(node)
def _update_nodes(
self, placeholder, nodes, considered_nodes, offset_y=None
):
"""Adjust backdrop nodes dimensions and positions.
Considering some nodes sizes.
Args:
nodes (list): list of nodes to update
considered_nodes (list): list of nodes to consider while updating
positions and dimensions
offset (int): distance between copies
"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
diff_x = diff_y = 0
contained_nodes = [] # for backdrops
if offset_y is None:
width_ph = placeholder_node.screenWidth()
height_ph = placeholder_node.screenHeight()
diff_y = max_y - min_y - height_ph
diff_x = max_x - min_x - width_ph
contained_nodes = [placeholder_node]
min_x = placeholder_node.xpos()
min_y = placeholder_node.ypos()
else:
siblings = get_nodes_by_names(placeholder.data["siblings"])
minX, _, maxX, _ = get_extreme_positions(siblings)
diff_y = max_y - min_y + 20
diff_x = abs(max_x - min_x - maxX + minX)
contained_nodes = considered_nodes
if diff_y <= 0 and diff_x <= 0:
return
for node in nodes:
refresh_node(node)
if (
node == placeholder_node
or node in considered_nodes
):
continue
if (
not isinstance(node, nuke.BackdropNode)
or (
isinstance(node, nuke.BackdropNode)
and not set(contained_nodes) <= set(node.getNodes())
)
):
if offset_y is None and node.xpos() >= min_x:
node.setXpos(node.xpos() + diff_x)
if node.ypos() >= min_y:
node.setYpos(node.ypos() + diff_y)
else:
width = node.screenWidth()
height = node.screenHeight()
node.knob("bdwidth").setValue(width + diff_x)
node.knob("bdheight").setValue(height + diff_y)
refresh_node(node)
def _set_created_connections(self, placeholder):
"""
set inputs and outputs of created nodes"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
input_node, output_node = get_group_io_nodes(
placeholder.data["last_created"]
)
for node in placeholder_node.dependent():
for idx in range(node.inputs()):
if node.input(idx) == placeholder_node and output_node:
node.setInput(idx, output_node)
for node in placeholder_node.dependencies():
for idx in range(placeholder_node.inputs()):
if placeholder_node.input(idx) == node and input_node:
input_node.setInput(0, node)
def _create_sib_copies(self, placeholder):
""" creating copies of the palce_holder siblings (the ones who were
created with it) for the new nodes added
Returns :
copies (dict) : with copied nodes names and their copies
"""
copies = {}
siblings = get_nodes_by_names(placeholder.data["siblings"])
for node in siblings:
new_node = duplicate_node(node)
x_init = int(new_node.knob("x_init").getValue())
y_init = int(new_node.knob("y_init").getValue())
new_node.setXYpos(x_init, y_init)
if isinstance(new_node, nuke.BackdropNode):
w_init = new_node.knob("w_init").getValue()
h_init = new_node.knob("h_init").getValue()
new_node.knob("bdwidth").setValue(w_init)
new_node.knob("bdheight").setValue(h_init)
refresh_node(node)
if "repre_id" in node.knobs().keys():
node.removeKnob(node.knob("repre_id"))
copies[node.name()] = new_node
return copies
def _set_copies_connections(self, placeholder, copies):
"""Set inputs and outputs of the copies.
Args:
copies (dict): Copied nodes by their names.
"""
last_input, last_output = get_group_io_nodes(
placeholder.data["last_created"]
)
siblings = get_nodes_by_names(placeholder.data["siblings"])
siblings_input, siblings_output = get_group_io_nodes(siblings)
copy_input = copies[siblings_input.name()]
copy_output = copies[siblings_output.name()]
for node_init in siblings:
if node_init == siblings_output:
continue
node_copy = copies[node_init.name()]
for node in node_init.dependent():
for idx in range(node.inputs()):
if node.input(idx) != node_init:
continue
if node in siblings:
copies[node.name()].setInput(idx, node_copy)
else:
last_input.setInput(0, node_copy)
for node in node_init.dependencies():
for idx in range(node_init.inputs()):
if node_init.input(idx) != node:
continue
if node_init == siblings_input:
copy_input.setInput(idx, node)
elif node in siblings:
node_copy.setInput(idx, copies[node.name()])
else:
node_copy.setInput(idx, last_output)
siblings_input.setInput(0, copy_output)
def build_workfile_template(*args, **kwargs):
builder = NukeTemplateBuilder(registered_host())
builder.build_template(*args, **kwargs)
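This helper is the entry point menu actions call; a minimal sketch of invoking it from a running Nuke session (keyword arguments, if any, are forwarded to AbstractTemplateBuilder.build_template):

from ayon_core.hosts.nuke.api.workfile_template_builder import (
    build_workfile_template,
)

# Populates placeholders in the current script via the registered
# workfile_build plugins.
build_workfile_template()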

View file

@ -0,0 +1,428 @@
import nuke
from ayon_core.pipeline.workfile.workfile_template_builder import (
CreatePlaceholderItem,
PlaceholderCreateMixin,
)
from ayon_core.hosts.nuke.api.lib import (
find_free_space_to_paste_nodes,
get_extreme_positions,
get_group_io_nodes,
imprint,
refresh_node,
refresh_nodes,
reset_selection,
get_names_from_nodes,
get_nodes_by_names,
select_nodes,
duplicate_node,
node_tempfile,
)
from ayon_core.hosts.nuke.api.workfile_template_builder import (
NukePlaceholderPlugin
)
class NukePlaceholderCreatePlugin(
NukePlaceholderPlugin, PlaceholderCreateMixin
):
identifier = "nuke.create"
label = "Nuke create"
def _parse_placeholder_node_data(self, node):
placeholder_data = super(
NukePlaceholderCreatePlugin, self
)._parse_placeholder_node_data(node)
node_knobs = node.knobs()
nb_children = 0
if "nb_children" in node_knobs:
nb_children = int(node_knobs["nb_children"].getValue())
placeholder_data["nb_children"] = nb_children
siblings = []
if "siblings" in node_knobs:
siblings = node_knobs["siblings"].values()
placeholder_data["siblings"] = siblings
node_full_name = node.fullName()
placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
placeholder_data["last_loaded"] = []
placeholder_data["delete"] = False
return placeholder_data
def _before_instance_create(self, placeholder):
placeholder.data["nodes_init"] = nuke.allNodes()
def collect_placeholders(self):
output = []
scene_placeholders = self._collect_scene_placeholders()
for node_name, node in scene_placeholders.items():
plugin_identifier_knob = node.knob("plugin_identifier")
if (
plugin_identifier_knob is None
or plugin_identifier_knob.getValue() != self.identifier
):
continue
placeholder_data = self._parse_placeholder_node_data(node)
output.append(
CreatePlaceholderItem(node_name, placeholder_data, self)
)
return output
def populate_placeholder(self, placeholder):
self.populate_create_placeholder(placeholder)
def repopulate_placeholder(self, placeholder):
self.populate_create_placeholder(placeholder)
def get_placeholder_options(self, options=None):
return self.get_create_plugin_options(options)
def post_placeholder_process(self, placeholder, failed):
"""Cleanup placeholder after load of its corresponding representations.
Args:
placeholder (PlaceholderItem): Item which was just used to load
representation.
failed (bool): Loading of representation failed.
"""
# deselect all selected nodes
placeholder_node = nuke.toNode(placeholder.scene_identifier)
# getting the latest nodes added
nodes_init = placeholder.data["nodes_init"]
nodes_created = list(set(nuke.allNodes()) - set(nodes_init))
self.log.debug("Created nodes: {}".format(nodes_created))
if not nodes_created:
return
placeholder.data["delete"] = True
nodes_created = self._move_to_placeholder_group(
placeholder, nodes_created
)
placeholder.data["last_created"] = nodes_created
refresh_nodes(nodes_created)
# positioning of the created nodes
min_x, min_y, _, _ = get_extreme_positions(nodes_created)
for node in nodes_created:
xpos = (node.xpos() - min_x) + placeholder_node.xpos()
ypos = (node.ypos() - min_y) + placeholder_node.ypos()
node.setXYpos(xpos, ypos)
refresh_nodes(nodes_created)
# fix the problem of z_order for backdrops
self._fix_z_order(placeholder)
if placeholder.data.get("keep_placeholder"):
self._imprint_siblings(placeholder)
if placeholder.data["nb_children"] == 0:
# save initial nodes positions and dimensions, update them
# and set inputs and outputs of created nodes
if placeholder.data.get("keep_placeholder"):
self._imprint_inits()
self._update_nodes(placeholder, nuke.allNodes(), nodes_created)
self._set_created_connections(placeholder)
elif placeholder.data["siblings"]:
# create copies of placeholder siblings for the new created nodes,
# set their inputs and outputs and update all nodes positions and
# dimensions and siblings names
siblings = get_nodes_by_names(placeholder.data["siblings"])
refresh_nodes(siblings)
copies = self._create_sib_copies(placeholder)
new_nodes = list(copies.values()) # copies nodes
self._update_nodes(placeholder, new_nodes, nodes_created)
placeholder_node.removeKnob(placeholder_node.knob("siblings"))
new_nodes_name = get_names_from_nodes(new_nodes)
imprint(placeholder_node, {"siblings": new_nodes_name})
self._set_copies_connections(placeholder, copies)
self._update_nodes(
placeholder,
nuke.allNodes(),
new_nodes + nodes_created,
20
)
new_siblings = get_names_from_nodes(new_nodes)
placeholder.data["siblings"] = new_siblings
else:
# if the placeholder doesn't have siblings, the created
# nodes will be placed in a free space
xpointer, ypointer = find_free_space_to_paste_nodes(
nodes_created, direction="bottom", offset=200
)
node = nuke.createNode("NoOp")
reset_selection()
nuke.delete(node)
for node in nodes_created:
xpos = (node.xpos() - min_x) + xpointer
ypos = (node.ypos() - min_y) + ypointer
node.setXYpos(xpos, ypos)
placeholder.data["nb_children"] += 1
reset_selection()
# go back to root group
nuke.root().begin()
def _move_to_placeholder_group(self, placeholder, nodes_created):
"""
opening the placeholder's group and copying created nodes in it.
Returns :
nodes_created (list): the new list of pasted nodes
"""
groups_name = placeholder.data["group_name"]
reset_selection()
select_nodes(nodes_created)
if groups_name:
with node_tempfile() as filepath:
nuke.nodeCopy(filepath)
for node in nuke.selectedNodes():
nuke.delete(node)
group = nuke.toNode(groups_name)
group.begin()
nuke.nodePaste(filepath)
nodes_created = nuke.selectedNodes()
return nodes_created
def _fix_z_order(self, placeholder):
"""Fix the problem of z_order when a backdrop is create."""
nodes_created = placeholder.data["last_created"]
created_backdrops = []
bd_orders = set()
for node in nodes_created:
if isinstance(node, nuke.BackdropNode):
created_backdrops.append(node)
bd_orders.add(node.knob("z_order").getValue())
if not bd_orders:
return
sib_orders = set()
for node_name in placeholder.data["siblings"]:
node = nuke.toNode(node_name)
if isinstance(node, nuke.BackdropNode):
sib_orders.add(node.knob("z_order").getValue())
if not sib_orders:
return
min_order = min(bd_orders)
max_order = max(sib_orders)
for backdrop_node in created_backdrops:
z_order = backdrop_node.knob("z_order").getValue()
backdrop_node.knob("z_order").setValue(
z_order + max_order - min_order + 1)
def _imprint_siblings(self, placeholder):
"""
Add sibling names to placeholder attributes (nodes created with it).
"""
created_nodes = placeholder.data["last_created"]
created_nodes_set = set(created_nodes)
for node in created_nodes:
node_knobs = node.knobs()
if (
"is_placeholder" not in node_knobs
or (
"is_placeholder" in node_knobs
and node.knob("is_placeholder").value()
)
):
siblings = list(created_nodes_set - {node})
siblings_name = get_names_from_nodes(siblings)
siblings = {"siblings": siblings_name}
imprint(node, siblings)
def _imprint_inits(self):
"""Add initial positions and dimensions to the attributes"""
for node in nuke.allNodes():
refresh_node(node)
imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
node.knob("x_init").setVisible(False)
node.knob("y_init").setVisible(False)
width = node.screenWidth()
height = node.screenHeight()
if "bdwidth" in node.knobs():
imprint(node, {"w_init": width, "h_init": height})
node.knob("w_init").setVisible(False)
node.knob("h_init").setVisible(False)
refresh_node(node)
def _update_nodes(
self, placeholder, nodes, considered_nodes, offset_y=None
):
"""Adjust backdrop nodes dimensions and positions.
Considering some nodes sizes.
Args:
nodes (list): list of nodes to update
considered_nodes (list): list of nodes to consider while updating
positions and dimensions
offset (int): distance between copies
"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
diff_x = diff_y = 0
contained_nodes = [] # for backdrops
if offset_y is None:
width_ph = placeholder_node.screenWidth()
height_ph = placeholder_node.screenHeight()
diff_y = max_y - min_y - height_ph
diff_x = max_x - min_x - width_ph
contained_nodes = [placeholder_node]
min_x = placeholder_node.xpos()
min_y = placeholder_node.ypos()
else:
siblings = get_nodes_by_names(placeholder.data["siblings"])
minX, _, maxX, _ = get_extreme_positions(siblings)
diff_y = max_y - min_y + 20
diff_x = abs(max_x - min_x - maxX + minX)
contained_nodes = considered_nodes
if diff_y <= 0 and diff_x <= 0:
return
for node in nodes:
refresh_node(node)
if (
node == placeholder_node
or node in considered_nodes
):
continue
if (
not isinstance(node, nuke.BackdropNode)
or (
isinstance(node, nuke.BackdropNode)
and not set(contained_nodes) <= set(node.getNodes())
)
):
if offset_y is None and node.xpos() >= min_x:
node.setXpos(node.xpos() + diff_x)
if node.ypos() >= min_y:
node.setYpos(node.ypos() + diff_y)
else:
width = node.screenWidth()
height = node.screenHeight()
node.knob("bdwidth").setValue(width + diff_x)
node.knob("bdheight").setValue(height + diff_y)
refresh_node(node)
def _set_created_connections(self, placeholder):
"""
set inputs and outputs of created nodes"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
input_node, output_node = get_group_io_nodes(
placeholder.data["last_created"]
)
for node in placeholder_node.dependent():
for idx in range(node.inputs()):
if node.input(idx) == placeholder_node and output_node:
node.setInput(idx, output_node)
for node in placeholder_node.dependencies():
for idx in range(placeholder_node.inputs()):
if placeholder_node.input(idx) == node and input_node:
input_node.setInput(0, node)
def _create_sib_copies(self, placeholder):
""" creating copies of the palce_holder siblings (the ones who were
created with it) for the new nodes added
Returns :
copies (dict) : with copied nodes names and their copies
"""
copies = {}
siblings = get_nodes_by_names(placeholder.data["siblings"])
for node in siblings:
new_node = duplicate_node(node)
x_init = int(new_node.knob("x_init").getValue())
y_init = int(new_node.knob("y_init").getValue())
new_node.setXYpos(x_init, y_init)
if isinstance(new_node, nuke.BackdropNode):
w_init = new_node.knob("w_init").getValue()
h_init = new_node.knob("h_init").getValue()
new_node.knob("bdwidth").setValue(w_init)
new_node.knob("bdheight").setValue(h_init)
refresh_node(node)
if "repre_id" in node.knobs().keys():
node.removeKnob(node.knob("repre_id"))
copies[node.name()] = new_node
return copies
def _set_copies_connections(self, placeholder, copies):
"""Set inputs and outputs of the copies.
Args:
copies (dict): Copied nodes by their names.
"""
last_input, last_output = get_group_io_nodes(
placeholder.data["last_created"]
)
siblings = get_nodes_by_names(placeholder.data["siblings"])
siblings_input, siblings_output = get_group_io_nodes(siblings)
copy_input = copies[siblings_input.name()]
copy_output = copies[siblings_output.name()]
for node_init in siblings:
if node_init == siblings_output:
continue
node_copy = copies[node_init.name()]
for node in node_init.dependent():
for idx in range(node.inputs()):
if node.input(idx) != node_init:
continue
if node in siblings:
copies[node.name()].setInput(idx, node_copy)
else:
last_input.setInput(0, node_copy)
for node in node_init.dependencies():
for idx in range(node_init.inputs()):
if node_init.input(idx) != node:
continue
if node_init == siblings_input:
copy_input.setInput(idx, node)
elif node in siblings:
node_copy.setInput(idx, copies[node.name()])
else:
node_copy.setInput(idx, last_output)
siblings_input.setInput(0, copy_output)

View file

@ -0,0 +1,455 @@
import nuke
from ayon_core.pipeline.workfile.workfile_template_builder import (
LoadPlaceholderItem,
PlaceholderLoadMixin,
)
from ayon_core.hosts.nuke.api.lib import (
find_free_space_to_paste_nodes,
get_extreme_positions,
get_group_io_nodes,
imprint,
refresh_node,
refresh_nodes,
reset_selection,
get_names_from_nodes,
get_nodes_by_names,
select_nodes,
duplicate_node,
node_tempfile,
)
from ayon_core.hosts.nuke.api.workfile_template_builder import (
NukePlaceholderPlugin
)
class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
identifier = "nuke.load"
label = "Nuke load"
def _parse_placeholder_node_data(self, node):
placeholder_data = super(
NukePlaceholderLoadPlugin, self
)._parse_placeholder_node_data(node)
node_knobs = node.knobs()
nb_children = 0
if "nb_children" in node_knobs:
nb_children = int(node_knobs["nb_children"].getValue())
placeholder_data["nb_children"] = nb_children
siblings = []
if "siblings" in node_knobs:
siblings = node_knobs["siblings"].values()
placeholder_data["siblings"] = siblings
node_full_name = node.fullName()
placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
placeholder_data["last_loaded"] = []
placeholder_data["delete"] = False
return placeholder_data
def _get_loaded_repre_ids(self):
loaded_representation_ids = self.builder.get_shared_populate_data(
"loaded_representation_ids"
)
if loaded_representation_ids is None:
loaded_representation_ids = set()
for node in nuke.allNodes():
if "repre_id" in node.knobs():
loaded_representation_ids.add(
node.knob("repre_id").getValue()
)
self.builder.set_shared_populate_data(
"loaded_representation_ids", loaded_representation_ids
)
return loaded_representation_ids
def _before_placeholder_load(self, placeholder):
placeholder.data["nodes_init"] = nuke.allNodes()
def _before_repre_load(self, placeholder, representation):
placeholder.data["last_repre_id"] = representation["id"]
def collect_placeholders(self):
output = []
scene_placeholders = self._collect_scene_placeholders()
for node_name, node in scene_placeholders.items():
plugin_identifier_knob = node.knob("plugin_identifier")
if (
plugin_identifier_knob is None
or plugin_identifier_knob.getValue() != self.identifier
):
continue
placeholder_data = self._parse_placeholder_node_data(node)
# TODO do data validations and maybe upgrades if they are invalid
output.append(
LoadPlaceholderItem(node_name, placeholder_data, self)
)
return output
def populate_placeholder(self, placeholder):
self.populate_load_placeholder(placeholder)
def repopulate_placeholder(self, placeholder):
repre_ids = self._get_loaded_repre_ids()
self.populate_load_placeholder(placeholder, repre_ids)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def post_placeholder_process(self, placeholder, failed):
"""Cleanup placeholder after load of its corresponding representations.
Args:
placeholder (PlaceholderItem): Item which was just used to load
representation.
failed (bool): Loading of representation failed.
"""
# deselect all selected nodes
placeholder_node = nuke.toNode(placeholder.scene_identifier)
# getting the latest nodes added
# TODO get from shared populate data!
nodes_init = placeholder.data["nodes_init"]
nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init))
self.log.debug("Loaded nodes: {}".format(nodes_loaded))
if not nodes_loaded:
return
placeholder.data["delete"] = True
nodes_loaded = self._move_to_placeholder_group(
placeholder, nodes_loaded
)
placeholder.data["last_loaded"] = nodes_loaded
refresh_nodes(nodes_loaded)
# positioning of the loaded nodes
min_x, min_y, _, _ = get_extreme_positions(nodes_loaded)
for node in nodes_loaded:
xpos = (node.xpos() - min_x) + placeholder_node.xpos()
ypos = (node.ypos() - min_y) + placeholder_node.ypos()
node.setXYpos(xpos, ypos)
refresh_nodes(nodes_loaded)
# fix the problem of z_order for backdrops
self._fix_z_order(placeholder)
if placeholder.data.get("keep_placeholder"):
self._imprint_siblings(placeholder)
if placeholder.data["nb_children"] == 0:
# save initial nodes positions and dimensions, update them
# and set inputs and outputs of loaded nodes
if placeholder.data.get("keep_placeholder"):
self._imprint_inits()
self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded)
self._set_loaded_connections(placeholder)
elif placeholder.data["siblings"]:
# create copies of placeholder siblings for the new loaded nodes,
# set their inputs and outputs and update all nodes positions and
# dimensions and siblings names
siblings = get_nodes_by_names(placeholder.data["siblings"])
refresh_nodes(siblings)
copies = self._create_sib_copies(placeholder)
new_nodes = list(copies.values()) # copies nodes
self._update_nodes(placeholder, new_nodes, nodes_loaded)
placeholder_node.removeKnob(placeholder_node.knob("siblings"))
new_nodes_name = get_names_from_nodes(new_nodes)
imprint(placeholder_node, {"siblings": new_nodes_name})
self._set_copies_connections(placeholder, copies)
self._update_nodes(
placeholder,
nuke.allNodes(),
new_nodes + nodes_loaded,
20
)
new_siblings = get_names_from_nodes(new_nodes)
placeholder.data["siblings"] = new_siblings
else:
# if the placeholder doesn't have siblings, the loaded
# nodes will be placed in a free space
xpointer, ypointer = find_free_space_to_paste_nodes(
nodes_loaded, direction="bottom", offset=200
)
node = nuke.createNode("NoOp")
reset_selection()
nuke.delete(node)
for node in nodes_loaded:
xpos = (node.xpos() - min_x) + xpointer
ypos = (node.ypos() - min_y) + ypointer
node.setXYpos(xpos, ypos)
placeholder.data["nb_children"] += 1
reset_selection()
# go back to root group
nuke.root().begin()
def _move_to_placeholder_group(self, placeholder, nodes_loaded):
"""
opening the placeholder's group and copying loaded nodes in it.
Returns :
nodes_loaded (list): the new list of pasted nodes
"""
groups_name = placeholder.data["group_name"]
reset_selection()
select_nodes(nodes_loaded)
if groups_name:
with node_tempfile() as filepath:
nuke.nodeCopy(filepath)
for node in nuke.selectedNodes():
nuke.delete(node)
group = nuke.toNode(groups_name)
group.begin()
nuke.nodePaste(filepath)
nodes_loaded = nuke.selectedNodes()
return nodes_loaded
def _fix_z_order(self, placeholder):
"""Fix the problem of z_order when a backdrop is loaded."""
nodes_loaded = placeholder.data["last_loaded"]
loaded_backdrops = []
bd_orders = set()
for node in nodes_loaded:
if isinstance(node, nuke.BackdropNode):
loaded_backdrops.append(node)
bd_orders.add(node.knob("z_order").getValue())
if not bd_orders:
return
sib_orders = set()
for node_name in placeholder.data["siblings"]:
node = nuke.toNode(node_name)
if isinstance(node, nuke.BackdropNode):
sib_orders.add(node.knob("z_order").getValue())
if not sib_orders:
return
min_order = min(bd_orders)
max_order = max(sib_orders)
for backdrop_node in loaded_backdrops:
z_order = backdrop_node.knob("z_order").getValue()
backdrop_node.knob("z_order").setValue(
z_order + max_order - min_order + 1)
def _imprint_siblings(self, placeholder):
"""
- add sibling names to placeholder attributes (nodes loaded with it)
- add the representation id to the attributes of all other nodes
"""
loaded_nodes = placeholder.data["last_loaded"]
loaded_nodes_set = set(loaded_nodes)
data = {"repre_id": str(placeholder.data["last_repre_id"])}
for node in loaded_nodes:
node_knobs = node.knobs()
if "builder_type" not in node_knobs:
# save the id of representation for all imported nodes
imprint(node, data)
node.knob("repre_id").setVisible(False)
refresh_node(node)
continue
if (
"is_placeholder" not in node_knobs
or (
"is_placeholder" in node_knobs
and node.knob("is_placeholder").value()
)
):
siblings = list(loaded_nodes_set - {node})
siblings_name = get_names_from_nodes(siblings)
siblings = {"siblings": siblings_name}
imprint(node, siblings)
def _imprint_inits(self):
"""Add initial positions and dimensions to the attributes"""
for node in nuke.allNodes():
refresh_node(node)
imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
node.knob("x_init").setVisible(False)
node.knob("y_init").setVisible(False)
width = node.screenWidth()
height = node.screenHeight()
if "bdwidth" in node.knobs():
imprint(node, {"w_init": width, "h_init": height})
node.knob("w_init").setVisible(False)
node.knob("h_init").setVisible(False)
refresh_node(node)
def _update_nodes(
self, placeholder, nodes, considered_nodes, offset_y=None
):
"""Adjust backdrop nodes dimensions and positions.
Considering some nodes sizes.
Args:
nodes (list): list of nodes to update
considered_nodes (list): list of nodes to consider while updating
positions and dimensions
offset (int): distance between copies
"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
diff_x = diff_y = 0
contained_nodes = [] # for backdrops
if offset_y is None:
width_ph = placeholder_node.screenWidth()
height_ph = placeholder_node.screenHeight()
diff_y = max_y - min_y - height_ph
diff_x = max_x - min_x - width_ph
contained_nodes = [placeholder_node]
min_x = placeholder_node.xpos()
min_y = placeholder_node.ypos()
else:
siblings = get_nodes_by_names(placeholder.data["siblings"])
minX, _, maxX, _ = get_extreme_positions(siblings)
diff_y = max_y - min_y + 20
diff_x = abs(max_x - min_x - maxX + minX)
contained_nodes = considered_nodes
if diff_y <= 0 and diff_x <= 0:
return
for node in nodes:
refresh_node(node)
if (
node == placeholder_node
or node in considered_nodes
):
continue
if (
not isinstance(node, nuke.BackdropNode)
or (
isinstance(node, nuke.BackdropNode)
and not set(contained_nodes) <= set(node.getNodes())
)
):
if offset_y is None and node.xpos() >= min_x:
node.setXpos(node.xpos() + diff_x)
if node.ypos() >= min_y:
node.setYpos(node.ypos() + diff_y)
else:
width = node.screenWidth()
height = node.screenHeight()
node.knob("bdwidth").setValue(width + diff_x)
node.knob("bdheight").setValue(height + diff_y)
refresh_node(node)
def _set_loaded_connections(self, placeholder):
"""
set inputs and outputs of loaded nodes"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
input_node, output_node = get_group_io_nodes(
placeholder.data["last_loaded"]
)
for node in placeholder_node.dependent():
for idx in range(node.inputs()):
if node.input(idx) == placeholder_node and output_node:
node.setInput(idx, output_node)
for node in placeholder_node.dependencies():
for idx in range(placeholder_node.inputs()):
if placeholder_node.input(idx) == node and input_node:
input_node.setInput(0, node)
def _create_sib_copies(self, placeholder):
""" creating copies of the palce_holder siblings (the ones who were
loaded with it) for the new nodes added
Returns :
copies (dict) : with copied nodes names and their copies
"""
copies = {}
siblings = get_nodes_by_names(placeholder.data["siblings"])
for node in siblings:
new_node = duplicate_node(node)
x_init = int(new_node.knob("x_init").getValue())
y_init = int(new_node.knob("y_init").getValue())
new_node.setXYpos(x_init, y_init)
if isinstance(new_node, nuke.BackdropNode):
w_init = new_node.knob("w_init").getValue()
h_init = new_node.knob("h_init").getValue()
new_node.knob("bdwidth").setValue(w_init)
new_node.knob("bdheight").setValue(h_init)
refresh_node(node)
if "repre_id" in node.knobs().keys():
node.removeKnob(node.knob("repre_id"))
copies[node.name()] = new_node
return copies
def _set_copies_connections(self, placeholder, copies):
"""Set inputs and outputs of the copies.
Args:
copies (dict): Copied nodes by their names.
"""
last_input, last_output = get_group_io_nodes(
placeholder.data["last_loaded"]
)
siblings = get_nodes_by_names(placeholder.data["siblings"])
siblings_input, siblings_output = get_group_io_nodes(siblings)
copy_input = copies[siblings_input.name()]
copy_output = copies[siblings_output.name()]
for node_init in siblings:
if node_init == siblings_output:
continue
node_copy = copies[node_init.name()]
for node in node_init.dependent():
for idx in range(node.inputs()):
if node.input(idx) != node_init:
continue
if node in siblings:
copies[node.name()].setInput(idx, node_copy)
else:
last_input.setInput(0, node_copy)
for node in node_init.dependencies():
for idx in range(node_init.inputs()):
if node_init.input(idx) != node:
continue
if node_init == siblings_input:
copy_input.setInput(idx, node)
elif node in siblings:
node_copy.setInput(idx, copies[node.name()])
else:
node_copy.setInput(idx, last_output)
siblings_input.setInput(0, copy_output)

View file

@ -586,7 +586,6 @@ def prompt_new_file_with_mesh(mesh_filepath):
# TODO: find a way to improve the process event to
# load more complicated mesh
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 3000)
file_dialog.done(file_dialog.Accepted)
app.processEvents(QtCore.QEventLoop.AllEvents)
@ -606,7 +605,7 @@ def prompt_new_file_with_mesh(mesh_filepath):
mesh_select.setVisible(False)
# Ensure UI is visually up-to-date
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents)
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 8000)
# Trigger the 'select file' dialog to set the path and have the
# new file dialog use the path.
@ -623,8 +622,6 @@ def prompt_new_file_with_mesh(mesh_filepath):
"Failed to set mesh path with the prompt dialog:"
f"{mesh_filepath}\n\n"
"Creating new project directly with the mesh path instead.")
else:
dialog.done(dialog.Accepted)
new_action = _get_new_project_action()
if not new_action:

View file

@ -1,3 +1,5 @@
import copy
from qtpy import QtWidgets, QtCore
from ayon_core.pipeline import (
load,
get_representation_path,
@ -8,10 +10,133 @@ from ayon_core.hosts.substancepainter.api.pipeline import (
set_container_metadata,
remove_container_metadata
)
from ayon_core.hosts.substancepainter.api.lib import prompt_new_file_with_mesh
import substance_painter.project
import qargparse
def _convert(substance_attr):
"""Return Substance Painter Python API Project attribute from string.
This converts a string like "ProjectWorkflow.Default" to its
Substance Painter Python API equivalent object, for example:
`substance_painter.project.ProjectWorkflow.Default`
Args:
substance_attr (str): The `substance_painter.project` attribute,
for example "ProjectWorkflow.Default"
Returns:
Any: Substance Python API object of the project attribute.
Raises:
ValueError: If attribute does not exist on the
`substance_painter.project` python api.
"""
root = substance_painter.project
for attr in substance_attr.split("."):
root = getattr(root, attr, None)
if root is None:
raise ValueError(
"Substance Painter project attribute"
f" does not exist: {substance_attr}")
return root
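# For illustration, a minimal usage sketch of `_convert`, assuming a
# running Substance Painter session where `substance_painter.project`
# is importable:
#
# workflow = _convert("ProjectWorkflow.Default")
# # equivalent to:
# # workflow = substance_painter.project.ProjectWorkflow.Default
#
# # An unknown attribute raises ValueError:
# # _convert("ProjectWorkflow.DoesNotExist")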
def get_template_by_name(name: str, templates: list[dict]) -> dict:
template = next(
(
template for template in templates
if template["name"] == name
),
None
)
if template is None:
raise ValueError(f"Template with name '{name}' not found.")
return template
class SubstanceProjectConfigurationWindow(QtWidgets.QDialog):
"""The pop-up dialog allows users to choose material
duplicate options for importing Max objects when updating
or switching assets.
"""
def __init__(self, project_templates):
super(SubstanceProjectConfigurationWindow, self).__init__()
self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint)
self.configuration = None
self.template_names = [template["name"] for template
in project_templates]
self.project_templates = project_templates
self.widgets = {
"label": QtWidgets.QLabel(
"Select your template for project configuration"),
"template_options": QtWidgets.QComboBox(),
"import_cameras": QtWidgets.QCheckBox("Import Cameras"),
"preserve_strokes": QtWidgets.QCheckBox("Preserve Strokes"),
"clickbox": QtWidgets.QWidget(),
"combobox": QtWidgets.QWidget(),
"buttons": QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok
| QtWidgets.QDialogButtonBox.Cancel)
}
self.widgets["template_options"].addItems(self.template_names)
template_name = self.widgets["template_options"].currentText()
self._update_to_match_template(template_name)
# Build clickboxes
layout = QtWidgets.QHBoxLayout(self.widgets["clickbox"])
layout.addWidget(self.widgets["import_cameras"])
layout.addWidget(self.widgets["preserve_strokes"])
# Build combobox
layout = QtWidgets.QHBoxLayout(self.widgets["combobox"])
layout.addWidget(self.widgets["template_options"])
# Build buttons
layout = QtWidgets.QHBoxLayout(self.widgets["buttons"])
# Build layout.
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(self.widgets["label"])
layout.addWidget(self.widgets["combobox"])
layout.addWidget(self.widgets["clickbox"])
layout.addWidget(self.widgets["buttons"])
self.widgets["template_options"].currentTextChanged.connect(
self._update_to_match_template)
self.widgets["buttons"].accepted.connect(self.on_accept)
self.widgets["buttons"].rejected.connect(self.on_reject)
def on_accept(self):
self.configuration = self.get_project_configuration()
self.close()
def on_reject(self):
self.close()
def _update_to_match_template(self, template_name):
template = get_template_by_name(template_name, self.project_templates)
self.widgets["import_cameras"].setChecked(template["import_cameras"])
self.widgets["preserve_strokes"].setChecked(
template["preserve_strokes"])
def get_project_configuration(self):
templates = self.project_templates
template_name = self.widgets["template_options"].currentText()
template = get_template_by_name(template_name, templates)
template = copy.deepcopy(template) # do not edit the original
template["import_cameras"] = self.widgets["import_cameras"].isChecked()
template["preserve_strokes"] = (
self.widgets["preserve_strokes"].isChecked()
)
for key in ["normal_map_format",
"project_workflow",
"tangent_space_mode"]:
template[key] = _convert(template[key])
return template
@classmethod
def prompt(cls, templates):
dialog = cls(templates)
dialog.exec_()
configuration = dialog.configuration
dialog.deleteLater()
return configuration
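# A minimal usage sketch of the dialog. The template dicts below are
# hypothetical; their shape follows the keys accessed above and the
# real values come from the addon settings.
#
# templates = [{
#     "name": "default",
#     "import_cameras": True,
#     "preserve_strokes": True,
#     "normal_map_format": "NormalMapFormat.DirectX",
#     "project_workflow": "ProjectWorkflow.Default",
#     "tangent_space_mode": "TangentSpace.PerFragment",
#     "default_texture_resolution": 2048,
# }]
# configuration = SubstanceProjectConfigurationWindow.prompt(templates)
# if configuration is None:
#     # user cancelled the dialog
#     ...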
class SubstanceLoadProjectMesh(load.LoaderPlugin):
@ -25,48 +150,35 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
icon = "code-fork"
color = "orange"
options = [
qargparse.Boolean(
"preserve_strokes",
default=True,
help="Preserve strokes positions on mesh.\n"
"(only relevant when loading into existing project)"
),
qargparse.Boolean(
"import_cameras",
default=True,
help="Import cameras from the mesh file."
)
]
# Defined via settings
project_templates = []
def load(self, context, name, namespace, data):
def load(self, context, name, namespace, options=None):
# Get user inputs
import_cameras = data.get("import_cameras", True)
preserve_strokes = data.get("preserve_strokes", True)
sp_settings = substance_painter.project.Settings(
import_cameras=import_cameras
)
result = SubstanceProjectConfigurationWindow.prompt(
self.project_templates)
if not result:
# cancelling loader action
return
if not substance_painter.project.is_open():
# Allow to 'initialize' a new project
path = self.filepath_from_context(context)
# TODO: improve the prompt dialog function so it does not
# only work for simple polygon scenes
result = prompt_new_file_with_mesh(mesh_filepath=path)
if not result:
self.log.info("User cancelled new project prompt."
"Creating new project directly from"
" Substance Painter API Instead.")
settings = substance_painter.project.create(
mesh_file_path=path, settings=sp_settings
)
sp_settings = substance_painter.project.Settings(
import_cameras=result["import_cameras"],
normal_map_format=result["normal_map_format"],
project_workflow=result["project_workflow"],
tangent_space_mode=result["tangent_space_mode"],
default_texture_resolution=result["default_texture_resolution"]
)
settings = substance_painter.project.create(
mesh_file_path=path, settings=sp_settings
)
else:
# Reload the mesh
settings = substance_painter.project.MeshReloadingSettings(
import_cameras=import_cameras,
preserve_strokes=preserve_strokes
)
import_cameras=result["import_cameras"],
preserve_strokes=result["preserve_strokes"])
def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus): # noqa
if status == substance_painter.project.ReloadMeshStatus.SUCCESS: # noqa
@ -92,7 +204,7 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
# from the user's original choice. We don't store 'preserve_strokes'
# as we always preserve strokes on updates.
container["options"] = {
"import_cameras": import_cameras,
"import_cameras": result["import_cameras"],
}
set_container_metadata(project_mesh_object_name, container)

View file

@ -1,5 +1,6 @@
import os
from pathlib import Path
from ayon_core.lib import get_ayon_launcher_args
from ayon_core.lib.execute import run_detached_process
from ayon_core.addon import (
@ -57,3 +58,62 @@ def launch():
from ayon_core.tools import traypublisher
traypublisher.main()
@cli_main.command()
@click_wrap.option(
"--filepath",
help="Full path to CSV file with data",
type=str,
required=True
)
@click_wrap.option(
"--project",
help="Project name in which the context will be used",
type=str,
required=True
)
@click_wrap.option(
"--folder-path",
help="Asset name in which the context will be used",
type=str,
required=True
)
@click_wrap.option(
"--task",
help="Task name under Asset in which the context will be used",
type=str,
required=False
)
@click_wrap.option(
"--ignore-validators",
help="Option to ignore validators",
type=bool,
is_flag=True,
required=False
)
def ingestcsv(
filepath,
project,
folder_path,
task,
ignore_validators
):
"""Ingest CSV file into project.
This command will ingest CSV file into project. CSV file must be in
specific format. See documentation for more information.
"""
from .csv_publish import csvpublish
# use Path to check if filepath exists
if not Path(filepath).exists():
raise FileNotFoundError(f"File {filepath} does not exist.")
csvpublish(
filepath,
project,
folder_path,
task,
ignore_validators
)
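# A hypothetical invocation of the command above (the exact entry
# point depends on how the traypublisher addon CLI is exposed):
#
#   ayon addon traypublisher ingestcsv --filepath /path/to/ingest.csv \
#       --project demo_project --folder-path /shots/sh010 \
#       --task compositing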

View file

@ -0,0 +1,86 @@
import os
import pyblish.api
import pyblish.util
from ayon_api import get_folder_by_path, get_task_by_name
from ayon_core.lib.attribute_definitions import FileDefItem
from ayon_core.pipeline import install_host
from ayon_core.pipeline.create import CreateContext
from ayon_core.hosts.traypublisher.api import TrayPublisherHost
def csvpublish(
filepath,
project_name,
folder_path,
task_name=None,
ignore_validators=False
):
"""Publish CSV file.
Args:
filepath (str): Path to CSV file.
project_name (str): Project name.
folder_path (str): Folder path.
task_name (Optional[str]): Task name.
ignore_validators (Optional[bool]): Option to ignore validators.
"""
# initialization of host
host = TrayPublisherHost()
install_host(host)
# setting host context into project
host.set_project_name(project_name)
# form precreate data with field values
file_field = FileDefItem.from_paths([filepath], False).pop().to_dict()
precreate_data = {
"csv_filepath_data": file_field,
}
# create context initialization
create_context = CreateContext(host, headless=True)
folder_entity = get_folder_by_path(
project_name,
folder_path=folder_path,
)
if not folder_entity:
raise ValueError(
f"Folder path '{folder_path}' doesn't"
f" exist in project '{project_name}'."
)
task_entity = get_task_by_name(
project_name,
folder_entity["id"],
task_name,
)
if not task_entity:
raise ValueError(
f"Task name '{task_name}' doesn't"
f" exist in folder '{folder_path}'."
)
create_context.create(
"io.ayon.creators.traypublisher.csv_ingest",
"Main",
folder_entity=folder_entity,
task_entity=task_entity,
pre_create_data=precreate_data,
)
# publishing context initialization
pyblish_context = pyblish.api.Context()
pyblish_context.data["create_context"] = create_context
# redefine targets (skip 'local' to disable validators)
targets = None
if ignore_validators:
targets = ["default", "ingest"]
# publishing
pyblish.util.publish(context=pyblish_context, targets=targets)
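# A hypothetical direct call, assuming an AYON server connection is
# configured and the project, folder and task exist:
#
# csvpublish(
#     filepath="/path/to/ingest.csv",
#     project_name="demo_project",
#     folder_path="/shots/sh010",
#     task_name="compositing",
#     ignore_validators=False,
# )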

View file

@ -0,0 +1,741 @@
import os
import re
import csv
import clique
from io import StringIO
from copy import deepcopy, copy
from ayon_api import get_folder_by_path, get_task_by_name
from ayon_core.pipeline.create import get_product_name
from ayon_core.pipeline import CreatedInstance
from ayon_core.lib import FileDef, BoolDef
from ayon_core.lib.transcoding import (
VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
)
from ayon_core.pipeline.create import CreatorError
from ayon_core.hosts.traypublisher.api.plugin import (
TrayPublishCreator
)
class IngestCSV(TrayPublishCreator):
"""CSV ingest creator class"""
icon = "fa.file"
label = "CSV Ingest"
product_type = "csv_ingest_file"
identifier = "io.ayon.creators.traypublisher.csv_ingest"
default_variants = ["Main"]
description = "Ingest products' data from CSV file"
detailed_description = """
Ingest products' data from CSV file following column and representation
configuration in project settings.
"""
# Position in the list of creators.
order = 10
# settings for this creator
columns_config = {}
representations_config = {}
def create(self, subset_name, instance_data, pre_create_data):
"""Create an product from each row found in the CSV.
Args:
subset_name (str): The subset name.
instance_data (dict): The instance data.
pre_create_data (dict): The pre-create data.
"""
csv_filepath_data = pre_create_data.get("csv_filepath_data", {})
folder = csv_filepath_data.get("directory", "")
if not os.path.exists(folder):
raise CreatorError(
f"Directory '{folder}' does not exist."
)
filenames = csv_filepath_data.get("filenames", [])
if not filenames:
raise CreatorError("No CSV file was selected.")
self._process_csv_file(
subset_name, instance_data, folder, filenames[0])
def _process_csv_file(
self, subset_name, instance_data, staging_dir, filename):
"""Process CSV file.
Args:
subset_name (str): The subset name.
instance_data (dict): The instance data.
staging_dir (str): The staging directory.
filename (str): The filename.
"""
# create a new instance for the csv file itself
self._pass_data_to_csv_instance(
instance_data,
staging_dir,
filename
)
csv_instance = CreatedInstance(
self.product_type, subset_name, instance_data, self
)
self._store_new_instance(csv_instance)
csv_instance["csvFileData"] = {
"filename": filename,
"staging_dir": staging_dir,
}
# get all data from the csv file and convert it
# to new instances
csv_data_for_instances = self._get_data_from_csv(
staging_dir, filename)
# create instances from the csv data
self._create_instances_from_csv_data(
csv_data_for_instances, staging_dir
)
def _create_instances_from_csv_data(
self,
csv_data_for_instances,
staging_dir
):
"""Create instances from csv data"""
for folder_path, prepared_data in csv_data_for_instances.items():
project_name = self.create_context.get_current_project_name()
products = prepared_data["products"]
for instance_name, product_data in products.items():
# get important instance variables
task_name = product_data["task_name"]
task_type = product_data["task_type"]
variant = product_data["variant"]
product_type = product_data["product_type"]
version = product_data["version"]
# create subset/product name
product_name = get_product_name(
project_name,
task_name,
task_type,
self.host_name,
product_type,
variant
)
# make sure frame start/end is inherited from csv columns;
# the expected frame range data excludes handles
for _, repre_data in product_data["representations"].items(): # noqa: E501
frame_start = repre_data["frameStart"]
frame_end = repre_data["frameEnd"]
handle_start = repre_data["handleStart"]
handle_end = repre_data["handleEnd"]
fps = repre_data["fps"]
break
# try to find any version comment in representation data
version_comment = next(
iter(
repre_data["comment"]
for repre_data in product_data["representations"].values() # noqa: E501
if repre_data["comment"]
),
None
)
# try to find any slate switch in representation data
slate_exists = any(
repre_data["slate"]
for _, repre_data in product_data["representations"].items() # noqa: E501
)
# get representations from product data
representations = product_data["representations"]
label = f"{folder_path}_{product_name}_v{version:>03}"
families = ["csv_ingest"]
if slate_exists:
# adding slate to families mainly for loaders to be able
# to filter out slates
families.append("slate")
# make product data
product_data = {
"name": instance_name,
"folderPath": folder_path,
"families": families,
"label": label,
"task": task_name,
"variant": variant,
"source": "csv",
"frameStart": frame_start,
"frameEnd": frame_end,
"handleStart": handle_start,
"handleEnd": handle_end,
"fps": fps,
"version": version,
"comment": version_comment,
}
# create new instance
new_instance = CreatedInstance(
product_type, product_name, product_data, self
)
self._store_new_instance(new_instance)
if not new_instance.get("prepared_data_for_repres"):
new_instance["prepared_data_for_repres"] = []
base_thumbnail_repre_data = {
"name": "thumbnail",
"ext": None,
"files": None,
"stagingDir": None,
"stagingDir_persistent": True,
"tags": ["thumbnail", "delete"],
}
# need to populate all thumbnails for all representations
# so we can check if unique thumbnail per representation
# is needed
thumbnails = [
repre_data["thumbnailPath"]
for repre_data in representations.values()
if repre_data["thumbnailPath"]
]
multiple_thumbnails = len(set(thumbnails)) > 1
explicit_output_name = None
thumbnails_processed = False
for filepath, repre_data in representations.items():
# check if any review derivate tag is present
reviewable = any(
tag for tag in repre_data.get("tags", [])
# tag can be `ftrackreview` or `review`
if "review" in tag
)
# multiple unique thumbnails need to be published as separate
# representations with an outputName so the Ftrack instance
# integrator can pair them with reviewable video representations
if (
thumbnails
and multiple_thumbnails
and reviewable
):
# multiple unique thumbnails per representation needs
# grouping by outputName
# mainly used in Ftrack instance integrator
explicit_output_name = repre_data["representationName"]
relative_thumbnail_path = repre_data["thumbnailPath"]
# representation might not have thumbnail path
# so ignore this one
if not relative_thumbnail_path:
continue
thumb_dir, thumb_file = \
self._get_refactor_thumbnail_path(
staging_dir, relative_thumbnail_path)
filename, ext = os.path.splitext(thumb_file)
thumbnail_repr_data = deepcopy(
base_thumbnail_repre_data)
thumbnail_repr_data.update({
"name": "thumbnail_{}".format(filename),
"ext": ext[1:],
"files": thumb_file,
"stagingDir": thumb_dir,
"outputName": explicit_output_name,
})
new_instance["prepared_data_for_repres"].append({
"type": "thumbnail",
"colorspace": None,
"representation": thumbnail_repr_data,
})
# also add thumbnailPath for ayon to integrate
if not new_instance.get("thumbnailPath"):
new_instance["thumbnailPath"] = (
os.path.join(thumb_dir, thumb_file)
)
elif (
thumbnails
and not multiple_thumbnails
and not thumbnails_processed
or not reviewable
):
"""
For case where we have only one thumbnail
and not reviewable medias. This needs to be processed
only once per instance.
"""
if not thumbnails:
continue
# here we will use only one thumbnail for
# all representations
relative_thumbnail_path = repre_data["thumbnailPath"]
# pop the last thumbnail from the list since there is only one
# and we do not need to iterate over it again
if not relative_thumbnail_path:
relative_thumbnail_path = thumbnails.pop()
thumb_dir, thumb_file = \
self._get_refactor_thumbnail_path(
staging_dir, relative_thumbnail_path)
_, ext = os.path.splitext(thumb_file)
thumbnail_repr_data = deepcopy(
base_thumbnail_repre_data)
thumbnail_repr_data.update({
"ext": ext[1:],
"files": thumb_file,
"stagingDir": thumb_dir
})
new_instance["prepared_data_for_repres"].append({
"type": "thumbnail",
"colorspace": None,
"representation": thumbnail_repr_data,
})
# also add thumbnailPath for ayon to integrate
if not new_instance.get("thumbnailPath"):
new_instance["thumbnailPath"] = (
os.path.join(thumb_dir, thumb_file)
)
thumbnails_processed = True
# get representation data
representation_data = self._get_representation_data(
filepath, repre_data, staging_dir,
explicit_output_name
)
new_instance["prepared_data_for_repres"].append({
"type": "media",
"colorspace": repre_data["colorspace"],
"representation": representation_data,
})
def _get_refactor_thumbnail_path(
self, staging_dir, relative_thumbnail_path):
thumbnail_abs_path = os.path.join(
staging_dir, relative_thumbnail_path)
return os.path.split(
thumbnail_abs_path)
def _get_representation_data(
self, filepath, repre_data, staging_dir, explicit_output_name=None
):
"""Get representation data
Args:
filepath (str): Filepath to representation file.
repre_data (dict): Representation data from CSV file.
staging_dir (str): Staging directory.
explicit_output_name (Optional[str]): Explicit output name.
For grouping purposes with reviewable components.
Defaults to None.
"""
# get extension of file
basename = os.path.basename(filepath)
extension = os.path.splitext(filepath)[-1].lower()
# validate that the filepath has a correct extension for the output
repre_name = repre_data["representationName"]
repre_config_data = None
for repre in self.representations_config["representations"]:
if repre["name"] == repre_name:
repre_config_data = repre
break
if not repre_config_data:
raise CreatorError(
f"Representation '{repre_name}' not found "
"in config representation data."
)
validate_extensions = repre_config_data["extensions"]
if extension not in validate_extensions:
raise CreatorError(
f"File extension '{extension}' not valid for "
f"output '{validate_extensions}'."
)
is_sequence = (extension in IMAGE_EXTENSIONS)
# convert ### string in file name to %03d
# this is for correct frame range validation
# example: file.###.exr -> file.%03d.exr
if "#" in basename:
padding = len(basename.split("#")) - 1
basename = basename.replace("#" * padding, f"%0{padding}d")
is_sequence = True
# make absolute path to file
absfilepath = os.path.normpath(os.path.join(staging_dir, filepath))
dirname = os.path.dirname(absfilepath)
# check if dirname exists
if not os.path.isdir(dirname):
raise CreatorError(
f"Directory '{dirname}' does not exist."
)
# collect all data from dirname
paths_for_collection = []
for file in os.listdir(dirname):
filepath = os.path.join(dirname, file)
paths_for_collection.append(filepath)
collections, _ = clique.assemble(paths_for_collection)
if collections:
collections = collections[0]
else:
if is_sequence:
raise CreatorError(
f"No collections found in directory '{dirname}'."
)
frame_start = None
frame_end = None
if is_sequence:
files = [os.path.basename(file) for file in collections]
frame_start = list(collections.indexes)[0]
frame_end = list(collections.indexes)[-1]
else:
files = basename
tags = deepcopy(repre_data["tags"])
# if slate in repre_data is True, tag the representation so
# downstream plugins know the first frame is a slate
if repre_data["slate"]:
tags.append("has_slate")
# get representation data
representation_data = {
"name": repre_name,
"ext": extension[1:],
"files": files,
"stagingDir": dirname,
"stagingDir_persistent": True,
"tags": tags,
}
if extension in VIDEO_EXTENSIONS:
representation_data.update({
"fps": repre_data["fps"],
"outputName": repre_name,
})
if explicit_output_name:
representation_data["outputName"] = explicit_output_name
if frame_start is not None:
representation_data["frameStart"] = frame_start
if frame_end is not None:
representation_data["frameEnd"] = frame_end
return representation_data
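# Illustrative result of `_get_representation_data` for an EXR
# sequence row (all values are hypothetical):
#
# {
#     "name": "exr",
#     "ext": "exr",
#     "files": ["render.1001.exr", "render.1002.exr"],
#     "stagingDir": "/ingest/sh010/render",
#     "stagingDir_persistent": True,
#     "tags": ["review", "has_slate"],
#     "frameStart": 1001,
#     "frameEnd": 1002,
# }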
def _get_data_from_csv(
self, package_dir, filename
):
"""Generate instances from the csv file"""
# get current project name and code from context.data
project_name = self.create_context.get_current_project_name()
csv_file_path = os.path.join(
package_dir, filename
)
# make sure csv file contains columns from following list
required_columns = [
column["name"] for column in self.columns_config["columns"]
if column["required_column"]
]
# read csv file
with open(csv_file_path, "r") as csv_file:
csv_content = csv_file.read()
# read csv file with DictReader
csv_reader = csv.DictReader(
StringIO(csv_content),
delimiter=self.columns_config["csv_delimiter"]
)
# fix fieldnames
# sometimes someone may leave extra spaces at the start or end of
# the column name
all_columns = [
" ".join(column.rsplit()) for column in csv_reader.fieldnames]
# return back fixed fieldnames
csv_reader.fieldnames = all_columns
# check if csv file contains all required columns
if any(column not in all_columns for column in required_columns):
raise CreatorError(
f"Missing required columns: {required_columns}"
)
csv_data = {}
# get data from csv file
for row in csv_reader:
# Get required columns first
# TODO: will need to be folder path in CSV
# TODO: `context_asset_name` is now `folder_path`
folder_path = self._get_row_value_with_validation(
"Folder Path", row)
task_name = self._get_row_value_with_validation(
"Task Name", row)
version = self._get_row_value_with_validation(
"Version", row)
# Get optional columns
variant = self._get_row_value_with_validation(
"Variant", row)
product_type = self._get_row_value_with_validation(
"Product Type", row)
pre_product_name = (
f"{task_name}{variant}{product_type}"
f"{version}".replace(" ", "").lower()
)
# get representation data
filename, representation_data = \
self._get_representation_row_data(row)
# TODO: batch query of all folder paths and task names
# get folder entity from folder path
folder_entity = get_folder_by_path(
project_name, folder_path)
# make sure the folder exists
if not folder_entity:
raise CreatorError(
f"Folder '{folder_path}' not found."
)
# get the task entity from the folder entity by task name
task_entity = get_task_by_name(
project_name, folder_entity["id"], task_name)
# check that the task name is a valid task of the folder
if not task_entity:
raise CreatorError(
f"Task '{task_name}' not found on folder '{folder_path}'."
)
# Gather all csv data into one dict and make sure there are no
# duplicates. Data is validated and sorted under the correct
# existing folder, and representations are distributed under
# products following variants.
if folder_path not in csv_data:
csv_data[folder_path] = {
"folder_entity": folder_entity,
"products": {
pre_product_name: {
"task_name": task_name,
"task_type": task_entity["taskType"],
"variant": variant,
"product_type": product_type,
"version": version,
"representations": {
filename: representation_data,
},
}
}
}
else:
csv_products = csv_data[folder_path]["products"]
if pre_product_name not in csv_products:
csv_products[pre_product_name] = {
"task_name": task_name,
"task_type": task_entity["taskType"],
"variant": variant,
"product_type": product_type,
"version": version,
"representations": {
filename: representation_data,
},
}
else:
csv_representations = \
csv_products[pre_product_name]["representations"]
if filename in csv_representations:
raise CreatorError(
f"Duplicate filename '{filename}' in csv file."
)
csv_representations[filename] = representation_data
return csv_data
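# A minimal CSV sketch matching the columns read above. The delimiter
# and the exact set of required columns come from the addon settings;
# this example assumes a comma delimiter:
#
# Folder Path,Task Name,Version,Variant,Product Type,File Path,...
# /shots/sh010,compositing,1,Main,render,sh010/render.####.exr,...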
def _get_representation_row_data(self, row_data):
"""Get representation row data"""
# Get required columns first
file_path = self._get_row_value_with_validation(
"File Path", row_data)
frame_start = self._get_row_value_with_validation(
"Frame Start", row_data)
frame_end = self._get_row_value_with_validation(
"Frame End", row_data)
handle_start = self._get_row_value_with_validation(
"Handle Start", row_data)
handle_end = self._get_row_value_with_validation(
"Handle End", row_data)
fps = self._get_row_value_with_validation(
"FPS", row_data)
# Get optional columns
thumbnail_path = self._get_row_value_with_validation(
"Version Thumbnail", row_data)
colorspace = self._get_row_value_with_validation(
"Representation Colorspace", row_data)
comment = self._get_row_value_with_validation(
"Version Comment", row_data)
repre = self._get_row_value_with_validation(
"Representation", row_data)
slate_exists = self._get_row_value_with_validation(
"Slate Exists", row_data)
repre_tags = self._get_row_value_with_validation(
"Representation Tags", row_data)
# convert tags value to list
tags_list = copy(self.representations_config["default_tags"])
if repre_tags:
tags_list = []
tags_delimiter = self.representations_config["tags_delimiter"]
# strip spaces and lowercase each tag
if tags_delimiter in repre_tags:
tags = repre_tags.split(tags_delimiter)
for _tag in tags:
tags_list.append(_tag.strip().lower())
else:
tags_list.append(repre_tags)
representation_data = {
"colorspace": colorspace,
"comment": comment,
"representationName": repre,
"slate": slate_exists,
"tags": tags_list,
"thumbnailPath": thumbnail_path,
"frameStart": int(frame_start),
"frameEnd": int(frame_end),
"handleStart": int(handle_start),
"handleEnd": int(handle_end),
"fps": float(fps),
}
return file_path, representation_data
def _get_row_value_with_validation(
self, column_name, row_data, default_value=None
):
"""Get row value with validation"""
# get column data from column config
column_data = None
for column in self.columns_config["columns"]:
if column["name"] == column_name:
column_data = column
break
if not column_data:
raise CreatorError(
f"Column '{column_name}' not found in column config."
)
# get column value from row
column_value = row_data.get(column_name)
column_required = column_data["required_column"]
# check if column value is not empty string and column is required
if column_value == "" and column_required:
raise CreatorError(
f"Value in column '{column_name}' is required."
)
# get column type
column_type = column_data["type"]
# get column validation regex
column_validation = column_data["validation_pattern"]
# get column default value
column_default = default_value or column_data["default"]
if column_type in ["number", "decimal"] and column_default == 0:
column_default = None
# check if column value is not empty string
if column_value == "":
# set default value if column value is empty string
column_value = column_default
# set column value to correct type following column type
if column_type == "number" and column_value is not None:
column_value = int(column_value)
elif column_type == "decimal" and column_value is not None:
column_value = float(column_value)
elif column_type == "bool":
column_value = column_value in ["true", "True"]
# check if column value matches validation regex
if (
column_value is not None and
not re.match(str(column_validation), str(column_value))
):
raise CreatorError(
f"Column '{column_name}' value '{column_value}' "
f"does not match validation regex '{column_validation}' \n"
f"Row data: {row_data} \n"
f"Column data: {column_data}"
)
return column_value
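# Illustrative column configuration entry consumed above. The shape
# is inferred from the keys accessed; real entries come from the
# project settings:
#
# {
#     "name": "Frame Start",
#     "required_column": True,
#     "type": "number",
#     "validation_pattern": "^\\d*$",
#     "default": 0,
# }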
def _pass_data_to_csv_instance(
self, instance_data, staging_dir, filename
):
"""Pass CSV representation file to instance data"""
representation = {
"name": "csv",
"ext": "csv",
"files": filename,
"stagingDir": staging_dir,
"stagingDir_persistent": True,
}
instance_data.update({
"label": f"CSV: {filename}",
"representations": [representation],
"stagingDir": staging_dir,
"stagingDir_persistent": True,
})
def get_instance_attr_defs(self):
return [
BoolDef(
"add_review_family",
default=True,
label="Review"
)
]
def get_pre_create_attr_defs(self):
"""Creating pre-create attributes at creator plugin.
Returns:
list: list of attribute object instances
"""
# Use same attributes as for instance attributes
attr_defs = [
FileDef(
"csv_filepath_data",
folders=False,
extensions=[".csv"],
allow_sequences=False,
single_item=True,
label="CSV File",
),
]
return attr_defs

View file

@ -0,0 +1,47 @@
from pprint import pformat
import pyblish.api
from ayon_core.pipeline import publish
class CollectCSVIngestInstancesData(
pyblish.api.InstancePlugin,
publish.AYONPyblishPluginMixin,
publish.ColormanagedPyblishPluginMixin
):
"""Collect CSV Ingest data from instance.
"""
label = "Collect CSV Ingest instances data"
order = pyblish.api.CollectorOrder + 0.1
hosts = ["traypublisher"]
families = ["csv_ingest"]
def process(self, instance):
# expecting [{"type": ..., "colorspace": ..., "representation": ...}]
prepared_repres_data_items = instance.data[
"prepared_data_for_repres"]
for prep_repre_data in prepared_repres_data_items:
item_type = prep_repre_data["type"]
colorspace = prep_repre_data["colorspace"]
repre_data = prep_repre_data["representation"]
if item_type == "media" and colorspace is not None:
# colorspace name is passed from CSV column
self.set_representation_colorspace(
repre_data, instance.context, colorspace
)
elif item_type == "media":
# TODO: implement colorspace file rules file parsing
self.log.warning(
"Colorspace is not defined in csv for following"
f" representation: {pformat(repre_data)}"
)
elif item_type == "thumbnail":
# thumbnails need no colorspace handling
pass
instance.data["representations"].append(repre_data)

View file

@ -0,0 +1,31 @@
import pyblish.api
from ayon_core.pipeline import publish
class ExtractCSVFile(publish.Extractor):
"""
Extractor export CSV file
"""
label = "Extract CSV file"
order = pyblish.api.ExtractorOrder - 0.45
families = ["csv_ingest_file"]
hosts = ["traypublisher"]
def process(self, instance):
csv_file_data = instance.data["csvFileData"]
representation_csv = {
'name': "csv_data",
'ext': "csv",
'files': csv_file_data["filename"],
"stagingDir": csv_file_data["staging_dir"],
"stagingDir_persistent": True
}
instance.data["representations"].append(representation_csv)
self.log.info("Added CSV file representation: {}".format(
representation_csv))

View file

@ -16,6 +16,7 @@ class ValidateExistingVersion(
order = ValidateContentsOrder
hosts = ["traypublisher"]
targets = ["local"]
actions = [RepairAction]

View file

@ -16,6 +16,8 @@ class ValidateFrameRange(OptionalPyblishPluginMixin,
label = "Validate Frame Range"
hosts = ["traypublisher"]
families = ["render", "plate"]
targets = ["local"]
order = ValidateContentsOrder
optional = True

View file

@ -97,6 +97,15 @@ from .context_tools import (
get_current_folder_path,
get_current_task_name
)
from .workfile import (
discover_workfile_build_plugins,
register_workfile_build_plugin,
deregister_workfile_build_plugin,
register_workfile_build_plugin_path,
deregister_workfile_build_plugin_path,
)
install = install_host
uninstall = uninstall_host
@ -198,6 +207,13 @@ __all__ = (
"get_current_folder_path",
"get_current_task_name",
# Workfile templates
"discover_workfile_build_plugins",
"register_workfile_build_plugin",
"deregister_workfile_build_plugin",
"register_workfile_build_plugin_path",
"deregister_workfile_build_plugin_path",
# Backwards compatible function names
"install",
"uninstall",

View file

@ -21,6 +21,15 @@ from .utils import (
from .build_workfile import BuildWorkfile
from .workfile_template_builder import (
discover_workfile_build_plugins,
register_workfile_build_plugin,
deregister_workfile_build_plugin,
register_workfile_build_plugin_path,
deregister_workfile_build_plugin_path,
)
__all__ = (
"get_workfile_template_key_from_context",
"get_workfile_template_key",
@ -39,4 +48,10 @@ __all__ = (
"should_open_workfiles_tool_on_launch",
"BuildWorkfile",
"discover_workfile_build_plugins",
"register_workfile_build_plugin",
"deregister_workfile_build_plugin",
"register_workfile_build_plugin_path",
"deregister_workfile_build_plugin_path",
)

View file

@ -43,6 +43,13 @@ from ayon_core.pipeline.load import (
get_representation_contexts,
load_with_repre_context,
)
from ayon_core.pipeline.plugin_discover import (
discover,
register_plugin,
register_plugin_path,
deregister_plugin,
deregister_plugin_path
)
from ayon_core.pipeline.create import (
discover_legacy_creator_plugins,
@ -211,10 +218,14 @@ class AbstractTemplateBuilder(object):
Returns:
List[PlaceholderPlugin]: Plugin classes available for host.
"""
plugins = []
# Backwards compatibility
if hasattr(self._host, "get_workfile_build_placeholder_plugins"):
return self._host.get_workfile_build_placeholder_plugins()
return []
plugins.extend(discover(PlaceholderPlugin))
return plugins
@property
def host(self):
@ -329,7 +340,7 @@ class AbstractTemplateBuilder(object):
is good practice to check if the same value is not already stored under
different key or if the key is not already used for something else.
Key should be self explanatory to content.
Key should be self-explanatory to content.
- wrong: 'folder'
- good: 'folder_name'
@ -375,7 +386,7 @@ class AbstractTemplateBuilder(object):
is good practice to check if the same value is not already stored under
different key or if the key is not already used for something else.
Key should be self explanatory to content.
Key should be self-explanatory to content.
- wrong: 'folder'
- good: 'folder_path'
@ -395,7 +406,7 @@ class AbstractTemplateBuilder(object):
is good practice to check if the same value is not already stored under
different key or if the key is not already used for something else.
Key should be self explanatory to content.
Key should be self-explanatory to content.
- wrong: 'folder'
- good: 'folder_path'
@ -466,7 +477,7 @@ class AbstractTemplateBuilder(object):
return list(sorted(
placeholders,
key=lambda i: i.order
key=lambda placeholder: placeholder.order
))
def build_template(
@ -498,15 +509,21 @@ class AbstractTemplateBuilder(object):
process if version is created
"""
template_preset = self.get_template_preset()
if template_path is None:
template_path = template_preset["path"]
if keep_placeholders is None:
keep_placeholders = template_preset["keep_placeholder"]
if create_first_version is None:
create_first_version = template_preset["create_first_version"]
if any(
value is None
for value in [
template_path,
keep_placeholders,
create_first_version,
]
):
template_preset = self.get_template_preset()
if template_path is None:
template_path = template_preset["path"]
if keep_placeholders is None:
keep_placeholders = template_preset["keep_placeholder"]
if create_first_version is None:
create_first_version = template_preset["create_first_version"]
# check if first version is created
created_version_workfile = False
@ -685,7 +702,7 @@ class AbstractTemplateBuilder(object):
for placeholder in placeholders
}
all_processed = len(placeholders) == 0
# Counter is checked at the ned of a loop so the loop happens at least
# Counter is checked at the end of a loop so the loop happens at least
# once.
iter_counter = 0
while not all_processed:
@ -772,12 +789,14 @@ class AbstractTemplateBuilder(object):
- 'project_settings/{host name}/templated_workfile_build/profiles'
Returns:
str: Path to a template file with placeholders.
dict: Dictionary with `path`, `keep_placeholder` and
`create_first_version` settings from the template preset
for current context.
Raises:
TemplateProfileNotFound: When profiles are not filled.
TemplateLoadFailed: Profile was found but path is not set.
TemplateNotFound: Path was set but file does not exists.
TemplateNotFound: Path was set but file does not exist.
"""
host_name = self.host_name
@ -1045,7 +1064,7 @@ class PlaceholderPlugin(object):
Using shared data from builder but stored under plugin identifier.
Key should be self explanatory to content.
Key should be self-explanatory to content.
- wrong: 'folder'
- good: 'folder_path'
@ -1085,7 +1104,7 @@ class PlaceholderPlugin(object):
Using shared data from builder but stored under plugin identifier.
Key should be self explanatory to content.
Key should be self-explanatory to content.
- wrong: 'folder'
- good: 'folder_path'
@ -1107,10 +1126,10 @@ class PlaceholderItem(object):
"""Item representing single item in scene that is a placeholder to process.
Items are always created and updated by their plugins. Each plugin can use
modified class of 'PlacehoderItem' but only to add more options instead of
modified class of 'PlaceholderItem' but only to add more options instead of
new other.
Scene identifier is used to avoid processing of the palceholder item
Scene identifier is used to avoid processing of the placeholder item
multiple times so must be unique across whole workfile builder.
Args:
@ -1162,7 +1181,7 @@ class PlaceholderItem(object):
"""Placeholder data which can modify how placeholder is processed.
Possible general keys
- order: Can define the order in which is palceholder processed.
- order: Can define the order in which is placeholder processed.
Lower == earlier.
Other keys are defined by placeholder and should validate them on item
@ -1264,11 +1283,9 @@ class PlaceholderLoadMixin(object):
"""Unified attribute definitions for load placeholder.
Common function for placeholder plugins used for loading of
repsentations. Use it in 'get_placeholder_options'.
representations. Use it in 'get_placeholder_options'.
Args:
plugin (PlaceholderPlugin): Plugin used for loading of
representations.
options (Dict[str, Any]): Already available options which are used
as defaults for attributes.
@ -1468,7 +1485,9 @@ class PlaceholderLoadMixin(object):
product_name_regex = None
if product_name_regex_value:
product_name_regex = re.compile(product_name_regex_value)
product_type = placeholder.data["family"]
product_type = placeholder.data.get("product_type")
if product_type is None:
product_type = placeholder.data["family"]
builder_type = placeholder.data["builder_type"]
folder_ids = []
@ -1529,35 +1548,22 @@ class PlaceholderLoadMixin(object):
pass
def _reduce_last_version_repre_entities(self, representations):
"""Reduce representations to last verison."""
def _reduce_last_version_repre_entities(self, repre_contexts):
"""Reduce representations to last version."""
mapping = {}
# TODO use representation context with entities
# - using 'folder', 'subset' and 'version' from context on
# representation is danger
for repre_entity in representations:
repre_context = repre_entity["context"]
folder_name = repre_context["asset"]
product_name = repre_context["subset"]
version = repre_context.get("version", -1)
if folder_name not in mapping:
mapping[folder_name] = {}
product_mapping = mapping[folder_name]
if product_name not in product_mapping:
product_mapping[product_name] = collections.defaultdict(list)
version_mapping = product_mapping[product_name]
version_mapping[version].append(repre_entity)
version_mapping_by_product_id = {}
for repre_context in repre_contexts:
product_id = repre_context["product"]["id"]
version = repre_context["version"]["version"]
version_mapping = version_mapping_by_product_id.setdefault(
product_id, {}
)
version_mapping.setdefault(version, []).append(repre_context)
output = []
for product_mapping in mapping.values():
for version_mapping in product_mapping.values():
last_version = tuple(sorted(version_mapping.keys()))[-1]
output.extend(version_mapping[last_version])
for version_mapping in version_mapping_by_product_id.values():
last_version = max(version_mapping.keys())
output.extend(version_mapping[last_version])
return output
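# Sketch of the reduction with minimal, hypothetical repre contexts:
#
# contexts = [
#     {"product": {"id": "p1"}, "version": {"version": 1}},
#     {"product": {"id": "p1"}, "version": {"version": 3}},
#     {"product": {"id": "p2"}, "version": {"version": 2}},
# ]
# -> keeps only the "p1" context with version 3 and the "p2"
#    context with version 2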
def populate_load_placeholder(self, placeholder, ignore_repre_ids=None):
@ -1585,32 +1591,33 @@ class PlaceholderLoadMixin(object):
loader_name = placeholder.data["loader"]
loader_args = self.parse_loader_args(placeholder.data["loader_args"])
placeholder_representations = self._get_representations(placeholder)
placeholder_representations = [
repre_entity
for repre_entity in self._get_representations(placeholder)
if repre_entity["id"] not in ignore_repre_ids
]
filtered_representations = []
for representation in self._reduce_last_version_repre_entities(
placeholder_representations
):
repre_id = representation["id"]
if repre_id not in ignore_repre_ids:
filtered_representations.append(representation)
if not filtered_representations:
repre_load_contexts = get_representation_contexts(
self.project_name, placeholder_representations
)
filtered_repre_contexts = self._reduce_last_version_repre_entities(
repre_load_contexts.values()
)
if not filtered_repre_contexts:
self.log.info((
"There's no representation for this placeholder: {}"
).format(placeholder.scene_identifier))
if not placeholder.data.get("keep_placeholder", True):
self.delete_placeholder(placeholder)
return
repre_load_contexts = get_representation_contexts(
self.project_name, filtered_representations
)
loaders_by_name = self.builder.get_loaders_by_name()
self._before_placeholder_load(
placeholder
)
failed = False
for repre_load_context in repre_load_contexts.values():
for repre_load_context in filtered_repre_contexts:
folder_path = repre_load_context["folder"]["path"]
product_name = repre_load_context["product"]["name"]
representation = repre_load_context["representation"]
@ -1695,8 +1702,6 @@ class PlaceholderCreateMixin(object):
publishable instances. Use it with 'get_placeholder_options'.
Args:
plugin (PlaceholderPlugin): Plugin used for creating of
publish instances.
options (Dict[str, Any]): Already available options which are used
as defaults for attributes.
@ -1918,3 +1923,23 @@ class CreatePlaceholderItem(PlaceholderItem):
def create_failed(self, creator_data):
self._failed_created_publish_instances.append(creator_data)
def discover_workfile_build_plugins(*args, **kwargs):
return discover(PlaceholderPlugin, *args, **kwargs)
def register_workfile_build_plugin(plugin: PlaceholderPlugin):
register_plugin(PlaceholderPlugin, plugin)
def deregister_workfile_build_plugin(plugin: PlaceholderPlugin):
deregister_plugin(PlaceholderPlugin, plugin)
def register_workfile_build_plugin_path(path: str):
register_plugin_path(PlaceholderPlugin, path)
def deregister_workfile_build_plugin_path(path: str):
deregister_plugin_path(PlaceholderPlugin, path)
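# Typical registration sketch from a host integration (the path is
# hypothetical):
#
# from ayon_core.pipeline import register_workfile_build_plugin_path
#
# register_workfile_build_plugin_path(
#     "/path/to/host/plugins/workfile_build")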

View file

@ -1,501 +1,426 @@
# TODO This plugin is not converted for AYON
#
# import collections
# import os
# import uuid
#
# import clique
# import ayon_api
# from pymongo import UpdateOne
# import qargparse
# from qtpy import QtWidgets, QtCore
#
# from ayon_core import style
# from ayon_core.addon import AddonsManager
# from ayon_core.lib import format_file_size
# from ayon_core.pipeline import load, Anatomy
# from ayon_core.pipeline.load import (
# get_representation_path_with_anatomy,
# InvalidRepresentationContext,
# )
#
#
# class DeleteOldVersions(load.ProductLoaderPlugin):
# """Deletes specific number of old version"""
#
# is_multiple_contexts_compatible = True
# sequence_splitter = "__sequence_splitter__"
#
# representations = {"*"}
# product_types = {"*"}
# tool_names = ["library_loader"]
#
# label = "Delete Old Versions"
# order = 35
# icon = "trash"
# color = "#d8d8d8"
#
# options = [
# qargparse.Integer(
# "versions_to_keep", default=2, min=0, help="Versions to keep:"
# ),
# qargparse.Boolean(
# "remove_publish_folder", help="Remove publish folder:"
# )
# ]
#
# def delete_whole_dir_paths(self, dir_paths, delete=True):
# size = 0
#
# for dir_path in dir_paths:
# # Delete all files and fodlers in dir path
# for root, dirs, files in os.walk(dir_path, topdown=False):
# for name in files:
# file_path = os.path.join(root, name)
# size += os.path.getsize(file_path)
# if delete:
# os.remove(file_path)
# self.log.debug("Removed file: {}".format(file_path))
#
# for name in dirs:
# if delete:
# os.rmdir(os.path.join(root, name))
#
# if not delete:
# continue
#
# # Delete even the folder and it's parents folders if they are empty
# while True:
# if not os.path.exists(dir_path):
# dir_path = os.path.dirname(dir_path)
# continue
#
# if len(os.listdir(dir_path)) != 0:
# break
#
# os.rmdir(os.path.join(dir_path))
#
# return size
#
# def path_from_representation(self, representation, anatomy):
# try:
# context = representation["context"]
# except KeyError:
# return (None, None)
#
# try:
# path = get_representation_path_with_anatomy(
# representation, anatomy
# )
# except InvalidRepresentationContext:
# return (None, None)
#
# sequence_path = None
# if "frame" in context:
# context["frame"] = self.sequence_splitter
# sequence_path = get_representation_path_with_anatomy(
# representation, anatomy
# )
#
# if sequence_path:
# sequence_path = sequence_path.normalized()
#
# return (path.normalized(), sequence_path)
#
# def delete_only_repre_files(self, dir_paths, file_paths, delete=True):
# size = 0
#
# for dir_id, dir_path in dir_paths.items():
# dir_files = os.listdir(dir_path)
# collections, remainders = clique.assemble(dir_files)
# for file_path, seq_path in file_paths[dir_id]:
# file_path_base = os.path.split(file_path)[1]
# # Just remove file if `frame` key was not in context or
# # filled path is in remainders (single file sequence)
# if not seq_path or file_path_base in remainders:
# if not os.path.exists(file_path):
# self.log.debug(
# "File was not found: {}".format(file_path)
# )
# continue
#
# size += os.path.getsize(file_path)
#
# if delete:
# os.remove(file_path)
# self.log.debug("Removed file: {}".format(file_path))
#
# if file_path_base in remainders:
# remainders.remove(file_path_base)
# continue
#
# seq_path_base = os.path.split(seq_path)[1]
# head, tail = seq_path_base.split(self.sequence_splitter)
#
# final_col = None
# for collection in collections:
# if head != collection.head or tail != collection.tail:
# continue
# final_col = collection
# break
#
# if final_col is not None:
# # Fill full path to head
# final_col.head = os.path.join(dir_path, final_col.head)
# for _file_path in final_col:
# if os.path.exists(_file_path):
#
# size += os.path.getsize(_file_path)
#
# if delete:
# os.remove(_file_path)
# self.log.debug(
# "Removed file: {}".format(_file_path)
# )
#
# _seq_path = final_col.format("{head}{padding}{tail}")
# self.log.debug("Removed files: {}".format(_seq_path))
# collections.remove(final_col)
#
# elif os.path.exists(file_path):
# size += os.path.getsize(file_path)
#
# if delete:
# os.remove(file_path)
# self.log.debug("Removed file: {}".format(file_path))
# else:
# self.log.debug(
# "File was not found: {}".format(file_path)
# )
#
# # Delete as much as possible parent folders
# if not delete:
# return size
#
# for dir_path in dir_paths.values():
# while True:
# if not os.path.exists(dir_path):
# dir_path = os.path.dirname(dir_path)
# continue
#
# if len(os.listdir(dir_path)) != 0:
# break
#
# self.log.debug("Removed folder: {}".format(dir_path))
# os.rmdir(dir_path)
#
# return size
#
# def message(self, text):
# msgBox = QtWidgets.QMessageBox()
# msgBox.setText(text)
# msgBox.setStyleSheet(style.load_stylesheet())
# msgBox.setWindowFlags(
# msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint
# )
# msgBox.exec_()
#
# def get_data(self, context, versions_count):
# product_entity = context["product"]
# folder_entity = context["folder"]
# project_name = context["project"]["name"]
# anatomy = Anatomy(project_name)
#
# versions = list(ayon_api.get_versions(
# project_name, product_ids=[product_entity["id"]]
# ))
#
# versions_by_parent = collections.defaultdict(list)
# for ent in versions:
# versions_by_parent[ent["productId"]].append(ent)
#
# def sort_func(ent):
# return int(ent["version"])
#
# all_last_versions = []
# for _parent_id, _versions in versions_by_parent.items():
# for idx, version in enumerate(
# sorted(_versions, key=sort_func, reverse=True)
# ):
# if idx >= versions_count:
# break
# all_last_versions.append(version)
#
# self.log.debug("Collected versions ({})".format(len(versions)))
#
# # Filter latest versions
# for version in all_last_versions:
# versions.remove(version)
#
# # Update versions_by_parent without filtered versions
# versions_by_parent = collections.defaultdict(list)
# for ent in versions:
# versions_by_parent[ent["productId"]].append(ent)
#
# # Filter already deleted versions
# versions_to_pop = []
# for version in versions:
# version_tags = version["data"].get("tags")
# if version_tags and "deleted" in version_tags:
# versions_to_pop.append(version)
#
# for version in versions_to_pop:
# msg = "Folder: \"{}\" | Product: \"{}\" | Version: \"{}\"".format(
# folder_entity["path"],
# product_entity["name"],
# version["version"]
# )
# self.log.debug((
# "Skipping version. Already tagged as `deleted`. < {} >"
# ).format(msg))
# versions.remove(version)
#
# version_ids = [ent["id"] for ent in versions]
#
# self.log.debug(
# "Filtered versions to delete ({})".format(len(version_ids))
# )
#
# if not version_ids:
# msg = "Skipping processing. Nothing to delete on {}/{}".format(
# folder_entity["path"], product_entity["name"]
# )
# self.log.info(msg)
# print(msg)
# return
#
# repres = list(ayon_api.get_representations(
# project_name, version_ids=version_ids
# ))
#
# self.log.debug(
# "Collected representations to remove ({})".format(len(repres))
# )
#
# dir_paths = {}
# file_paths_by_dir = collections.defaultdict(list)
# for repre in repres:
# file_path, seq_path = self.path_from_representation(
# repre, anatomy
# )
# if file_path is None:
# self.log.debug((
# "Could not format path for represenation \"{}\""
# ).format(str(repre)))
# continue
#
# dir_path = os.path.dirname(file_path)
# dir_id = None
# for _dir_id, _dir_path in dir_paths.items():
# if _dir_path == dir_path:
# dir_id = _dir_id
# break
#
# if dir_id is None:
# dir_id = uuid.uuid4()
# dir_paths[dir_id] = dir_path
#
# file_paths_by_dir[dir_id].append([file_path, seq_path])
#
# dir_ids_to_pop = []
# for dir_id, dir_path in dir_paths.items():
# if os.path.exists(dir_path):
# continue
#
# dir_ids_to_pop.append(dir_id)
#
# # Pop dirs from both dictionaries
# for dir_id in dir_ids_to_pop:
# dir_paths.pop(dir_id)
# paths = file_paths_by_dir.pop(dir_id)
# # TODO report of missing directories?
# paths_msg = ", ".join([
# "'{}'".format(path[0].replace("\\", "/")) for path in paths
# ])
# self.log.debug((
# "Folder does not exist. Deleting it's files skipped: {}"
# ).format(paths_msg))
#
# return {
# "dir_paths": dir_paths,
# "file_paths_by_dir": file_paths_by_dir,
# "versions": versions,
# "folder": folder_entity,
# "product": product_entity,
# "archive_product": versions_count == 0
# }
#
# def main(self, project_name, data, remove_publish_folder):
# # Size of files.
# size = 0
# if not data:
# return size
#
# if remove_publish_folder:
# size = self.delete_whole_dir_paths(data["dir_paths"].values())
# else:
# size = self.delete_only_repre_files(
# data["dir_paths"], data["file_paths_by_dir"]
# )
#
# mongo_changes_bulk = []
# for version in data["versions"]:
# orig_version_tags = version["data"].get("tags") or []
# version_tags = [tag for tag in orig_version_tags]
# if "deleted" not in version_tags:
# version_tags.append("deleted")
#
# if version_tags == orig_version_tags:
# continue
#
# update_query = {"id": version["id"]}
# update_data = {"$set": {"data.tags": version_tags}}
# mongo_changes_bulk.append(UpdateOne(update_query, update_data))
#
# if data["archive_product"]:
# mongo_changes_bulk.append(UpdateOne(
# {
# "id": data["product"]["id"],
# "type": "subset"
# },
# {"$set": {"type": "archived_subset"}}
# ))
#
# if mongo_changes_bulk:
# dbcon = AvalonMongoDB()
# dbcon.Session["AYON_PROJECT_NAME"] = project_name
# dbcon.install()
# dbcon.bulk_write(mongo_changes_bulk)
# dbcon.uninstall()
#
# self._ftrack_delete_versions(data)
#
# return size
#
# def _ftrack_delete_versions(self, data):
# """Delete version on ftrack.
#
# Handling of ftrack logic in this plugin is not ideal. But in OP3 it is
# almost impossible to solve the issue other way.
#
# Note:
# Asset versions on ftrack are not deleted but marked as
# "not published" which cause that they're invisible.
#
# Args:
# data (dict): Data sent to product loader with full context.
# """
#
# # First check for ftrack id on folder entity
# # - skip if ther is none
# ftrack_id = data["folder"]["attrib"].get("ftrackId")
# if not ftrack_id:
# self.log.info((
# "Folder does not have filled ftrack id. Skipped delete"
# " of ftrack version."
# ))
# return
#
# # Check if ftrack module is enabled
# addons_manager = AddonsManager()
# ftrack_addon = addons_manager.get("ftrack")
# if not ftrack_addon or not ftrack_addon.enabled:
# return
#
# import ftrack_api
#
# session = ftrack_api.Session()
# product_name = data["product"]["name"]
# versions = {
# '"{}"'.format(version_doc["name"])
# for version_doc in data["versions"]
# }
# asset_versions = session.query(
# (
# "select id, is_published from AssetVersion where"
# " asset.parent.id is \"{}\""
# " and asset.name is \"{}\""
# " and version in ({})"
# ).format(
# ftrack_id,
# product_name,
# ",".join(versions)
# )
# ).all()
#
# # Set attribute `is_published` to `False` on ftrack AssetVersions
# for asset_version in asset_versions:
# asset_version["is_published"] = False
#
# try:
# session.commit()
#
# except Exception:
# msg = (
# "Could not set `is_published` attribute to `False`"
# " for selected AssetVersions."
# )
# self.log.error(msg)
# self.message(msg)
#
# def load(self, contexts, name=None, namespace=None, options=None):
# try:
# size = 0
# for count, context in enumerate(contexts):
# versions_to_keep = 2
# remove_publish_folder = False
# if options:
# versions_to_keep = options.get(
# "versions_to_keep", versions_to_keep
# )
# remove_publish_folder = options.get(
# "remove_publish_folder", remove_publish_folder
# )
#
# data = self.get_data(context, versions_to_keep)
# if not data:
# continue
#
# project_name = context["project"]["name"]
# size += self.main(project_name, data, remove_publish_folder)
# print("Progressing {}/{}".format(count + 1, len(contexts)))
#
# msg = "Total size of files: {}".format(format_file_size(size))
# self.log.info(msg)
# self.message(msg)
#
# except Exception:
# self.log.error("Failed to delete versions.", exc_info=True)
#
#
# class CalculateOldVersions(DeleteOldVersions):
# """Calculate file size of old versions"""
# label = "Calculate Old Versions"
# order = 30
# tool_names = ["library_loader"]
#
# options = [
# qargparse.Integer(
# "versions_to_keep", default=2, min=0, help="Versions to keep:"
# ),
# qargparse.Boolean(
# "remove_publish_folder", help="Remove publish folder:"
# )
# ]
#
# def main(self, project_name, data, remove_publish_folder):
# size = 0
#
# if not data:
# return size
#
# if remove_publish_folder:
# size = self.delete_whole_dir_paths(
# data["dir_paths"].values(), delete=False
# )
# else:
# size = self.delete_only_repre_files(
# data["dir_paths"], data["file_paths_by_dir"], delete=False
# )
#
# return size
import collections
import os
import uuid
import clique
import ayon_api
from ayon_api.operations import OperationsSession
import qargparse
from qtpy import QtWidgets, QtCore
from ayon_core import style
from ayon_core.lib import format_file_size
from ayon_core.pipeline import load, Anatomy
from ayon_core.pipeline.load import (
get_representation_path_with_anatomy,
InvalidRepresentationContext,
)


class DeleteOldVersions(load.ProductLoaderPlugin):
    """Delete a specific number of old versions."""

    is_multiple_contexts_compatible = True
    sequence_splitter = "__sequence_splitter__"
    representations = ["*"]
    product_types = {"*"}
    tool_names = ["library_loader"]
    label = "Delete Old Versions"
    order = 35
    icon = "trash"
    color = "#d8d8d8"

    options = [
        qargparse.Integer(
            "versions_to_keep", default=2, min=0, help="Versions to keep:"
        ),
        qargparse.Boolean(
            "remove_publish_folder", help="Remove publish folder:"
        )
    ]

    def delete_whole_dir_paths(self, dir_paths, delete=True):
        size = 0

        for dir_path in dir_paths:
            # Delete all files and folders in dir path
            for root, dirs, files in os.walk(dir_path, topdown=False):
                for name in files:
                    file_path = os.path.join(root, name)
                    size += os.path.getsize(file_path)
                    if delete:
                        os.remove(file_path)
                        self.log.debug("Removed file: {}".format(file_path))

                for name in dirs:
                    if delete:
                        os.rmdir(os.path.join(root, name))

            if not delete:
                continue

            # Delete the folder and its parent folders if they are empty
            while True:
                if not os.path.exists(dir_path):
                    dir_path = os.path.dirname(dir_path)
                    continue

                if len(os.listdir(dir_path)) != 0:
                    break

                os.rmdir(os.path.join(dir_path))

        return size

    def path_from_representation(self, representation, anatomy):
        try:
            context = representation["context"]
        except KeyError:
            return (None, None)

        try:
            path = get_representation_path_with_anatomy(
                representation, anatomy
            )
        except InvalidRepresentationContext:
            return (None, None)

        sequence_path = None
        if "frame" in context:
            context["frame"] = self.sequence_splitter
            sequence_path = get_representation_path_with_anatomy(
                representation, anatomy
            )

        if sequence_path:
            sequence_path = sequence_path.normalized()

        return (path.normalized(), sequence_path)

    def delete_only_repre_files(self, dir_paths, file_paths, delete=True):
        size = 0

        for dir_id, dir_path in dir_paths.items():
            dir_files = os.listdir(dir_path)
            collections, remainders = clique.assemble(dir_files)
            for file_path, seq_path in file_paths[dir_id]:
                file_path_base = os.path.split(file_path)[1]
                # Just remove file if `frame` key was not in context or
                # filled path is in remainders (single file sequence)
                if not seq_path or file_path_base in remainders:
                    if not os.path.exists(file_path):
                        self.log.debug(
                            "File was not found: {}".format(file_path)
                        )
                        continue

                    size += os.path.getsize(file_path)
                    if delete:
                        os.remove(file_path)
                        self.log.debug("Removed file: {}".format(file_path))

                    if file_path_base in remainders:
                        remainders.remove(file_path_base)
                    continue

                seq_path_base = os.path.split(seq_path)[1]
                head, tail = seq_path_base.split(self.sequence_splitter)

                final_col = None
                for collection in collections:
                    if head != collection.head or tail != collection.tail:
                        continue
                    final_col = collection
                    break

                if final_col is not None:
                    # Fill full path to head
                    final_col.head = os.path.join(dir_path, final_col.head)
                    for _file_path in final_col:
                        if os.path.exists(_file_path):
                            size += os.path.getsize(_file_path)
                            if delete:
                                os.remove(_file_path)
                                self.log.debug(
                                    "Removed file: {}".format(_file_path)
                                )

                    _seq_path = final_col.format("{head}{padding}{tail}")
                    self.log.debug("Removed files: {}".format(_seq_path))
                    collections.remove(final_col)

                elif os.path.exists(file_path):
                    size += os.path.getsize(file_path)
                    if delete:
                        os.remove(file_path)
                        self.log.debug("Removed file: {}".format(file_path))
                else:
                    self.log.debug(
                        "File was not found: {}".format(file_path)
                    )

        # Delete as many parent folders as possible
        if not delete:
            return size

        for dir_path in dir_paths.values():
            while True:
                if not os.path.exists(dir_path):
                    dir_path = os.path.dirname(dir_path)
                    continue

                if len(os.listdir(dir_path)) != 0:
                    break

                self.log.debug("Removed folder: {}".format(dir_path))
                os.rmdir(dir_path)

        return size

    def message(self, text):
        msgBox = QtWidgets.QMessageBox()
        msgBox.setText(text)
        msgBox.setStyleSheet(style.load_stylesheet())
        msgBox.setWindowFlags(
            msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint
        )
        msgBox.exec_()

    def get_data(self, context, versions_count):
        product_entity = context["product"]
        folder_entity = context["folder"]
        project_name = context["project"]["name"]
        anatomy = Anatomy(project_name, project_entity=context["project"])

        version_fields = ayon_api.get_default_fields_for_type("version")
        version_fields.add("tags")
        versions = list(ayon_api.get_versions(
            project_name,
            product_ids=[product_entity["id"]],
            active=None,
            hero=False,
            fields=version_fields
        ))

        self.log.debug(
            "Found versions ({})".format(len(versions))
        )

        versions_by_parent = collections.defaultdict(list)
        for ent in versions:
            versions_by_parent[ent["productId"]].append(ent)

        def sort_func(ent):
            return int(ent["version"])

        all_last_versions = []
        for _parent_id, _versions in versions_by_parent.items():
            for idx, version in enumerate(
                sorted(_versions, key=sort_func, reverse=True)
            ):
                if idx >= versions_count:
                    break
                all_last_versions.append(version)

        self.log.debug("Collected versions ({})".format(len(versions)))

        # Filter out the latest versions
        for version in all_last_versions:
            versions.remove(version)

        # Update versions_by_parent without filtered versions
        versions_by_parent = collections.defaultdict(list)
        for ent in versions:
            versions_by_parent[ent["productId"]].append(ent)

        # Filter already deleted versions
        versions_to_pop = []
        for version in versions:
            if "deleted" in version["tags"]:
                versions_to_pop.append(version)

        for version in versions_to_pop:
            msg = "Folder: \"{}\" | Product: \"{}\" | Version: \"{}\"".format(
                folder_entity["path"],
                product_entity["name"],
                version["version"]
            )
            self.log.debug((
                "Skipping version. Already tagged as inactive. < {} >"
            ).format(msg))
            versions.remove(version)

        version_ids = [ent["id"] for ent in versions]

        self.log.debug(
            "Filtered versions to delete ({})".format(len(version_ids))
        )

        if not version_ids:
            msg = "Skipping processing. Nothing to delete on {}/{}".format(
                folder_entity["path"], product_entity["name"]
            )
            self.log.info(msg)
            print(msg)
            return

        repres = list(ayon_api.get_representations(
            project_name, version_ids=version_ids
        ))

        self.log.debug(
            "Collected representations to remove ({})".format(len(repres))
        )

        dir_paths = {}
        file_paths_by_dir = collections.defaultdict(list)
        for repre in repres:
            file_path, seq_path = self.path_from_representation(
                repre, anatomy
            )
            if file_path is None:
                self.log.debug((
                    "Could not format path for representation \"{}\""
                ).format(str(repre)))
                continue

            dir_path = os.path.dirname(file_path)
            dir_id = None
            for _dir_id, _dir_path in dir_paths.items():
                if _dir_path == dir_path:
                    dir_id = _dir_id
                    break

            if dir_id is None:
                dir_id = uuid.uuid4()
                dir_paths[dir_id] = dir_path

            file_paths_by_dir[dir_id].append([file_path, seq_path])

        dir_ids_to_pop = []
        for dir_id, dir_path in dir_paths.items():
            if os.path.exists(dir_path):
                continue
            dir_ids_to_pop.append(dir_id)

        # Pop dirs from both dictionaries
        for dir_id in dir_ids_to_pop:
            dir_paths.pop(dir_id)
            paths = file_paths_by_dir.pop(dir_id)
            # TODO report of missing directories?
            paths_msg = ", ".join([
                "'{}'".format(path[0].replace("\\", "/")) for path in paths
            ])
            self.log.debug((
                "Folder does not exist. Skipping deletion of its files: {}"
            ).format(paths_msg))

        return {
            "dir_paths": dir_paths,
            "file_paths_by_dir": file_paths_by_dir,
            "versions": versions,
            "folder": folder_entity,
            "product": product_entity,
            "archive_product": versions_count == 0
        }

    def main(self, project_name, data, remove_publish_folder):
        # Size of files.
        size = 0
        if not data:
            return size

        if remove_publish_folder:
            size = self.delete_whole_dir_paths(data["dir_paths"].values())
        else:
            size = self.delete_only_repre_files(
                data["dir_paths"], data["file_paths_by_dir"]
            )

        op_session = OperationsSession()
        for version in data["versions"]:
            orig_version_tags = version["tags"]
            version_tags = list(orig_version_tags)
            changes = {}
            if "deleted" not in version_tags:
                version_tags.append("deleted")
                changes["tags"] = version_tags

            if version["active"]:
                changes["active"] = False

            if not changes:
                continue

            op_session.update_entity(
                project_name, "version", version["id"], changes
            )

        op_session.commit()

        return size

    def load(self, contexts, name=None, namespace=None, options=None):
        try:
            size = 0
            for count, context in enumerate(contexts):
                versions_to_keep = 2
                remove_publish_folder = False
                if options:
                    versions_to_keep = options.get(
                        "versions_to_keep", versions_to_keep
                    )
                    remove_publish_folder = options.get(
                        "remove_publish_folder", remove_publish_folder
                    )

                data = self.get_data(context, versions_to_keep)
                if not data:
                    continue

                project_name = context["project"]["name"]
                size += self.main(project_name, data, remove_publish_folder)
                print("Progressing {}/{}".format(count + 1, len(contexts)))

            msg = "Total size of files: {}".format(format_file_size(size))
            self.log.info(msg)
            self.message(msg)

        except Exception:
            self.log.error("Failed to delete versions.", exc_info=True)


class CalculateOldVersions(DeleteOldVersions):
    """Calculate file size of old versions."""

    label = "Calculate Old Versions"
    order = 30
    tool_names = ["library_loader"]

    options = [
        qargparse.Integer(
            "versions_to_keep", default=2, min=0, help="Versions to keep:"
        ),
        qargparse.Boolean(
            "remove_publish_folder", help="Remove publish folder:"
        )
    ]

    def main(self, project_name, data, remove_publish_folder):
        size = 0

        if not data:
            return size

        if remove_publish_folder:
            size = self.delete_whole_dir_paths(
                data["dir_paths"].values(), delete=False
            )
        else:
            size = self.delete_only_repre_files(
                data["dir_paths"], data["file_paths_by_dir"], delete=False
            )

        return size
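
For reference, delete_only_repre_files relies on clique to match a whole frame sequence from a single representation path. A minimal sketch of that behaviour, with hypothetical file names:

import clique

files = ["render.1001.exr", "render.1002.exr", "render.1003.exr", "thumb.png"]
collections, remainders = clique.assemble(files)
for collection in collections:
    # Prints e.g. "render.%04d.exr [1001-1003]"
    print(collection.format("{head}{padding}{tail} [{ranges}]"))
# Files that belong to no sequence stay in remainders ("thumb.png" here)
print(remainders)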

View file

@ -167,7 +167,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
        "uasset",
        "blendScene",
        "yeticacheUE",
        "tycache"
        "tycache",
        "csv_ingest_file",
    ]

    default_template_name = "publish"

View file

@ -8,12 +8,12 @@ from ayon_core.tools.utils.dialogs import show_message_dialog
def open_template_ui(builder, main_window):
    """Open template from `builder`

    Asks user about overwriting current scene and feedsback exceptions.
    Asks user about overwriting current scene and feeds back exceptions.
    """
    result = QtWidgets.QMessageBox.question(
        main_window,
        "Opening template",
        "Caution! You will loose unsaved changes.\nDo you want to continue?",
        "Caution! You will lose unsaved changes.\nDo you want to continue?",
        QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
    )
    if result == QtWidgets.QMessageBox.Yes:
View file

@ -20,6 +20,8 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel):
        controller (AbstractWorkfilesFrontend): The control object.
    """

    refreshed = QtCore.Signal()

    def __init__(self, controller):
        super(WorkAreaFilesModel, self).__init__()
@ -163,6 +165,12 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel):
        self._fill_items()

    def _fill_items(self):
        try:
            self._fill_items_impl()
        finally:
            self.refreshed.emit()

    def _fill_items_impl(self):
        folder_id = self._selected_folder_id
        task_id = self._selected_task_id
        if not folder_id or not task_id:
@ -285,6 +293,7 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
        selection_model.selectionChanged.connect(self._on_selection_change)
        view.double_clicked.connect(self._on_mouse_double_click)
        view.customContextMenuRequested.connect(self._on_context_menu)
        model.refreshed.connect(self._on_model_refresh)

        controller.register_event_callback(
            "expected_selection_changed",
@ -298,6 +307,7 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
        self._controller = controller
        self._published_mode = False
        self._change_selection_on_refresh = True

    def set_published_mode(self, published_mode):
        """Set the published mode.
@ -379,7 +389,9 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
        if not workfile_info["current"]:
            return

        self._change_selection_on_refresh = False
        self._model.refresh()
        self._change_selection_on_refresh = True

        workfile_name = workfile_info["name"]
        if (
@ -394,3 +406,30 @@ class WorkAreaFilesWidget(QtWidgets.QWidget):
        self._controller.expected_workfile_selected(
            event["folder"]["id"], event["task"]["name"], workfile_name
        )

    def _on_model_refresh(self):
        if (
            not self._change_selection_on_refresh
            or self._proxy_model.rowCount() < 1
        ):
            return

        # Find the row with latest date modified
        latest_index = max(
            (
                self._proxy_model.index(idx, 0)
                for idx in range(self._proxy_model.rowCount())
            ),
            key=lambda model_index: model_index.data(DATE_MODIFIED_ROLE)
        )
        # Select row of latest modified
        selection_model = self._view.selectionModel()
        selection_model.select(
            latest_index,
            (
                QtCore.QItemSelectionModel.ClearAndSelect
                | QtCore.QItemSelectionModel.Current
                | QtCore.QItemSelectionModel.Rows
            )
        )
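
The hunk above references DATE_MODIFIED_ROLE without showing its definition; in Qt item models such roles are typically custom integer roles set per item. A sketch under that assumption (the real constant lives elsewhere in this module):

from qtpy import QtCore, QtGui

DATE_MODIFIED_ROLE = QtCore.Qt.UserRole + 1  # assumed definition

item = QtGui.QStandardItem("shot010_v003.ma")
item.setData(1714464000.0, DATE_MODIFIED_ROLE)  # modification timestamp
# After a refresh, max(..., key=lambda idx: idx.data(DATE_MODIFIED_ROLE))
# then picks the most recently modified workfile row for selection.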

View file

@ -118,11 +118,11 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
        overlay_invalid_host = InvalidHostOverlay(self)
        overlay_invalid_host.setVisible(False)

        first_show_timer = QtCore.QTimer()
        first_show_timer.setSingleShot(True)
        first_show_timer.setInterval(50)
        show_timer = QtCore.QTimer()
        show_timer.setSingleShot(True)
        show_timer.setInterval(50)

        first_show_timer.timeout.connect(self._on_first_show)
        show_timer.timeout.connect(self._on_show)

        controller.register_event_callback(
            "save_as.finished",
@ -159,7 +159,7 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
        self._tasks_widget = tasks_widget
        self._side_panel = side_panel

        self._first_show_timer = first_show_timer
        self._show_timer = show_timer

        self._post_init()
@ -287,9 +287,9 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
    def showEvent(self, event):
        super(WorkfilesToolWindow, self).showEvent(event)
        self._show_timer.start()
        if self._first_show:
            self._first_show = False
            self._first_show_timer.start()
            self.setStyleSheet(style.load_stylesheet())

    def keyPressEvent(self, event):
@ -303,9 +303,8 @@ class WorkfilesToolWindow(QtWidgets.QWidget):
        pass

    def _on_first_show(self):
        if not self._controller_refreshed:
            self.refresh()

    def _on_show(self):
        self.refresh()

    def _on_file_text_filter_change(self, text):
        self._files_widget.set_text_filter(text)
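
The change collapses the one-shot first_show_timer into a show_timer that fires on every show. The underlying idiom, as a standalone sketch:

from qtpy import QtCore, QtWidgets

class DeferredRefreshWindow(QtWidgets.QWidget):
    """Defer a slow refresh until after the window is painted."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self._show_timer = QtCore.QTimer()
        self._show_timer.setSingleShot(True)
        self._show_timer.setInterval(50)
        self._show_timer.timeout.connect(self._on_show)

    def showEvent(self, event):
        super().showEvent(event)
        # Start the single-shot timer; the refresh runs ~50 ms later,
        # once the event loop has had a chance to draw the window.
        self._show_timer.start()

    def _on_show(self):
        pass  # refresh models here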

View file

@ -0,0 +1,3 @@
name = "aftereffects"
title = "AfterEffects"
version = "0.1.3"

View file

@ -1,14 +1,9 @@
from ayon_server.addons import BaseServerAddon
from .settings import AfterEffectsSettings, DEFAULT_AFTEREFFECTS_SETTING
from .version import __version__


class AfterEffects(BaseServerAddon):
    name = "aftereffects"
    title = "AfterEffects"
    version = __version__
    settings_model = AfterEffectsSettings

    async def get_default_settings(self):

View file

@ -1,3 +0,0 @@
# -*- coding: utf-8 -*-
"""Package declaring addon version."""
__version__ = "0.1.3"
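
The same migration repeats across the addons below: each one gains a package.py carrying name, title, and version, while the class attributes duplicating them and the standalone version.py files are dropped. The create_package.py change further down relies on this, reading the version straight from package.py.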

View file

@ -1,3 +1,10 @@
name = "applications"
title = "Applications"
version = "0.2.1"
ayon_server_version = ">=1.0.7"
ayon_launcher_version = ">=1.0.2"
ayon_required_addons = {
    "core": ">0.3.0",
}
ayon_compatible_addons = {}

View file

@ -0,0 +1,3 @@
name = "blender"
title = "Blender"
version = "0.1.8"

View file

@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
from .version import __version__
from .settings import BlenderSettings, DEFAULT_VALUES


class BlenderAddon(BaseServerAddon):
    name = "blender"
    title = "Blender"
    version = __version__
    settings_model: Type[BlenderSettings] = BlenderSettings
    frontend_scopes = {}
    services = {}

    async def get_default_settings(self):
        settings_model_cls = self.get_settings_model()

View file

@ -1 +0,0 @@
__version__ = "0.1.8"

View file

@ -0,0 +1,3 @@
name = "celaction"
title = "CelAction"
version = "0.1.0"

View file

@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
from .version import __version__
from .settings import CelActionSettings, DEFAULT_VALUES


class CelActionAddon(BaseServerAddon):
    name = "celaction"
    title = "CelAction"
    version = __version__
    settings_model: Type[CelActionSettings] = CelActionSettings
    frontend_scopes = {}
    services = {}

    async def get_default_settings(self):
        settings_model_cls = self.get_settings_model()

View file

@ -1 +0,0 @@
__version__ = "0.1.0"

View file

@ -0,0 +1,3 @@
name = "clockify"
title = "Clockify"
version = "0.1.1"

View file

@ -2,14 +2,8 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
from .version import __version__
from .settings import ClockifySettings


class ClockifyAddon(BaseServerAddon):
    name = "clockify"
    title = "Clockify"
    version = __version__
    settings_model: Type[ClockifySettings] = ClockifySettings
    frontend_scopes = {}
    services = {}

View file

@ -1 +0,0 @@
__version__ = "0.1.1"

View file

@ -5,7 +5,7 @@ import shutil
import argparse
import zipfile
import types
import importlib
import importlib.machinery
import platform
import collections
from pathlib import Path
@ -245,12 +245,8 @@ def create_addon_package(
    keep_source: bool,
):
    src_package_py = addon_dir / "package.py"

    package = None
    if src_package_py.exists():
        package = import_filepath(src_package_py)
        addon_version = package.version
    else:
        addon_version = get_addon_version(addon_dir)
    package = import_filepath(src_package_py)
    addon_version = package.version

    addon_output_dir = output_dir / addon_dir.name / addon_version
    if addon_output_dir.exists():
@ -259,18 +255,7 @@ def create_addon_package(
    # Copy server content
    dst_package_py = addon_output_dir / "package.py"
    if package is not None:
        shutil.copy(src_package_py, dst_package_py)
    else:
        addon_name = addon_dir.name
        if addon_name == "royal_render":
            addon_name = "royalrender"
        package_py_content = PACKAGE_PY_TEMPLATE.format(
            addon_name=addon_name, addon_version=addon_version
        )
        with open(dst_package_py, "w+") as pkg_py:
            pkg_py.write(package_py_content)
    shutil.copy(src_package_py, dst_package_py)

    server_dir = addon_dir / "server"
    shutil.copytree(

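import_filepath itself is not shown in this hunk; the switch to importlib.machinery suggests it loads package.py as a module roughly like this (a sketch, the real helper may differ):

import importlib.machinery
import importlib.util

def import_filepath(path, module_name="package"):
    # Load an arbitrary .py file as a module object without sys.path tricks
    loader = importlib.machinery.SourceFileLoader(module_name, str(path))
    spec = importlib.util.spec_from_loader(module_name, loader)
    module = importlib.util.module_from_spec(spec)
    loader.exec_module(module)
    return module

package = import_filepath("server_addon/maya/package.py")
print(package.name, package.version)  # -> "maya" "0.1.16"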
View file

@ -0,0 +1,3 @@
name = "deadline"
title = "Deadline"
version = "0.1.10"

View file

@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
from .version import __version__
from .settings import DeadlineSettings, DEFAULT_VALUES


class Deadline(BaseServerAddon):
    name = "deadline"
    title = "Deadline"
    version = __version__
    settings_model: Type[DeadlineSettings] = DeadlineSettings

    async def get_default_settings(self):

View file

@ -22,7 +22,7 @@ class ServerListSubmodel(BaseSettingsModel):
async def defined_deadline_ws_name_enum_resolver(
    addon: BaseServerAddon,
    addon: "BaseServerAddon",
    settings_variant: str = "production",
    project_name: str | None = None,
) -> list[str]:

View file

@ -1 +0,0 @@
__version__ = "0.1.10"

View file

@ -0,0 +1,3 @@
name = "flame"
title = "Flame"
version = "0.1.0"

View file

@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
from .version import __version__
from .settings import FlameSettings, DEFAULT_VALUES


class FlameAddon(BaseServerAddon):
    name = "flame"
    title = "Flame"
    version = __version__
    settings_model: Type[FlameSettings] = FlameSettings
    frontend_scopes = {}
    services = {}

    async def get_default_settings(self):
        settings_model_cls = self.get_settings_model()

View file

@ -1 +0,0 @@
__version__ = "0.1.0"

View file

@ -0,0 +1,3 @@
name = "fusion"
title = "Fusion"
version = "0.1.5"

View file

@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
from .version import __version__
from .settings import FusionSettings, DEFAULT_VALUES


class FusionAddon(BaseServerAddon):
    name = "fusion"
    title = "Fusion"
    version = __version__
    settings_model: Type[FusionSettings] = FusionSettings
    frontend_scopes = {}
    services = {}

    async def get_default_settings(self):
        settings_model_cls = self.get_settings_model()

View file

@ -1 +0,0 @@
__version__ = "0.1.5"

View file

@ -0,0 +1,3 @@
name = "harmony"
title = "Harmony"
version = "0.1.2"

View file

@ -1,14 +1,9 @@
from ayon_server.addons import BaseServerAddon
from .settings import HarmonySettings, DEFAULT_HARMONY_SETTING
from .version import __version__


class Harmony(BaseServerAddon):
    name = "harmony"
    title = "Harmony"
    version = __version__
    settings_model = HarmonySettings

    async def get_default_settings(self):

View file

@ -1,3 +0,0 @@
# -*- coding: utf-8 -*-
"""Package declaring addon version."""
__version__ = "0.1.2"

View file

@ -0,0 +1,3 @@
name = "hiero"
title = "Hiero"
version = "0.1.2"

View file

@ -2,17 +2,11 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
from .version import __version__
from .settings import HieroSettings, DEFAULT_VALUES


class HieroAddon(BaseServerAddon):
    name = "hiero"
    title = "Hiero"
    version = __version__
    settings_model: Type[HieroSettings] = HieroSettings
    frontend_scopes = {}
    services = {}

    async def get_default_settings(self):
        settings_model_cls = self.get_settings_model()

View file

@ -1 +0,0 @@
__version__ = "0.1.2"

View file

@ -0,0 +1,3 @@
name = "houdini"
title = "Houdini"
version = "0.2.13"

View file

@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
from .version import __version__
from .settings import HoudiniSettings, DEFAULT_VALUES


class Houdini(BaseServerAddon):
    name = "houdini"
    title = "Houdini"
    version = __version__
    settings_model: Type[HoudiniSettings] = HoudiniSettings

    async def get_default_settings(self):

View file

@ -34,6 +34,34 @@ class ImageIOFileRulesModel(BaseSettingsModel):
        return value


class WorkfileImageIOModel(BaseSettingsModel):
    """Workfile settings help.

    Empty values will be skipped, allowing any existing env vars to
    pass through as defined.

    Note: The render space in Houdini is always set to the
    'scene_linear' role.
    """

    enabled: bool = SettingsField(False, title="Enabled")
    default_display: str = SettingsField(
        title="Default active displays",
        description="It behaves like the 'OCIO_ACTIVE_DISPLAYS' env var:"
                    " a colon-separated list of displays, e.g. ACES:P3"
    )
    default_view: str = SettingsField(
        title="Default active views",
        description="It behaves like the 'OCIO_ACTIVE_VIEWS' env var:"
                    " a colon-separated list of views, e.g. sRGB:DCDM"
    )
    review_color_space: str = SettingsField(
        title="Review colorspace",
        description="It exposes the OCIO Colorspace parameter on opengl"
                    " nodes. If left empty, AYON will figure out the default"
                    " colorspace using your default display and default view."
    )


class HoudiniImageIOModel(BaseSettingsModel):
    activate_host_color_management: bool = SettingsField(
        True, title="Enable Color Management"
@ -46,3 +74,26 @@ class HoudiniImageIOModel(BaseSettingsModel):
        default_factory=ImageIOFileRulesModel,
        title="File Rules"
    )
    workfile: WorkfileImageIOModel = SettingsField(
        default_factory=WorkfileImageIOModel,
        title="Workfile"
    )


DEFAULT_IMAGEIO_SETTINGS = {
    "activate_host_color_management": False,
    "ocio_config": {
        "override_global_config": False,
        "filepath": []
    },
    "file_rules": {
        "activate_host_rules": False,
        "rules": []
    },
    "workfile": {
        "enabled": False,
        "default_display": "ACES",
        "default_view": "sRGB",
        "review_color_space": ""
    }
}
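
The field descriptions say these values behave like OCIO's active display/view environment variables; a hedged sketch of how a host integration might apply them (the function name is hypothetical):

import os

def apply_workfile_imageio(settings):
    if not settings["enabled"]:
        return
    # Empty values are skipped so existing env vars pass through as defined
    if settings["default_display"]:
        os.environ["OCIO_ACTIVE_DISPLAYS"] = settings["default_display"]
    if settings["default_view"]:
        os.environ["OCIO_ACTIVE_VIEWS"] = settings["default_view"]

apply_workfile_imageio(
    {"enabled": True, "default_display": "ACES", "default_view": "sRGB"}
)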

View file

@ -3,7 +3,10 @@ from .general import (
    GeneralSettingsModel,
    DEFAULT_GENERAL_SETTINGS
)
from .imageio import HoudiniImageIOModel
from .imageio import (
    HoudiniImageIOModel,
    DEFAULT_IMAGEIO_SETTINGS
)
from .shelves import ShelvesModel
from .create import (
    CreatePluginsModel,
@ -40,6 +43,7 @@ class HoudiniSettings(BaseSettingsModel):
DEFAULT_VALUES = {
    "general": DEFAULT_GENERAL_SETTINGS,
    "imageio": DEFAULT_IMAGEIO_SETTINGS,
    "shelves": [],
    "create": DEFAULT_HOUDINI_CREATE_SETTINGS,
    "publish": DEFAULT_HOUDINI_PUBLISH_SETTINGS

View file

@ -1 +0,0 @@
__version__ = "0.2.12"

View file

@ -0,0 +1,3 @@
name = "max"
title = "Max"
version = "0.1.7"

View file

@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
from .version import __version__
from .settings import MaxSettings, DEFAULT_VALUES


class MaxAddon(BaseServerAddon):
    name = "max"
    title = "Max"
    version = __version__
    settings_model: Type[MaxSettings] = MaxSettings

    async def get_default_settings(self):

View file

@ -1 +0,0 @@
__version__ = "0.1.7"

View file

@ -0,0 +1,3 @@
name = "maya"
title = "Maya"
version = "0.1.16"

View file

@ -2,13 +2,9 @@
from ayon_server.addons import BaseServerAddon
from .settings.main import MayaSettings, DEFAULT_MAYA_SETTING
from .version import __version__


class MayaAddon(BaseServerAddon):
    name = "maya"
    title = "Maya"
    version = __version__
    settings_model = MayaSettings

    async def get_default_settings(self):

View file

@ -103,6 +103,17 @@ class ImportLoaderModel(BaseSettingsModel):
    group_name: str = SettingsField(title="Group name")


class YetiRigLoaderModel(LoaderEnabledModel):
    create_cache_instance_on_load: bool = SettingsField(
        title="Create Yeti Cache instance on load",
        description=(
            "When enabled, loading a Yeti Rig product automatically creates"
            " a new Yeti cache instance in preparation for publishing the"
            " output directly."
        )
    )


class LoadersModel(BaseSettingsModel):
    colors: ColorsSetting = SettingsField(
        default_factory=ColorsSetting,
@ -195,8 +206,8 @@ class LoadersModel(BaseSettingsModel):
        default_factory=LoaderEnabledModel,
        title="Yeti Cache Loader"
    )
    YetiRigLoader: LoaderEnabledModel = SettingsField(
        default_factory=LoaderEnabledModel,
    YetiRigLoader: YetiRigLoaderModel = SettingsField(
        default_factory=YetiRigLoaderModel,
        title="Yeti Rig Loader"
    )
@ -266,5 +277,8 @@ DEFAULT_LOADERS_SETTING = {
    "VRaySceneLoader": {"enabled": True},
    "XgenLoader": {"enabled": True},
    "YetiCacheLoader": {"enabled": True},
    "YetiRigLoader": {"enabled": True},
    "YetiRigLoader": {
        "enabled": True,
        "create_cache_instance_on_load": True
    },
}

View file

@ -1,3 +0,0 @@
# -*- coding: utf-8 -*-
"""Package declaring addon version."""
__version__ = "0.1.15"

View file

@ -0,0 +1,3 @@
name = "nuke"
title = "Nuke"
version = "0.1.10"

View file

@ -2,14 +2,10 @@ from typing import Type
from ayon_server.addons import BaseServerAddon
from .version import __version__
from .settings import NukeSettings, DEFAULT_VALUES


class NukeAddon(BaseServerAddon):
    name = "nuke"
    title = "Nuke"
    version = __version__
    settings_model: Type[NukeSettings] = NukeSettings

    async def get_default_settings(self):

View file

@ -1 +0,0 @@
__version__ = "0.1.10"

View file

@ -0,0 +1,3 @@
name = "photoshop"
title = "Photoshop"
version = "0.1.2"

View file

@ -1,14 +1,9 @@
from ayon_server.addons import BaseServerAddon
from .settings import PhotoshopSettings, DEFAULT_PHOTOSHOP_SETTING
from .version import __version__


class Photoshop(BaseServerAddon):
    name = "photoshop"
    title = "Photoshop"
    version = __version__
    settings_model = PhotoshopSettings

    async def get_default_settings(self):

Some files were not shown because too many files have changed in this diff Show more