[Automated] Merged develop into main

This commit is contained in:
ynbot 2023-11-07 14:46:23 +01:00 committed by GitHub
commit 0ede10886c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
79 changed files with 748 additions and 455 deletions

View file

@ -35,6 +35,7 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.17.5-nightly.3
- 3.17.5-nightly.2
- 3.17.5-nightly.1
- 3.17.4
@ -134,7 +135,6 @@ body:
- 3.15.1-nightly.5
- 3.15.1-nightly.4
- 3.15.1-nightly.3
- 3.15.1-nightly.2
validations:
required: true
- type: dropdown

View file

@ -282,6 +282,9 @@ def run(script):
"--app_variant",
help="Provide specific app variant for test, empty for latest",
default=None)
@click.option("--app_group",
help="Provide specific app group for test, empty for default",
default=None)
@click.option("-t",
"--timeout",
help="Provide specific timeout value for test case",
@ -294,11 +297,11 @@ def run(script):
help="MongoDB for testing.",
default=None)
def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
timeout, setup_only, mongo_url):
timeout, setup_only, mongo_url, app_group):
"""Run all automatic tests after proper initialization via start.py"""
PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
persist, app_variant, timeout, setup_only,
mongo_url)
mongo_url, app_group)
@main.command(help="DEPRECATED - run sync server")

View file

@ -74,11 +74,6 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info(PUBLISH_PATH)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
register_event_callback("application.launched", application_launch)
@ -186,11 +181,6 @@ def application_launch():
check_inventory()
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle layer visibility on instance toggles."""
instance[0].Visible = new_value
def ls():
"""Yields containers from active AfterEffects document.

View file

@ -173,6 +173,7 @@ def install():
os.remove(filepath)
icon = get_openpype_icon_filepath()
tab_menu_label = os.environ.get("AVALON_LABEL") or "AYON"
# Create context only to get creator plugins, so we don't reset and only
# populate what we need to retrieve the list of creator plugins
@ -197,14 +198,14 @@ def install():
if not network_categories:
continue
key = "openpype_create.{}".format(identifier)
key = "ayon_create.{}".format(identifier)
log.debug(f"Registering {key}")
script = CREATE_SCRIPT.format(identifier=identifier)
data = {
"script": script,
"language": hou.scriptLanguage.Python,
"icon": icon,
"help": "Create OpenPype publish instance for {}".format(
"help": "Create Ayon publish instance for {}".format(
creator.label
),
"help_url": None,
@ -213,7 +214,7 @@ def install():
"cop_viewer_categories": [],
"network_op_type": None,
"viewer_op_type": None,
"locations": ["OpenPype"]
"locations": [tab_menu_label]
}
label = "Create {}".format(creator.label)
tool = hou.shelves.tool(key)

View file

@ -569,9 +569,9 @@ def get_template_from_value(key, value):
return parm
def get_frame_data(node, handle_start=0, handle_end=0, log=None):
"""Get the frame data: start frame, end frame, steps,
start frame with start handle and end frame with end handle.
def get_frame_data(node, log=None):
"""Get the frame data: `frameStartHandle`, `frameEndHandle`
and `byFrameStep`.
This function uses Houdini node's `trange`, `t1, `t2` and `t3`
parameters as the source of truth for the full inclusive frame
@ -579,20 +579,17 @@ def get_frame_data(node, handle_start=0, handle_end=0, log=None):
range including the handles.
The non-inclusive frame start and frame end without handles
are computed by subtracting the handles from the inclusive
can be computed by subtracting the handles from the inclusive
frame range.
Args:
node (hou.Node): ROP node to retrieve frame range from,
the frame range is assumed to be the frame range
*including* the start and end handles.
handle_start (int): Start handles.
handle_end (int): End handles.
log (logging.Logger): Logger to log to.
Returns:
dict: frame data for start, end, steps,
start with handle and end with handle
dict: frame data for `frameStartHandle`, `frameEndHandle`
and `byFrameStep`.
"""
@ -623,11 +620,6 @@ def get_frame_data(node, handle_start=0, handle_end=0, log=None):
data["frameEndHandle"] = int(node.evalParm("f2"))
data["byFrameStep"] = node.evalParm("f3")
data["handleStart"] = handle_start
data["handleEnd"] = handle_end
data["frameStart"] = data["frameStartHandle"] + data["handleStart"]
data["frameEnd"] = data["frameEndHandle"] - data["handleEnd"]
return data
@ -1018,7 +1010,7 @@ def self_publish():
def add_self_publish_button(node):
"""Adds a self publish button to the rop node."""
label = os.environ.get("AVALON_LABEL") or "OpenPype"
label = os.environ.get("AVALON_LABEL") or "AYON"
button_parm = hou.ButtonParmTemplate(
"ayon_self_publish",

View file

@ -3,7 +3,6 @@
import os
import sys
import logging
import contextlib
import hou # noqa
@ -66,10 +65,6 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_event_callback("open", on_open)
register_event_callback("new", on_new)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
self._has_been_setup = True
# add houdini vendor packages
hou_pythonpath = os.path.join(HOUDINI_HOST_DIR, "vendor")
@ -406,54 +401,3 @@ def _set_context_settings():
lib.reset_framerange()
lib.update_houdini_vars_context()
def on_pyblish_instance_toggled(instance, new_value, old_value):
"""Toggle saver tool passthrough states on instance toggles."""
@contextlib.contextmanager
def main_take(no_update=True):
"""Enter root take during context"""
original_take = hou.takes.currentTake()
original_update_mode = hou.updateModeSetting()
root = hou.takes.rootTake()
has_changed = False
try:
if original_take != root:
has_changed = True
if no_update:
hou.setUpdateMode(hou.updateMode.Manual)
hou.takes.setCurrentTake(root)
yield
finally:
if has_changed:
if no_update:
hou.setUpdateMode(original_update_mode)
hou.takes.setCurrentTake(original_take)
if not instance.data.get("_allowToggleBypass", True):
return
nodes = instance[:]
if not nodes:
return
# Assume instance node is first node
instance_node = nodes[0]
if not hasattr(instance_node, "isBypassed"):
# Likely not a node that can actually be bypassed
log.debug("Can't bypass node: %s", instance_node.path())
return
if instance_node.isBypassed() != (not old_value):
print("%s old bypass state didn't match old instance state, "
"updating anyway.." % instance_node.path())
try:
# Go into the main take, because when in another take changing
# the bypass state of a note cannot be done due to it being locked
# by default.
with main_take(no_update=True):
instance_node.bypass(not new_value)
except hou.PermissionError as exc:
log.warning("%s - %s", instance_node.path(), exc)

View file

@ -3,6 +3,7 @@
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance, CreatorError
from openpype.lib import EnumDef
import hou
class CreateBGEO(plugin.HoudiniCreator):
@ -13,7 +14,6 @@ class CreateBGEO(plugin.HoudiniCreator):
icon = "gears"
def create(self, subset_name, instance_data, pre_create_data):
import hou
instance_data.pop("active", None)
@ -90,3 +90,9 @@ class CreateBGEO(plugin.HoudiniCreator):
return attrs + [
EnumDef("bgeo_type", bgeo_enum, label="BGEO Options"),
]
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
hou.sopNodeTypeCategory()
]

View file

@ -45,6 +45,11 @@ class CreateCompositeSequence(plugin.HoudiniCreator):
instance_node.setParms(parms)
# Manually set f1 & f2 to $FSTART and $FEND respectively
# to match other Houdini nodes default.
instance_node.parm("f1").setExpression("$FSTART")
instance_node.parm("f2").setExpression("$FEND")
# Lock any parameters in this list
to_lock = ["prim_to_detail_pattern"]
self.lock_parameters(instance_node, to_lock)

View file

@ -5,6 +5,7 @@ from openpype.client import (
get_subsets,
)
from openpype.hosts.houdini.api import plugin
import hou
class CreateHDA(plugin.HoudiniCreator):
@ -35,7 +36,6 @@ class CreateHDA(plugin.HoudiniCreator):
def create_instance_node(
self, node_name, parent, node_type="geometry"):
import hou
parent_node = hou.node("/obj")
if self.selected_nodes:
@ -81,3 +81,8 @@ class CreateHDA(plugin.HoudiniCreator):
pre_create_data) # type: plugin.CreatedInstance
return instance
def get_network_categories(self):
return [
hou.objNodeTypeCategory()
]

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating Redshift proxies."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
import hou
class CreateRedshiftProxy(plugin.HoudiniCreator):
@ -12,7 +12,7 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
icon = "magic"
def create(self, subset_name, instance_data, pre_create_data):
import hou # noqa
# Remove the active, we are checking the bypass flag of the nodes
instance_data.pop("active", None)
@ -28,7 +28,7 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
instance = super(CreateRedshiftProxy, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
pre_create_data)
instance_node = hou.node(instance.get("instance_node"))
@ -44,3 +44,9 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
# Lock some Avalon attributes
to_lock = ["family", "id", "prim_to_detail_pattern"]
self.lock_parameters(instance_node, to_lock)
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
hou.sopNodeTypeCategory()
]

View file

@ -54,6 +54,7 @@ class CreateStaticMesh(plugin.HoudiniCreator):
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
hou.objNodeTypeCategory(),
hou.sopNodeTypeCategory()
]

View file

@ -40,6 +40,7 @@ class CreateVDBCache(plugin.HoudiniCreator):
def get_network_categories(self):
return [
hou.ropNodeTypeCategory(),
hou.objNodeTypeCategory(),
hou.sopNodeTypeCategory()
]

View file

@ -119,7 +119,8 @@ class ImageLoader(load.LoaderPlugin):
if not parent.children():
parent.destroy()
def _get_file_sequence(self, root):
def _get_file_sequence(self, file_path):
root = os.path.dirname(file_path)
files = sorted(os.listdir(root))
first_fname = files[0]

View file

@ -21,8 +21,8 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
label = "Arnold ROP Render Products"
# This specific order value is used so that
# this plugin runs after CollectRopFrameRange
order = pyblish.api.CollectorOrder + 0.4999
# this plugin runs after CollectFrames
order = pyblish.api.CollectorOrder + 0.11
hosts = ["houdini"]
families = ["arnold_rop"]

View file

@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
"""Collector plugin for frames data on ROP instances."""
import hou # noqa
import pyblish.api
from openpype.lib import BoolDef
from openpype.pipeline import OpenPypePyblishPluginMixin
class CollectAssetHandles(pyblish.api.InstancePlugin,
                          OpenPypePyblishPluginMixin):
    """Apply asset handles.

    If instance does not have:
        - frameStart
        - frameEnd
        - handleStart
        - handleEnd
    But it does have:
        - frameStartHandle
        - frameEndHandle
    Then we will retrieve the asset's handles to compute
    the exclusive frame range and actual handle ranges.
    """
    hosts = ["houdini"]
    # This specific order value is used so that
    # this plugin runs after CollectAnatomyInstanceData
    order = pyblish.api.CollectorOrder + 0.499
    label = "Collect Asset Handles"

    # Default for the "use_handles" publish attribute when unset
    use_asset_handles = True

    def process(self, instance):
        """Derive handle and exclusive frame data for the instance."""
        data = instance.data

        # Only act on instances carrying the inclusive frame range
        # (as collected by e.g. CollectRopFrameRange) ...
        if "frameStartHandle" not in data or "frameEndHandle" not in data:
            return

        # ... and that do not already define handles/frame data themselves.
        existing_keys = ("handleStart", "handleEnd", "frameStart", "frameEnd")
        if all(key in data for key in existing_keys):
            return

        attr_values = self.get_attr_values_from_data(data)
        use_handles = attr_values.get("use_handles", self.use_asset_handles)

        handle_start = handle_end = 0
        if use_handles:
            # Take the handle sizes from the asset entity's data
            asset_data = data["assetEntity"]["data"]
            handle_start = asset_data.get("handleStart", 0)
            handle_end = asset_data.get("handleEnd", 0)

        # frameStartHandle/frameEndHandle are inclusive of handles, so the
        # exclusive range is obtained by trimming the handles off both ends.
        frame_start = data["frameStartHandle"] + handle_start
        frame_end = data["frameEndHandle"] - handle_end
        data.update({
            "handleStart": handle_start,
            "handleEnd": handle_end,
            "frameStart": frame_start,
            "frameEnd": frame_end
        })

        # Log debug message about the collected frame range
        if use_handles:
            self.log.debug(
                "Full Frame range with Handles "
                "[{frame_start_handle} - {frame_end_handle}]"
                .format(
                    frame_start_handle=data["frameStartHandle"],
                    frame_end_handle=data["frameEndHandle"]
                )
            )
        else:
            self.log.debug(
                "Use handles is deactivated for this instance, "
                "start and end handles are set to 0."
            )

        # Log collected frame range to the user
        message = "Frame range [{frame_start} - {frame_end}]".format(
            frame_start=frame_start,
            frame_end=frame_end
        )
        if handle_start or handle_end:
            message += " with handles [{handle_start}]-[{handle_end}]".format(
                handle_start=handle_start,
                handle_end=handle_end
            )
        self.log.info(message)

        if data.get("byFrameStep", 1.0) != 1.0:
            self.log.info(
                "Frame steps {}".format(data["byFrameStep"]))

        # Append the inclusive frame range to the instance label.
        label = data.get("label", data["name"])
        data["label"] = (
            "{label} [{frame_start_handle} - {frame_end_handle}]"
            .format(
                label=label,
                frame_start_handle=data["frameStartHandle"],
                frame_end_handle=data["frameEndHandle"]
            )
        )

    @classmethod
    def get_attribute_defs(cls):
        """Expose the 'Use asset handles' toggle in the publisher UI."""
        return [
            BoolDef("use_handles",
                    tooltip="Disable this if you want the publisher to"
                            " ignore start and end handles specified in the"
                            " asset data for this publish instance",
                    default=cls.use_asset_handles,
                    label="Use asset handles")
        ]

View file

@ -11,7 +11,9 @@ from openpype.hosts.houdini.api import lib
class CollectFrames(pyblish.api.InstancePlugin):
"""Collect all frames which would be saved from the ROP nodes"""
order = pyblish.api.CollectorOrder + 0.01
# This specific order value is used so that
# this plugin runs after CollectRopFrameRange
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Frames"
families = ["vdbcache", "imagesequence", "ass",
"redshiftproxy", "review", "bgeo"]
@ -20,8 +22,8 @@ class CollectFrames(pyblish.api.InstancePlugin):
ropnode = hou.node(instance.data["instance_node"])
start_frame = instance.data.get("frameStart", None)
end_frame = instance.data.get("frameEnd", None)
start_frame = instance.data.get("frameStartHandle", None)
end_frame = instance.data.get("frameEndHandle", None)
output_parm = lib.get_output_parameter(ropnode)
if start_frame is not None:

View file

@ -122,10 +122,6 @@ class CollectInstancesUsdLayered(pyblish.api.ContextPlugin):
instance.data.update(save_data)
instance.data["usdLayer"] = layer
# Don't allow the Pyblish `instanceToggled` we have installed
# to set this node to bypass.
instance.data["_allowToggleBypass"] = False
instances.append(instance)
# Store the collected ROP node dependencies

View file

@ -25,8 +25,8 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
label = "Karma ROP Render Products"
# This specific order value is used so that
# this plugin runs after CollectRopFrameRange
order = pyblish.api.CollectorOrder + 0.4999
# this plugin runs after CollectFrames
order = pyblish.api.CollectorOrder + 0.11
hosts = ["houdini"]
families = ["karma_rop"]

View file

@ -25,8 +25,8 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
label = "Mantra ROP Render Products"
# This specific order value is used so that
# this plugin runs after CollectRopFrameRange
order = pyblish.api.CollectorOrder + 0.4999
# this plugin runs after CollectFrames
order = pyblish.api.CollectorOrder + 0.11
hosts = ["houdini"]
families = ["mantra_rop"]

View file

@ -25,8 +25,8 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
label = "Redshift ROP Render Products"
# This specific order value is used so that
# this plugin runs after CollectRopFrameRange
order = pyblish.api.CollectorOrder + 0.4999
# this plugin runs after CollectFrames
order = pyblish.api.CollectorOrder + 0.11
hosts = ["houdini"]
families = ["redshift_rop"]

View file

@ -6,6 +6,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
"""Collect Review Data."""
label = "Collect Review Data"
# This specific order value is used so that
# this plugin runs after CollectRopFrameRange
order = pyblish.api.CollectorOrder + 0.1
hosts = ["houdini"]
families = ["review"]
@ -41,8 +43,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
return
if focal_length_parm.isTimeDependent():
start = instance.data["frameStart"]
end = instance.data["frameEnd"] + 1
start = instance.data["frameStartHandle"]
end = instance.data["frameEndHandle"] + 1
focal_length = [
focal_length_parm.evalAsFloatAtFrame(t)
for t in range(int(start), int(end))

View file

@ -2,22 +2,15 @@
"""Collector plugin for frames data on ROP instances."""
import hou # noqa
import pyblish.api
from openpype.lib import BoolDef
from openpype.hosts.houdini.api import lib
from openpype.pipeline import OpenPypePyblishPluginMixin
class CollectRopFrameRange(pyblish.api.InstancePlugin,
OpenPypePyblishPluginMixin):
class CollectRopFrameRange(pyblish.api.InstancePlugin):
"""Collect all frames which would be saved from the ROP nodes"""
hosts = ["houdini"]
# This specific order value is used so that
# this plugin runs after CollectAnatomyInstanceData
order = pyblish.api.CollectorOrder + 0.499
order = pyblish.api.CollectorOrder
label = "Collect RopNode Frame Range"
use_asset_handles = True
def process(self, instance):
@ -30,78 +23,16 @@ class CollectRopFrameRange(pyblish.api.InstancePlugin,
return
ropnode = hou.node(node_path)
attr_values = self.get_attr_values_from_data(instance.data)
if attr_values.get("use_handles", self.use_asset_handles):
asset_data = instance.data["assetEntity"]["data"]
handle_start = asset_data.get("handleStart", 0)
handle_end = asset_data.get("handleEnd", 0)
else:
handle_start = 0
handle_end = 0
frame_data = lib.get_frame_data(
ropnode, handle_start, handle_end, self.log
ropnode, self.log
)
if not frame_data:
return
# Log debug message about the collected frame range
frame_start = frame_data["frameStart"]
frame_end = frame_data["frameEnd"]
if attr_values.get("use_handles", self.use_asset_handles):
self.log.debug(
"Full Frame range with Handles "
"[{frame_start_handle} - {frame_end_handle}]"
.format(
frame_start_handle=frame_data["frameStartHandle"],
frame_end_handle=frame_data["frameEndHandle"]
)
)
else:
self.log.debug(
"Use handles is deactivated for this instance, "
"start and end handles are set to 0."
)
# Log collected frame range to the user
message = "Frame range [{frame_start} - {frame_end}]".format(
frame_start=frame_start,
frame_end=frame_end
self.log.debug(
"Collected frame_data: {}".format(frame_data)
)
if handle_start or handle_end:
message += " with handles [{handle_start}]-[{handle_end}]".format(
handle_start=handle_start,
handle_end=handle_end
)
self.log.info(message)
if frame_data.get("byFrameStep", 1.0) != 1.0:
self.log.info("Frame steps {}".format(frame_data["byFrameStep"]))
instance.data.update(frame_data)
# Add frame range to label if the instance has a frame range.
label = instance.data.get("label", instance.data["name"])
instance.data["label"] = (
"{label} [{frame_start} - {frame_end}]"
.format(
label=label,
frame_start=frame_start,
frame_end=frame_end
)
)
@classmethod
def get_attribute_defs(cls):
return [
BoolDef("use_handles",
tooltip="Disable this if you want the publisher to"
" ignore start and end handles specified in the"
" asset data for this publish instance",
default=cls.use_asset_handles,
label="Use asset handles")
]

View file

@ -25,8 +25,8 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
label = "VRay ROP Render Products"
# This specific order value is used so that
# this plugin runs after CollectRopFrameRange
order = pyblish.api.CollectorOrder + 0.4999
# this plugin runs after CollectFrames
order = pyblish.api.CollectorOrder + 0.11
hosts = ["houdini"]
families = ["vray_rop"]

View file

@ -56,7 +56,7 @@ class ExtractAss(publish.Extractor):
'ext': ext,
"files": files,
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"frameStart": instance.data["frameStartHandle"],
"frameEnd": instance.data["frameEndHandle"],
}
instance.data["representations"].append(representation)

View file

@ -47,7 +47,7 @@ class ExtractBGEO(publish.Extractor):
"ext": ext.lstrip("."),
"files": output,
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"]
"frameStart": instance.data["frameStartHandle"],
"frameEnd": instance.data["frameEndHandle"]
}
instance.data["representations"].append(representation)

View file

@ -41,8 +41,8 @@ class ExtractComposite(publish.Extractor):
"ext": ext,
"files": output,
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"frameStart": instance.data["frameStartHandle"],
"frameEnd": instance.data["frameEndHandle"],
}
from pprint import pformat

View file

@ -40,9 +40,9 @@ class ExtractFBX(publish.Extractor):
}
# A single frame may also be rendered without start/end frame.
if "frameStart" in instance.data and "frameEnd" in instance.data:
representation["frameStart"] = instance.data["frameStart"]
representation["frameEnd"] = instance.data["frameEnd"]
if "frameStartHandle" in instance.data and "frameEndHandle" in instance.data: # noqa
representation["frameStart"] = instance.data["frameStartHandle"]
representation["frameEnd"] = instance.data["frameEndHandle"]
# set value type for 'representations' key to list
if "representations" not in instance.data:

View file

@ -39,8 +39,8 @@ class ExtractOpenGL(publish.Extractor):
"ext": instance.data["imageFormat"],
"files": output,
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"frameStart": instance.data["frameStartHandle"],
"frameEnd": instance.data["frameEndHandle"],
"tags": tags,
"preview": True,
"camera_name": instance.data.get("review_camera")

View file

@ -44,8 +44,8 @@ class ExtractRedshiftProxy(publish.Extractor):
}
# A single frame may also be rendered without start/end frame.
if "frameStart" in instance.data and "frameEnd" in instance.data:
representation["frameStart"] = instance.data["frameStart"]
representation["frameEnd"] = instance.data["frameEnd"]
if "frameStartHandle" in instance.data and "frameEndHandle" in instance.data: # noqa
representation["frameStart"] = instance.data["frameStartHandle"]
representation["frameEnd"] = instance.data["frameEndHandle"]
instance.data["representations"].append(representation)

View file

@ -40,7 +40,7 @@ class ExtractVDBCache(publish.Extractor):
"ext": "vdb",
"files": output,
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"frameStart": instance.data["frameStartHandle"],
"frameEnd": instance.data["frameEndHandle"],
}
instance.data["representations"].append(representation)

View file

@ -99,7 +99,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
.format(instance))
return
created_instance.publish_attributes["CollectRopFrameRange"]["use_handles"] = False # noqa
created_instance.publish_attributes["CollectAssetHandles"]["use_handles"] = False # noqa
create_context.save_changes()
cls.log.debug("use asset handles is turned off for '{}'"

View file

@ -4,7 +4,7 @@
<subMenu id="openpype_menu">
<labelExpression><![CDATA[
import os
return os.environ.get("AVALON_LABEL") or "OpenPype"
return os.environ.get("AVALON_LABEL") or "AYON"
]]></labelExpression>
<actionItem id="asset_name">
<labelExpression><![CDATA[
@ -16,7 +16,7 @@ return label
<separatorItem/>
<scriptItem id="openpype_create">
<scriptItem id="ayon_create">
<label>Create...</label>
<scriptCode><![CDATA[
import hou
@ -26,7 +26,7 @@ host_tools.show_publisher(parent, tab="create")
]]></scriptCode>
</scriptItem>
<scriptItem id="openpype_load">
<scriptItem id="ayon_load">
<label>Load...</label>
<scriptCode><![CDATA[
import hou
@ -46,7 +46,7 @@ host_tools.show_publisher(parent, tab="publish")
]]></scriptCode>
</scriptItem>
<scriptItem id="openpype_manage">
<scriptItem id="ayon_manage">
<label>Manage...</label>
<scriptCode><![CDATA[
import hou

View file

@ -0,0 +1,131 @@
# -*- coding: utf-8 -*-
"""Validator for Attributes."""
from pyblish.api import ContextPlugin, ValidatorOrder
from pymxs import runtime as rt
from openpype.pipeline.publish import (
OptionalPyblishPluginMixin,
PublishValidationError,
RepairContextAction
)
def has_property(object_name, property_name):
    """Check whether the MaxWrapper object exposes the named property.

    Evaluated through MAXScript's ``isProperty`` via pymxs.
    """
    query = f'isProperty {object_name} "{property_name}"'
    return rt.Execute(query)
def is_matching_value(object_name, property_name, value):
    """Return whether an existing property matches the expected ``value``."""
    current = rt.Execute(f"{object_name}.{property_name}")

    # Enum-style expected values are written with a leading `#` in the
    # settings (e.g. #bob, #sally) while pymxs returns the bare name,
    # so re-add the `#` prefix before comparing. Array literals such as
    # `#()` end with `)` and are deliberately excluded from this wrapping.
    is_enum_literal = (
        isinstance(value, str)
        and value.startswith("#")
        and not value.endswith(")")
    )
    if is_enum_literal:
        current = f"#{current}"

    return current == value
class ValidateAttributes(OptionalPyblishPluginMixin,
                         ContextPlugin):
    """Validates attributes in the project setting are consistent
    with the nodes from MaxWrapper Class in 3ds max.

    E.g. "renderers.current.separateAovFiles",
         "renderers.production.PrimaryGIEngine"

    Admin(s) need to put the dict below and enable this validator for a check:
    {
        "renderers.current":{
            "separateAovFiles" : True
        },
        "renderers.production":{
            "PrimaryGIEngine": "#RS_GIENGINE_BRUTE_FORCE"
        }
        ....
    }
    """

    order = ValidatorOrder
    hosts = ["max"]
    label = "Attributes"
    actions = [RepairContextAction]
    optional = True

    @classmethod
    def get_invalid(cls, context):
        """Collect (object_name, property_name) pairs failing validation.

        Returns:
            list: Pairs that are either missing on the object or whose
                current value does not match the configured value.
                Empty list when nothing is configured.
        """
        attributes = (
            context.data["project_settings"]["max"]["publish"]
            ["ValidateAttributes"]["attributes"]
        )
        if not attributes:
            # Nothing configured to validate; return an empty (falsy)
            # list so callers such as repair() can iterate it safely.
            return []

        invalid = []
        for object_name, required_properties in attributes.items():
            if not rt.Execute(f"isValidValue {object_name}"):
                # Skip checking if the node does not
                # exist in MaxWrapper Class
                cls.log.debug(f"Unable to find '{object_name}'."
                              " Skipping validation of attributes.")
                continue
            for property_name, value in required_properties.items():
                if not has_property(object_name, property_name):
                    cls.log.error(
                        "Non-existing property: "
                        f"{object_name}.{property_name}")
                    invalid.append((object_name, property_name))
                    # Evaluating a non-existing property in MAXScript
                    # would fail (and double-report this pair), so skip
                    # the value comparison for it.
                    continue
                if not is_matching_value(object_name, property_name, value):
                    cls.log.error(
                        f"Invalid value for: {object_name}.{property_name}"
                        f" should be: {value}")
                    invalid.append((object_name, property_name))

        return invalid

    def process(self, context):
        """Raise PublishValidationError when any configured attribute
        is missing or has an unexpected value.
        """
        if not self.is_active(context.data):
            self.log.debug("Skipping Validate Attributes...")
            return
        invalid_attributes = self.get_invalid(context)
        if invalid_attributes:
            bullet_point_invalid_statement = "\n".join(
                "- {}".format(invalid) for invalid
                in invalid_attributes
            )
            report = (
                "Required Attribute(s) have invalid value(s).\n\n"
                f"{bullet_point_invalid_statement}\n\n"
                "You can use repair action to fix them if they are not\n"
                "unknown property value(s)."
            )
            raise PublishValidationError(
                report, title="Invalid Value(s) for Required Attribute(s)")

    @classmethod
    def repair(cls, context):
        """Set every invalid property back to its configured value."""
        attributes = (
            context.data["project_settings"]["max"]["publish"]
            ["ValidateAttributes"]["attributes"]
        )
        invalid_attributes = cls.get_invalid(context)
        for attrs in invalid_attributes:
            prop, attr = attrs
            value = attributes[prop][attr]
            if isinstance(value, str) and not value.startswith("#"):
                # Plain strings must be quoted in MAXScript;
                # enum (#name) and array (#()) literals must not be.
                attribute_fix = '{}.{}="{}"'.format(
                    prop, attr, value
                )
            else:
                attribute_fix = "{}.{}={}".format(
                    prop, attr, value
                )
            rt.Execute(attribute_fix)

View file

@ -129,9 +129,6 @@ class NukeHost(
register_event_callback("workio.open_file", check_inventory_versions)
register_event_callback("taskChanged", change_context_label)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled)
_install_menu()
# add script menu
@ -402,25 +399,6 @@ def add_shortcuts_from_presets():
log.error(e)
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle node passthrough states on instance toggles."""
log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
instance, old_value, new_value))
# Whether instances should be passthrough based on new value
with viewer_update_and_undo_stop():
n = instance[0]
try:
n["publish"].value()
except ValueError:
n = add_publish_knob(n)
log.info(" `Publish` knob was added to write node..")
n["publish"].setValue(new_value)
def containerise(node,
name,
namespace,
@ -478,8 +456,6 @@ def parse_container(node):
"""
data = read_avalon_data(node)
# (TODO) Remove key validation when `ls` has re-implemented.
#
# If not all required data return the empty container
required = ["schema", "id", "name",
"namespace", "loader", "representation"]
@ -487,7 +463,10 @@ def parse_container(node):
return
# Store the node's name
data["objectName"] = node["name"].value()
data.update({
"objectName": node.fullName(),
"node": node,
})
return data

View file

@ -537,6 +537,7 @@ class NukeLoader(LoaderPlugin):
node.addKnob(knob)
def clear_members(self, parent_node):
parent_class = parent_node.Class()
members = self.get_members(parent_node)
dependent_nodes = None
@ -549,6 +550,8 @@ class NukeLoader(LoaderPlugin):
break
for member in members:
if member.Class() == parent_class:
continue
self.log.info("removing node: `{}".format(member.name()))
nuke.delete(member)

View file

@ -64,8 +64,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
data_imprint = {
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name
"colorspaceInput": colorspace
}
for k in add_keys:
@ -194,7 +193,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
version_doc = get_version_by_id(project_name, representation["parent"])
# get corresponding node
GN = nuke.toNode(container['objectName'])
GN = container["node"]
file = get_representation_path(representation).replace("\\", "/")
@ -207,10 +206,11 @@ class LoadBackdropNodes(load.LoaderPlugin):
add_keys = ["source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
data_imprint = {
"representation": str(representation["_id"]),
"version": vname,
"colorspaceInput": colorspace,
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -252,6 +252,6 @@ class LoadBackdropNodes(load.LoaderPlugin):
self.update(container, representation)
def remove(self, container):
node = nuke.toNode(container['objectName'])
node = container["node"]
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -48,10 +48,11 @@ class AlembicCameraLoader(load.LoaderPlugin):
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"objectName": object_name}
data_imprint = {
"frameStart": first,
"frameEnd": last,
"version": vname,
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -111,7 +112,7 @@ class AlembicCameraLoader(load.LoaderPlugin):
project_name = get_current_project_name()
version_doc = get_version_by_id(project_name, representation["parent"])
object_name = container['objectName']
object_name = container["node"]
# get main variables
version_data = version_doc.get("data", {})
@ -124,11 +125,12 @@ class AlembicCameraLoader(load.LoaderPlugin):
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"objectName": object_name}
data_imprint = {
"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -194,6 +196,6 @@ class AlembicCameraLoader(load.LoaderPlugin):
self.update(container, representation)
def remove(self, container):
node = nuke.toNode(container['objectName'])
node = container["node"]
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -189,8 +189,6 @@ class LoadClip(plugin.NukeLoader):
value_ = value_.replace("\\", "/")
data_imprint[key] = value_
data_imprint["objectName"] = read_name
if add_retime and version_data.get("retime", None):
data_imprint["addRetime"] = True
@ -254,7 +252,7 @@ class LoadClip(plugin.NukeLoader):
is_sequence = len(representation["files"]) > 1
read_node = nuke.toNode(container['objectName'])
read_node = container["node"]
if is_sequence:
representation = self._representation_with_hash_in_frame(
@ -299,9 +297,6 @@ class LoadClip(plugin.NukeLoader):
"Representation id `{}` is failing to load".format(repre_id))
return
read_name = self._get_node_name(representation)
read_node["name"].setValue(read_name)
read_node["file"].setValue(filepath)
# to avoid multiple undo steps for rest of process
@ -356,7 +351,7 @@ class LoadClip(plugin.NukeLoader):
self.set_as_member(read_node)
def remove(self, container):
read_node = nuke.toNode(container['objectName'])
read_node = container["node"]
assert read_node.Class() == "Read", "Must be Read"
with viewer_update_and_undo_stop():

View file

@ -62,11 +62,12 @@ class LoadEffects(load.LoaderPlugin):
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
data_imprint = {
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -159,7 +160,7 @@ class LoadEffects(load.LoaderPlugin):
version_doc = get_version_by_id(project_name, representation["parent"])
# get corresponding node
GN = nuke.toNode(container['objectName'])
GN = container["node"]
file = get_representation_path(representation).replace("\\", "/")
name = container['name']
@ -175,12 +176,13 @@ class LoadEffects(load.LoaderPlugin):
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
data_imprint = {
"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -212,7 +214,7 @@ class LoadEffects(load.LoaderPlugin):
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
for _, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if k in self.ignore_attr:
@ -346,6 +348,6 @@ class LoadEffects(load.LoaderPlugin):
self.update(container, representation)
def remove(self, container):
node = nuke.toNode(container['objectName'])
node = container["node"]
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -63,11 +63,12 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
data_imprint = {
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -98,7 +99,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
for _, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if k in self.ignore_attr:
@ -164,28 +165,26 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
version_doc = get_version_by_id(project_name, representation["parent"])
# get corresponding node
GN = nuke.toNode(container['objectName'])
GN = container["node"]
file = get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version_doc.get("data", {})
vname = version_doc.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
workfile_first_frame = int(nuke.root()["first_frame"].getValue())
namespace = container['namespace']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
data_imprint = {
"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -217,7 +216,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
for _, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if k in self.ignore_attr:
@ -251,11 +250,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
output = nuke.createNode("Output")
output.setInput(0, pre_node)
# # try to place it under Viewer1
# if not self.connect_active_viewer(GN):
# nuke.delete(GN)
# return
# get all versions in list
last_version_doc = get_last_version_by_subset_id(
project_name, version_doc["parent"], fields=["_id"]
@ -365,6 +359,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
self.update(container, representation)
def remove(self, container):
node = nuke.toNode(container['objectName'])
node = container["node"]
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -64,11 +64,12 @@ class LoadGizmo(load.LoaderPlugin):
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
data_imprint = {
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -111,7 +112,7 @@ class LoadGizmo(load.LoaderPlugin):
version_doc = get_version_by_id(project_name, representation["parent"])
# get corresponding node
group_node = nuke.toNode(container['objectName'])
group_node = container["node"]
file = get_representation_path(representation).replace("\\", "/")
name = container['name']
@ -126,12 +127,13 @@ class LoadGizmo(load.LoaderPlugin):
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
data_imprint = {
"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -175,6 +177,6 @@ class LoadGizmo(load.LoaderPlugin):
self.update(container, representation)
def remove(self, container):
node = nuke.toNode(container['objectName'])
node = container["node"]
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -66,11 +66,12 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
data_imprint = {
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -118,7 +119,7 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
version_doc = get_version_by_id(project_name, representation["parent"])
# get corresponding node
group_node = nuke.toNode(container['objectName'])
group_node = container["node"]
file = get_representation_path(representation).replace("\\", "/")
name = container['name']
@ -133,12 +134,13 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
data_imprint = {
"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -256,6 +258,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin):
self.update(container, representation)
def remove(self, container):
node = nuke.toNode(container['objectName'])
node = container["node"]
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -146,8 +146,6 @@ class LoadImage(load.LoaderPlugin):
data_imprint.update(
{k: context["version"]['data'].get(k, str(None))})
data_imprint.update({"objectName": read_name})
r["tile_color"].setValue(int("0x4ecd25ff", 16))
return containerise(r,
@ -168,7 +166,7 @@ class LoadImage(load.LoaderPlugin):
inputs:
"""
node = nuke.toNode(container["objectName"])
node = container["node"]
frame_number = node["first"].value()
assert node.Class() == "Read", "Must be Read"
@ -237,7 +235,7 @@ class LoadImage(load.LoaderPlugin):
self.log.info("updated to version: {}".format(version_doc.get("name")))
def remove(self, container):
node = nuke.toNode(container['objectName'])
node = container["node"]
assert node.Class() == "Read", "Must be Read"
with viewer_update_and_undo_stop():

View file

@ -46,10 +46,11 @@ class AlembicModelLoader(load.LoaderPlugin):
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"objectName": object_name}
data_imprint = {
"frameStart": first,
"frameEnd": last,
"version": vname
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -114,9 +115,9 @@ class AlembicModelLoader(load.LoaderPlugin):
# Get version from io
project_name = get_current_project_name()
version_doc = get_version_by_id(project_name, representation["parent"])
object_name = container['objectName']
# get corresponding node
model_node = nuke.toNode(object_name)
model_node = container["node"]
# get main variables
version_data = version_doc.get("data", {})
@ -129,11 +130,12 @@ class AlembicModelLoader(load.LoaderPlugin):
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"objectName": object_name}
data_imprint = {
"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname
}
for k in add_keys:
data_imprint.update({k: version_data[k]})
@ -142,7 +144,6 @@ class AlembicModelLoader(load.LoaderPlugin):
file = get_representation_path(representation).replace("\\", "/")
with maintained_selection():
model_node = nuke.toNode(object_name)
model_node['selected'].setValue(True)
# collect input output dependencies
@ -163,8 +164,10 @@ class AlembicModelLoader(load.LoaderPlugin):
ypos = model_node.ypos()
nuke.nodeCopy("%clipboard%")
nuke.delete(model_node)
# paste the node back and set the position
nuke.nodePaste("%clipboard%")
model_node = nuke.toNode(object_name)
model_node = nuke.selectedNode()
model_node.setXYpos(xpos, ypos)
# link to original input nodes

View file

@ -55,7 +55,7 @@ class LoadOcioLookNodes(load.LoaderPlugin):
"""
namespace = namespace or context['asset']['name']
suffix = secrets.token_hex(nbytes=4)
object_name = "{}_{}_{}".format(
node_name = "{}_{}_{}".format(
name, namespace, suffix)
# getting file path
@ -64,7 +64,9 @@ class LoadOcioLookNodes(load.LoaderPlugin):
json_f = self._load_json_data(filepath)
group_node = self._create_group_node(
object_name, filepath, json_f["data"])
filepath, json_f["data"])
# renaming group node
group_node["name"].setValue(node_name)
self._node_version_color(context["version"], group_node)
@ -76,17 +78,14 @@ class LoadOcioLookNodes(load.LoaderPlugin):
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data={
"objectName": object_name,
}
loader=self.__class__.__name__
)
def _create_group_node(
self,
object_name,
filepath,
data
data,
group_node=None
):
"""Creates group node with all the nodes inside.
@ -94,9 +93,9 @@ class LoadOcioLookNodes(load.LoaderPlugin):
in between - in case those are needed.
Arguments:
object_name (str): name of the group node
filepath (str): path to json file
data (dict): data from json file
group_node (Optional[nuke.Node]): group node or None
Returns:
nuke.Node: group node with all the nodes inside
@ -117,7 +116,6 @@ class LoadOcioLookNodes(load.LoaderPlugin):
input_node = None
output_node = None
group_node = nuke.toNode(object_name)
if group_node:
# remove all nodes between Input and Output nodes
for node in group_node.nodes():
@ -130,7 +128,6 @@ class LoadOcioLookNodes(load.LoaderPlugin):
else:
group_node = nuke.createNode(
"Group",
"name {}_1".format(object_name),
inpanel=False
)
@ -227,16 +224,16 @@ class LoadOcioLookNodes(load.LoaderPlugin):
project_name = get_current_project_name()
version_doc = get_version_by_id(project_name, representation["parent"])
object_name = container['objectName']
group_node = container["node"]
filepath = get_representation_path(representation)
json_f = self._load_json_data(filepath)
group_node = self._create_group_node(
object_name,
filepath,
json_f["data"]
json_f["data"],
group_node
)
self._node_version_color(version_doc, group_node)

View file

@ -46,8 +46,6 @@ class LinkAsGroup(load.LoaderPlugin):
file = self.filepath_from_context(context).replace("\\", "/")
self.log.info("file: {}\n".format(file))
precomp_name = context["representation"]["context"]["subset"]
self.log.info("versionData: {}\n".format(context["version"]["data"]))
# add additional metadata from the version to imprint to Avalon knob
@ -62,7 +60,6 @@ class LinkAsGroup(load.LoaderPlugin):
}
for k in add_keys:
data_imprint.update({k: context["version"]['data'][k]})
data_imprint.update({"objectName": precomp_name})
# group context is set to precomp, so back up one level.
nuke.endGroup()
@ -118,7 +115,7 @@ class LinkAsGroup(load.LoaderPlugin):
inputs:
"""
node = nuke.toNode(container['objectName'])
node = container["node"]
root = get_representation_path(representation).replace("\\", "/")
@ -159,6 +156,6 @@ class LinkAsGroup(load.LoaderPlugin):
self.log.info("updated to version: {}".format(version_doc.get("name")))
def remove(self, container):
node = nuke.toNode(container['objectName'])
node = container["node"]
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -48,11 +48,6 @@ class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info(PUBLISH_PATH)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
register_event_callback("application.launched", on_application_launch)
@ -177,11 +172,6 @@ def on_application_launch():
check_inventory()
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle layer visibility on instance toggles."""
instance[0].Visible = new_value
def ls():
"""Yields containers from active Photoshop document

View file

@ -84,10 +84,6 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_loader_plugin_path(load_dir)
register_creator_plugin_path(create_dir)
registered_callbacks = (
pyblish.api.registered_callbacks().get("instanceToggled") or []
)
register_event_callback("application.launched", self.initial_launch)
register_event_callback("application.exit", self.application_exit)

View file

@ -1,3 +1,6 @@
import collections
from openpype.client import get_project
from openpype_modules.ftrack.lib import BaseEvent
@ -73,8 +76,21 @@ class FirstVersionStatus(BaseEvent):
if not self.task_status_map:
return
entities_info = self.filter_event_ents(event)
if not entities_info:
filtered_entities_info = self.filter_entities_info(event)
if not filtered_entities_info:
return
for project_id, entities_info in filtered_entities_info.items():
self.process_by_project(session, event, project_id, entities_info)
def process_by_project(self, session, event, project_id, entities_info):
project_name = self.get_project_name_from_event(
session, event, project_id
)
if get_project(project_name) is None:
self.log.debug(
f"Project '{project_name}' not found in OpenPype. Skipping"
)
return
entity_ids = []
@ -154,18 +170,18 @@ class FirstVersionStatus(BaseEvent):
exc_info=True
)
def filter_event_ents(self, event):
filtered_ents = []
for entity in event["data"].get("entities", []):
def filter_entities_info(self, event):
filtered_entities_info = collections.defaultdict(list)
for entity_info in event["data"].get("entities", []):
# Care only about add actions
if entity.get("action") != "add":
if entity_info.get("action") != "add":
continue
# Filter AssetVersions
if entity["entityType"] != "assetversion":
if entity_info["entityType"] != "assetversion":
continue
entity_changes = entity.get("changes") or {}
entity_changes = entity_info.get("changes") or {}
# Check if version of Asset Version is `1`
version_num = entity_changes.get("version", {}).get("new")
@ -177,9 +193,18 @@ class FirstVersionStatus(BaseEvent):
if not task_id:
continue
filtered_ents.append(entity)
project_id = None
for parent_item in reversed(entity_info["parents"]):
if parent_item["entityType"] == "show":
project_id = parent_item["entityId"]
break
return filtered_ents
if project_id is None:
continue
filtered_entities_info[project_id].append(entity_info)
return filtered_entities_info
def register(session):

View file

@ -1,4 +1,6 @@
import collections
from openpype.client import get_project
from openpype_modules.ftrack.lib import BaseEvent
@ -99,6 +101,10 @@ class NextTaskUpdate(BaseEvent):
project_name = self.get_project_name_from_event(
session, event, project_id
)
if get_project(project_name) is None:
self.log.debug("Project not found in OpenPype. Skipping")
return
# Load settings
project_settings = self.get_project_settings_from_event(
event, project_name

View file

@ -3,6 +3,8 @@ import copy
from typing import Any
import ftrack_api
from openpype.client import get_project
from openpype_modules.ftrack.lib import (
BaseEvent,
query_custom_attributes,
@ -139,6 +141,10 @@ class PushHierValuesToNonHierEvent(BaseEvent):
project_name: str = self.get_project_name_from_event(
session, event, project_id
)
if get_project(project_name) is None:
self.log.debug("Project not found in OpenPype. Skipping")
return set(), set()
# Load settings
project_settings: dict[str, Any] = (
self.get_project_settings_from_event(event, project_name)

View file

@ -1,4 +1,6 @@
import collections
from openpype.client import get_project
from openpype_modules.ftrack.lib import BaseEvent
@ -60,6 +62,10 @@ class TaskStatusToParent(BaseEvent):
project_name = self.get_project_name_from_event(
session, event, project_id
)
if get_project(project_name) is None:
self.log.debug("Project not found in OpenPype. Skipping")
return
# Load settings
project_settings = self.get_project_settings_from_event(
event, project_name

View file

@ -1,4 +1,6 @@
import collections
from openpype.client import get_project
from openpype_modules.ftrack.lib import BaseEvent
@ -102,6 +104,10 @@ class TaskToVersionStatus(BaseEvent):
project_name = self.get_project_name_from_event(
session, event, project_id
)
if get_project(project_name) is None:
self.log.debug("Project not found in OpenPype. Skipping")
return
# Load settings
project_settings = self.get_project_settings_from_event(
event, project_name

View file

@ -1,4 +1,6 @@
import collections
from openpype.client import get_project
from openpype_modules.ftrack.lib import BaseEvent
@ -22,6 +24,10 @@ class ThumbnailEvents(BaseEvent):
project_name = self.get_project_name_from_event(
session, event, project_id
)
if get_project(project_name) is None:
self.log.debug("Project not found in OpenPype. Skipping")
return
# Load settings
project_settings = self.get_project_settings_from_event(
event, project_name

View file

@ -1,3 +1,4 @@
from openpype.client import get_project
from openpype_modules.ftrack.lib import BaseEvent
@ -50,6 +51,10 @@ class VersionToTaskStatus(BaseEvent):
project_name = self.get_project_name_from_event(
session, event, project_id
)
if get_project(project_name) is None:
self.log.debug("Project not found in OpenPype. Skipping")
return
# Load settings
project_settings = self.get_project_settings_from_event(
event, project_name

View file

@ -214,7 +214,7 @@ class PypeCommands:
def run_tests(self, folder, mark, pyargs,
test_data_folder, persist, app_variant, timeout, setup_only,
mongo_url):
mongo_url, app_group):
"""
Runs tests from 'folder'
@ -260,6 +260,9 @@ class PypeCommands:
if persist:
args.extend(["--persist", persist])
if app_group:
args.extend(["--app_group", app_group])
if app_variant:
args.extend(["--app_variant", app_variant])

View file

@ -639,6 +639,15 @@ def _convert_3dsmax_project_settings(ayon_settings, output):
for item in point_cloud_attribute
}
ayon_max["PointCloud"]["attribute"] = new_point_cloud_attribute
# --- Publish (START) ---
ayon_publish = ayon_max["publish"]
try:
attributes = json.loads(
ayon_publish["ValidateAttributes"]["attributes"]
)
except ValueError:
attributes = {}
ayon_publish["ValidateAttributes"]["attributes"] = attributes
output["max"] = ayon_max

View file

@ -137,7 +137,7 @@
}
},
"publish": {
"CollectRopFrameRange": {
"CollectAssetHandles": {
"use_asset_handles": true
},
"ValidateContainers": {

View file

@ -36,6 +36,10 @@
"enabled": true,
"optional": true,
"active": true
},
"ValidateAttributes": {
"enabled": false,
"attributes": {}
}
}
}

View file

@ -11,8 +11,8 @@
{
"type": "dict",
"collapsible": true,
"key": "CollectRopFrameRange",
"label": "Collect Rop Frame Range",
"key": "CollectAssetHandles",
"label": "Collect Asset Handles",
"children": [
{
"type": "label",

View file

@ -28,6 +28,25 @@
"label": "Active"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ValidateAttributes",
"label": "ValidateAttributes",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "raw-json",
"key": "attributes",
"label": "Attributes"
}
]
}
]
}

View file

@ -3,7 +3,7 @@ from qtpy import QtWidgets, QtCore
from openpype.tools.flickcharm import FlickCharm
from openpype.tools.utils import PlaceholderLineEdit, RefreshButton
from openpype.tools.ayon_utils.widgets import (
ProjectsModel,
ProjectsQtModel,
ProjectSortFilterProxy,
)
from openpype.tools.ayon_utils.models import PROJECTS_MODEL_SENDER
@ -95,7 +95,7 @@ class ProjectsWidget(QtWidgets.QWidget):
projects_view.setSelectionMode(QtWidgets.QListView.NoSelection)
flick = FlickCharm(parent=self)
flick.activateOn(projects_view)
projects_model = ProjectsModel(controller)
projects_model = ProjectsQtModel(controller)
projects_proxy_model = ProjectSortFilterProxy()
projects_proxy_model.setSourceModel(projects_model)
@ -133,9 +133,14 @@ class ProjectsWidget(QtWidgets.QWidget):
return self._projects_model.has_content()
def _on_view_clicked(self, index):
if index.isValid():
project_name = index.data(QtCore.Qt.DisplayRole)
self._controller.set_selected_project(project_name)
if not index.isValid():
return
model = index.model()
flags = model.flags(index)
if not flags & QtCore.Qt.ItemIsEnabled:
return
project_name = index.data(QtCore.Qt.DisplayRole)
self._controller.set_selected_project(project_name)
def _on_project_filter_change(self, text):
self._projects_proxy_model.setFilterFixedString(text)

View file

@ -8,7 +8,7 @@ from openpype.tools.utils import (
from openpype.style import get_objected_colors
from openpype.tools.ayon_utils.widgets import (
FoldersModel,
FoldersQtModel,
FOLDERS_MODEL_SENDER_NAME,
)
from openpype.tools.ayon_utils.widgets.folders_widget import FOLDER_ID_ROLE
@ -182,7 +182,7 @@ class UnderlinesFolderDelegate(QtWidgets.QItemDelegate):
painter.restore()
class LoaderFoldersModel(FoldersModel):
class LoaderFoldersModel(FoldersQtModel):
def __init__(self, *args, **kwargs):
super(LoaderFoldersModel, self).__init__(*args, **kwargs)

View file

@ -87,7 +87,7 @@ def _get_project_items_from_entitiy(projects):
class ProjectsModel(object):
def __init__(self, controller):
self._projects_cache = CacheItem(default_factory=dict)
self._projects_cache = CacheItem(default_factory=list)
self._project_items_by_name = {}
self._projects_by_name = {}
@ -103,8 +103,18 @@ class ProjectsModel(object):
self._refresh_projects_cache()
def get_project_items(self, sender):
"""
Args:
sender (str): Name of sender who asked for items.
Returns:
Union[list[ProjectItem], None]: List of project items, or None
if model is refreshing.
"""
if not self._projects_cache.is_valid:
self._refresh_projects_cache(sender)
return self._refresh_projects_cache(sender)
return self._projects_cache.get_data()
def get_project_entity(self, project_name):
@ -136,11 +146,12 @@ class ProjectsModel(object):
def _refresh_projects_cache(self, sender=None):
if self._is_refreshing:
return
return None
with self._project_refresh_event_manager(sender):
project_items = self._query_projects()
self._projects_cache.update_data(project_items)
return self._projects_cache.get_data()
def _query_projects(self):
projects = ayon_api.get_projects(fields=["name", "active", "library"])

View file

@ -1,19 +1,19 @@
from .projects_widget import (
# ProjectsWidget,
ProjectsCombobox,
ProjectsModel,
ProjectsQtModel,
ProjectSortFilterProxy,
)
from .folders_widget import (
FoldersWidget,
FoldersModel,
FoldersQtModel,
FOLDERS_MODEL_SENDER_NAME,
)
from .tasks_widget import (
TasksWidget,
TasksModel,
TasksQtModel,
TASKS_MODEL_SENDER_NAME,
)
from .utils import (
@ -25,15 +25,15 @@ from .utils import (
__all__ = (
# "ProjectsWidget",
"ProjectsCombobox",
"ProjectsModel",
"ProjectsQtModel",
"ProjectSortFilterProxy",
"FoldersWidget",
"FoldersModel",
"FoldersQtModel",
"FOLDERS_MODEL_SENDER_NAME",
"TasksWidget",
"TasksModel",
"TasksQtModel",
"TASKS_MODEL_SENDER_NAME",
"get_qt_icon",

View file

@ -16,7 +16,7 @@ FOLDER_PATH_ROLE = QtCore.Qt.UserRole + 3
FOLDER_TYPE_ROLE = QtCore.Qt.UserRole + 4
class FoldersModel(QtGui.QStandardItemModel):
class FoldersQtModel(QtGui.QStandardItemModel):
"""Folders model which cares about refresh of folders.
Args:
@ -26,7 +26,7 @@ class FoldersModel(QtGui.QStandardItemModel):
refreshed = QtCore.Signal()
def __init__(self, controller):
super(FoldersModel, self).__init__()
super(FoldersQtModel, self).__init__()
self._controller = controller
self._items_by_id = {}
@ -104,8 +104,8 @@ class FoldersModel(QtGui.QStandardItemModel):
if not project_name:
self._last_project_name = project_name
self._current_refresh_thread = None
self._fill_items({})
self._current_refresh_thread = None
return
self._is_refreshing = True
@ -152,6 +152,7 @@ class FoldersModel(QtGui.QStandardItemModel):
return
self._fill_items(thread.get_result())
self._current_refresh_thread = None
def _fill_item_data(self, item, folder_item):
"""
@ -281,7 +282,7 @@ class FoldersWidget(QtWidgets.QWidget):
folders_view = TreeView(self)
folders_view.setHeaderHidden(True)
folders_model = FoldersModel(controller)
folders_model = FoldersQtModel(controller)
folders_proxy_model = RecursiveSortFilterProxyModel()
folders_proxy_model.setSourceModel(folders_model)
folders_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)

View file

@ -10,11 +10,11 @@ PROJECT_IS_CURRENT_ROLE = QtCore.Qt.UserRole + 4
LIBRARY_PROJECT_SEPARATOR_ROLE = QtCore.Qt.UserRole + 5
class ProjectsModel(QtGui.QStandardItemModel):
class ProjectsQtModel(QtGui.QStandardItemModel):
refreshed = QtCore.Signal()
def __init__(self, controller):
super(ProjectsModel, self).__init__()
super(ProjectsQtModel, self).__init__()
self._controller = controller
self._project_items = {}
@ -35,12 +35,11 @@ class ProjectsModel(QtGui.QStandardItemModel):
self._selected_project = None
self._is_refreshing = False
self._refresh_thread = None
@property
def is_refreshing(self):
return self._is_refreshing
return self._refresh_thread is not None
def refresh(self):
self._refresh()
@ -169,28 +168,33 @@ class ProjectsModel(QtGui.QStandardItemModel):
return self._select_item
def _refresh(self):
if self._is_refreshing:
if self._refresh_thread is not None:
return
self._is_refreshing = True
refresh_thread = RefreshThread(
"projects", self._query_project_items
)
refresh_thread.refresh_finished.connect(self._refresh_finished)
refresh_thread.start()
self._refresh_thread = refresh_thread
refresh_thread.start()
def _query_project_items(self):
return self._controller.get_project_items()
return self._controller.get_project_items(
sender=PROJECTS_MODEL_SENDER
)
def _refresh_finished(self):
# TODO check if failed
result = self._refresh_thread.get_result()
if result is not None:
self._fill_items(result)
self._refresh_thread = None
self._fill_items(result)
self._is_refreshing = False
self.refreshed.emit()
if result is None:
self._refresh()
else:
self.refreshed.emit()
def _fill_items(self, project_items):
new_project_names = {
@ -403,7 +407,7 @@ class ProjectsCombobox(QtWidgets.QWidget):
projects_combobox = QtWidgets.QComboBox(self)
combobox_delegate = QtWidgets.QStyledItemDelegate(projects_combobox)
projects_combobox.setItemDelegate(combobox_delegate)
projects_model = ProjectsModel(controller)
projects_model = ProjectsQtModel(controller)
projects_proxy_model = ProjectSortFilterProxy()
projects_proxy_model.setSourceModel(projects_model)
projects_combobox.setModel(projects_proxy_model)

View file

@ -12,7 +12,7 @@ ITEM_NAME_ROLE = QtCore.Qt.UserRole + 3
TASK_TYPE_ROLE = QtCore.Qt.UserRole + 4
class TasksModel(QtGui.QStandardItemModel):
class TasksQtModel(QtGui.QStandardItemModel):
"""Tasks model which cares about refresh of tasks by folder id.
Args:
@ -22,7 +22,7 @@ class TasksModel(QtGui.QStandardItemModel):
refreshed = QtCore.Signal()
def __init__(self, controller):
super(TasksModel, self).__init__()
super(TasksQtModel, self).__init__()
self._controller = controller
@ -185,28 +185,7 @@ class TasksModel(QtGui.QStandardItemModel):
thread.refresh_finished.connect(self._on_refresh_thread)
thread.start()
def _on_refresh_thread(self, thread_id):
"""Callback when refresh thread is finished.
Technically can be running multiple refresh threads at the same time,
to avoid using values from wrong thread, we check if thread id is
current refresh thread id.
Tasks are stored by name, so if a folder has same task name as
previously selected folder it keeps the selection.
Args:
thread_id (str): Thread id.
"""
# Make sure to remove thread from '_refresh_threads' dict
thread = self._refresh_threads.pop(thread_id)
if (
self._current_refresh_thread is None
or thread_id != self._current_refresh_thread.id
):
return
def _fill_data_from_thread(self, thread):
task_items = thread.get_result()
# Task items are refreshed
if task_items is None:
@ -247,7 +226,33 @@ class TasksModel(QtGui.QStandardItemModel):
if new_items:
root_item.appendRows(new_items)
def _on_refresh_thread(self, thread_id):
"""Callback when refresh thread is finished.
Technically can be running multiple refresh threads at the same time,
to avoid using values from wrong thread, we check if thread id is
current refresh thread id.
Tasks are stored by name, so if a folder has same task name as
previously selected folder it keeps the selection.
Args:
thread_id (str): Thread id.
"""
# Make sure to remove thread from '_refresh_threads' dict
thread = self._refresh_threads.pop(thread_id)
if (
self._current_refresh_thread is None
or thread_id != self._current_refresh_thread.id
):
return
self._fill_data_from_thread(thread)
root_item = self.invisibleRootItem()
self._has_content = root_item.rowCount() > 0
self._current_refresh_thread = None
self._is_refreshing = False
self.refreshed.emit()
@ -280,7 +285,7 @@ class TasksModel(QtGui.QStandardItemModel):
if section == 0:
return "Tasks"
return super(TasksModel, self).headerData(
return super(TasksQtModel, self).headerData(
section, orientation, role
)
@ -305,7 +310,7 @@ class TasksWidget(QtWidgets.QWidget):
tasks_view = DeselectableTreeView(self)
tasks_view.setIndentation(0)
tasks_model = TasksModel(controller)
tasks_model = TasksQtModel(controller)
tasks_proxy_model = QtCore.QSortFilterProxyModel()
tasks_proxy_model.setSourceModel(tasks_model)
tasks_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)

View file

@ -54,6 +54,8 @@ class _IconsCache:
@classmethod
def get_icon(cls, icon_def):
if not icon_def:
return None
icon_type = icon_def["type"]
cache_key = cls._get_cache_key(icon_def)
cache = cls._cache.get(cache_key)

View file

@ -21,7 +21,7 @@ class ValidateBaseModel(BaseSettingsModel):
class CollectAnatomyInstanceDataModel(BaseSettingsModel):
_isGroup = True
follow_workfile_version: bool = Field(
True, title="Collect Anatomy Instance Data"
True, title="Follow workfile version"
)

View file

@ -489,7 +489,7 @@ DEFAULT_TOOLS_VALUES = {
"template_name": "publish_online"
},
{
"families": [
"product_types": [
"tycache"
],
"hosts": [

View file

@ -3,7 +3,7 @@ from ayon_server.settings import BaseSettingsModel
# Publish Plugins
class CollectRopFrameRangeModel(BaseSettingsModel):
class CollectAssetHandlesModel(BaseSettingsModel):
"""Collect Frame Range
Disable this if you want the publisher to
ignore start and end handles specified in the

View file

@ -1 +1 @@
__version__ = "0.2.7"
__version__ = "0.2.8"

View file

@ -1,6 +1,30 @@
from pydantic import Field
import json
from pydantic import Field, validator
from ayon_server.settings import BaseSettingsModel
from ayon_server.exceptions import BadRequestException
class ValidateAttributesModel(BaseSettingsModel):
enabled: bool = Field(title="ValidateAttributes")
attributes: str = Field(
"{}", title="Attributes", widget="textarea")
@validator("attributes")
def validate_json(cls, value):
if not value.strip():
return "{}"
try:
converted_value = json.loads(value)
success = isinstance(converted_value, dict)
except json.JSONDecodeError:
success = False
if not success:
raise BadRequestException(
"The attibutes can't be parsed as json object"
)
return value
class BasicValidateModel(BaseSettingsModel):
@ -15,6 +39,10 @@ class PublishersModel(BaseSettingsModel):
title="Validate Frame Range",
section="Validators"
)
ValidateAttributes: ValidateAttributesModel = Field(
default_factory=ValidateAttributesModel,
title="Validate Attributes"
)
DEFAULT_PUBLISH_SETTINGS = {
@ -22,5 +50,9 @@ DEFAULT_PUBLISH_SETTINGS = {
"enabled": True,
"optional": True,
"active": True
}
},
"ValidateAttributes": {
"enabled": False,
"attributes": "{}"
},
}

View file

@ -1 +1 @@
__version__ = "0.1.0"
__version__ = "0.1.1"

View file

@ -14,6 +14,11 @@ def pytest_addoption(parser):
help="True - keep test_db, test_openpype, outputted test files"
)
parser.addoption(
"--app_group", action="store", default=None,
help="Keep empty to use default application or explicit"
)
parser.addoption(
"--app_variant", action="store", default=None,
help="Keep empty to locate latest installed variant or explicit"
@ -45,6 +50,11 @@ def persist(request):
return request.config.getoption("--persist")
@pytest.fixture(scope="module")
def app_group(request):
return request.config.getoption("--app_group")
@pytest.fixture(scope="module")
def app_variant(request):
return request.config.getoption("--app_variant")

View file

@ -248,19 +248,22 @@ class PublishTest(ModuleUnitTest):
SETUP_ONLY = False
@pytest.fixture(scope="module")
def app_name(self, app_variant):
def app_name(self, app_variant, app_group):
"""Returns calculated value for ApplicationManager. Eg.(nuke/12-2)"""
from openpype.lib import ApplicationManager
app_variant = app_variant or self.APP_VARIANT
app_group = app_group or self.APP_GROUP
application_manager = ApplicationManager()
if not app_variant:
variant = (
application_manager.find_latest_available_variant_for_group(
self.APP_GROUP))
app_group
)
)
app_variant = variant.name
yield "{}/{}".format(self.APP_GROUP, app_variant)
yield "{}/{}".format(app_group, app_variant)
@pytest.fixture(scope="module")
def app_args(self, download_test_data):

View file

@ -118,4 +118,28 @@ Current OpenPype integration (ver 3.15.0) supports only ```PointCache```, ```Ca
This part of documentation is still work in progress.
:::
## Validators
Current Openpype integration supports different validators such as Frame Range and Attributes.
Some validators are mandatory while some are optional and user can choose to enable them in the setting.
**Validate Frame Range**: Optional Validator for checking Frame Range
**Validate Attributes**: Optional Validator for checking if object properties' attributes are valid
in MaxWrapper Class.
:::note
Users can write the properties' attributes they want to check in dict format in the setting
before validation. The attributes are then to be converted into Maxscript and do a check.
E.g. ```renderers.current.separateAovFiles``` and ```renderers.current.PrimaryGIEngine```
User can put the attributes in the dict format below
```
{
"renderer.current":{
"separateAovFiles" : True
"PrimaryGIEngine": "#RS_GIENGINE_BRUTE_FORCE"
}
}
```
![Validate Attribute Setting](assets/3dsmax_validate_attributes.png)
:::
## ...to be added

Binary file not shown.

After

Width:  |  Height:  |  Size: 34 KiB