Merge remote-tracking branch 'upstream/develop' into chore/maya_remove_publish_gui_filters

This commit is contained in:
Roy Nieterau 2023-10-07 20:25:54 +02:00
commit 3a0f2e383a
104 changed files with 3128 additions and 404 deletions

View file

@ -35,6 +35,8 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.17.2-nightly.3
- 3.17.2-nightly.2
- 3.17.2-nightly.1
- 3.17.1
- 3.17.1-nightly.3
@ -133,8 +135,6 @@ body:
- 3.14.10
- 3.14.10-nightly.9
- 3.14.10-nightly.8
- 3.14.10-nightly.7
- 3.14.10-nightly.6
validations:
required: true
- type: dropdown

View file

@ -290,11 +290,15 @@ def run(script):
"--setup_only",
help="Only create dbs, do not run tests",
default=None)
@click.option("--mongo_url",
help="MongoDB for testing.",
default=None)
def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
timeout, setup_only):
timeout, setup_only, mongo_url):
"""Run all automatic tests after proper initialization via start.py"""
PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
persist, app_variant, timeout, setup_only)
persist, app_variant, timeout, setup_only,
mongo_url)
@main.command(help="DEPRECATED - run sync server")

View file

@ -13,7 +13,7 @@ class OCIOEnvHook(PreLaunchHook):
"fusion",
"blender",
"aftereffects",
"max",
"3dsmax",
"houdini",
"maya",
"nuke",

View file

@ -38,6 +38,8 @@ from .lib import (
from .capture import capture
from .render_lib import prepare_rendering
__all__ = [
"install",
@ -66,4 +68,5 @@ __all__ = [
"get_selection",
"capture",
# "unique_name",
"prepare_rendering",
]

View file

@ -0,0 +1,51 @@
import attr
import bpy
@attr.s
class LayerMetadata(object):
    """Data class for Render Layer metadata.

    Built via ``attr.ib`` so instances are created with keyword
    arguments, e.g. ``LayerMetadata(frameStart=1, frameEnd=100)``.
    """
    # First frame of the render range (inclusive).
    frameStart = attr.ib()
    # Last frame of the render range (inclusive).
    frameEnd = attr.ib()
@attr.s
class RenderProduct(object):
    """Colorspace information for a single render product.

    Carried along as a render product parameter when submitting the
    publish job, so downstream steps know how the images were produced.
    """
    colorspace = attr.ib()  # colorspace the product is rendered in
    view = attr.ib()  # OCIO view transform
    # Optional product (AOV) name; None when not applicable.
    productName = attr.ib(default=None)
class ARenderProduct(object):
    """Collects render product information for the current Blender scene."""

    def __init__(self):
        """Gather layer frame-range metadata and attach render products."""
        self.layer_data = self._get_layer_data()
        self.layer_data.products = self.get_render_products()

    def _get_layer_data(self):
        """Build a LayerMetadata from the active scene's frame range."""
        current_scene = bpy.context.scene
        frame_start = int(current_scene.frame_start)
        frame_end = int(current_scene.frame_end)
        return LayerMetadata(
            frameStart=frame_start,
            frameEnd=frame_end,
        )

    def get_render_products(self):
        """To be implemented by renderer class.

        This should return a list of RenderProducts.

        Returns:
            list: List of RenderProduct
        """
        beauty = RenderProduct(
            colorspace="sRGB",
            view="ACES 1.0",
            productName="",
        )
        return [beauty]

View file

@ -16,6 +16,7 @@ import bpy
import bpy.utils.previews
from openpype import style
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import get_current_asset_name, get_current_task_name
from openpype.tools.utils import host_tools
@ -331,10 +332,11 @@ class LaunchWorkFiles(LaunchQtApp):
def execute(self, context):
result = super().execute(context)
self._window.set_context({
"asset": get_current_asset_name(),
"task": get_current_task_name()
})
if not AYON_SERVER_ENABLED:
self._window.set_context({
"asset": get_current_asset_name(),
"task": get_current_task_name()
})
return result
def before_window_show(self):

View file

@ -0,0 +1,255 @@
import os
import bpy
from openpype.settings import get_project_settings
from openpype.pipeline import get_current_project_name
def get_default_render_folder(settings):
    """Return the default render image folder from Blender settings."""
    render_settings = settings["blender"]["RenderSettings"]
    return render_settings["default_render_image_folder"]
def get_aov_separator(settings):
    """Return the AOV separator character configured in Blender settings.

    Raises:
        ValueError: When the configured separator name is unknown.
    """
    aov_sep = settings["blender"]["RenderSettings"]["aov_separator"]
    separators = {
        "dash": "-",
        "underscore": "_",
        "dot": ".",
    }
    try:
        return separators[aov_sep]
    except KeyError:
        raise ValueError(f"Invalid aov separator: {aov_sep}")
def get_image_format(settings):
    """Return the render image format (extension) from Blender settings."""
    render_settings = settings["blender"]["RenderSettings"]
    return render_settings["image_format"]
def get_multilayer(settings):
    """Return whether multilayer EXR output is enabled in Blender settings."""
    render_settings = settings["blender"]["RenderSettings"]
    return render_settings["multilayer_exr"]
def get_render_product(output_path, name, aov_sep):
    """Build the path of the beauty render product.

    Blender interprets the sequence of ``#`` characters as frame-number
    padding when it renders.

    Args:
        output_path (str): Folder where renders are written.
        name (str): Base name of the render product (instance name).
        aov_sep (str): Separator between the base name and AOV suffix.

    Returns:
        str: Forward-slash path ending in ``{aov_sep}beauty.####``.
    """
    filepath = os.path.join(output_path, name)
    render_product = f"{filepath}{aov_sep}beauty.####"
    # Normalize to forward slashes so the path is portable (Blender and
    # the farm both accept it on Windows too).
    render_product = render_product.replace("\\", "/")
    return render_product
def set_render_format(ext, multilayer):
    """Configure Blender's output image format for the given extension."""
    # Let Blender append the proper extension to the output path.
    bpy.context.scene.render.use_file_extension = True
    image_settings = bpy.context.scene.render.image_settings

    if ext == "exr":
        image_settings.file_format = (
            "OPEN_EXR_MULTILAYER" if multilayer else "OPEN_EXR")
        return

    # Map plain extensions to Blender's file format identifiers.
    formats = {
        "bmp": "BMP",
        "rgb": "IRIS",
        "png": "PNG",
        "jpeg": "JPEG",
        "jp2": "JPEG2000",
        "tga": "TARGA",
        "tif": "TIFF",
    }
    file_format = formats.get(ext)
    if file_format is not None:
        image_settings.file_format = file_format
def set_render_passes(settings):
    """Enable view-layer render passes based on project settings.

    Toggles each standard pass on the current view layer depending on
    its presence in the configured AOV list and creates/updates the
    configured custom AOVs.

    Args:
        settings (dict): Project settings.

    Returns:
        tuple: ``(aov_list, custom_passes)`` as read from the settings.
    """
    aov_list = (settings["blender"]
                ["RenderSettings"]
                ["aov_list"])
    custom_passes = (settings["blender"]
                     ["RenderSettings"]
                     ["custom_passes"])
    vl = bpy.context.view_layer
    # Standard passes, toggled by their presence in the AOV list.
    vl.use_pass_combined = "combined" in aov_list
    vl.use_pass_z = "z" in aov_list
    vl.use_pass_mist = "mist" in aov_list
    vl.use_pass_normal = "normal" in aov_list
    vl.use_pass_diffuse_direct = "diffuse_light" in aov_list
    vl.use_pass_diffuse_color = "diffuse_color" in aov_list
    vl.use_pass_glossy_direct = "specular_light" in aov_list
    vl.use_pass_glossy_color = "specular_color" in aov_list
    vl.eevee.use_pass_volume_direct = "volume_light" in aov_list
    vl.use_pass_emit = "emission" in aov_list
    vl.use_pass_environment = "environment" in aov_list
    vl.use_pass_shadow = "shadow" in aov_list
    vl.use_pass_ambient_occlusion = "ao" in aov_list
    # Cycles-specific passes.
    cycles = vl.cycles
    cycles.denoising_store_passes = "denoising" in aov_list
    cycles.use_pass_volume_direct = "volume_direct" in aov_list
    cycles.use_pass_volume_indirect = "volume_indirect" in aov_list
    # Create missing custom AOVs; update the type of existing ones.
    aovs_names = [aov.name for aov in vl.aovs]
    for cp in custom_passes:
        # cp is expected to be a (name, attributes-dict) pair
        # — TODO confirm against the settings schema.
        cp_name = cp[0]
        if cp_name not in aovs_names:
            aov = vl.aovs.add()
            aov.name = cp_name
        else:
            aov = vl.aovs[cp_name]
        aov.type = cp[1].get("type", "VALUE")
    return aov_list, custom_passes
def set_node_tree(output_path, name, aov_sep, ext, multilayer):
    """Rebuild the compositor File Output node for the enabled passes.

    Ensures a Render Layers node exists, removes every existing File
    Output node and creates a fresh one with one file slot per enabled
    render pass (excluding the beauty-related sockets).

    Args:
        output_path (str): Folder where the AOV files are written.
        name (str): Base name for the AOV output files.
        aov_sep (str): Separator between the base name and pass name.
        ext (str): Image format extension.
        multilayer (bool): Whether a multilayer EXR is rendered.

    Returns:
        list: ``(pass_name, filepath)`` tuples for each AOV output, or
            an empty list when a multilayer EXR carries all passes.
    """
    # Set the scene to use the compositor node tree to render
    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree
    # Get the Render Layers node
    rl_node = None
    for node in tree.nodes:
        if node.bl_idname == "CompositorNodeRLayers":
            rl_node = node
            break
    # If there's not a Render Layers node, we create it
    if not rl_node:
        rl_node = tree.nodes.new("CompositorNodeRLayers")
    # Get the enabled output sockets, that are the active passes for the
    # render.
    # We also exclude some layers.
    exclude_sockets = ["Image", "Alpha", "Noisy Image"]
    passes = [
        socket
        for socket in rl_node.outputs
        if socket.enabled and socket.name not in exclude_sockets
    ]
    # Remove all output nodes
    for node in tree.nodes:
        if node.bl_idname == "CompositorNodeOutputFile":
            tree.nodes.remove(node)
    # Create a new output node
    output = tree.nodes.new("CompositorNodeOutputFile")
    image_settings = bpy.context.scene.render.image_settings
    output.format.file_format = image_settings.file_format
    # In case of a multilayer exr, we don't need to use the output node,
    # because the blender render already outputs a multilayer exr.
    if ext == "exr" and multilayer:
        output.layer_slots.clear()
        return []
    output.file_slots.clear()
    output.base_path = output_path
    aov_file_products = []
    # For each active render pass, we add a new socket to the output node
    # and link it
    for render_pass in passes:
        filepath = f"{name}{aov_sep}{render_pass.name}.####"
        output.file_slots.new(filepath)
        aov_file_products.append(
            (render_pass.name, os.path.join(output_path, filepath)))
        # The slot just created is the last input on the output node.
        node_input = output.inputs[-1]
        tree.links.new(render_pass, node_input)
    return aov_file_products
def imprint_render_settings(node, data):
    """Store render settings on the node under the ``render_data`` key.

    ``None`` values are skipped; existing keys are overwritten in place.
    """
    render_data_key = "render_data"
    if not node.get(render_data_key):
        node[render_data_key] = {}
    for key, value in data.items():
        if value is None:
            continue
        node[render_data_key][key] = value
def prepare_rendering(asset_group):
    """Configure the scene's render settings for a render instance.

    Sets output format, render passes and the beauty/AOV output paths
    derived from the workfile location, then imprints everything as
    ``render_data`` on the asset group for later collection.

    Args:
        asset_group (bpy.types.Collection): Instance collection to
            imprint the render settings on.

    Raises:
        AssertionError: When the workfile has not been saved yet.
    """
    name = asset_group.name
    filepath = bpy.data.filepath
    assert filepath, "Workfile not saved. Please save the file first."
    # Renders go next to the workfile, inside the configured render
    # folder, in a subfolder named after the workfile.
    file_path = os.path.dirname(filepath)
    file_name = os.path.basename(filepath)
    file_name, _ = os.path.splitext(file_name)
    project = get_current_project_name()
    settings = get_project_settings(project)
    render_folder = get_default_render_folder(settings)
    aov_sep = get_aov_separator(settings)
    ext = get_image_format(settings)
    multilayer = get_multilayer(settings)
    set_render_format(ext, multilayer)
    aov_list, custom_passes = set_render_passes(settings)
    output_path = os.path.join(file_path, render_folder, file_name)
    render_product = get_render_product(output_path, name, aov_sep)
    aov_file_product = set_node_tree(
        output_path, name, aov_sep, ext, multilayer)
    # Point the scene render output at the beauty product path.
    bpy.context.scene.render.filepath = render_product
    render_settings = {
        "render_folder": render_folder,
        "aov_separator": aov_sep,
        "image_format": ext,
        "multilayer_exr": multilayer,
        "aov_list": aov_list,
        "custom_passes": custom_passes,
        "render_product": render_product,
        "aov_file_product": aov_file_product,
        "review": True,
    }
    imprint_render_settings(asset_group, render_settings)

View file

@ -0,0 +1,53 @@
"""Create render."""
import bpy
from openpype.pipeline import get_current_task_name
from openpype.hosts.blender.api import plugin, lib
from openpype.hosts.blender.api.render_lib import prepare_rendering
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
class CreateRenderlayer(plugin.Creator):
    """Create a render instance and prepare the scene for rendering."""

    name = "renderingMain"
    label = "Render"
    family = "render"
    icon = "eye"

    def process(self):
        """Create the instance collection and set up render settings.

        Returns:
            bpy.types.Collection: The created instance collection.
        """
        # Get Instance Container or create it if it does not exist
        instances = bpy.data.collections.get(AVALON_INSTANCES)
        if not instances:
            instances = bpy.data.collections.new(name=AVALON_INSTANCES)
            bpy.context.scene.collection.children.link(instances)
        # Create instance object
        asset = self.data["asset"]
        subset = self.data["subset"]
        name = plugin.asset_name(asset, subset)
        asset_group = bpy.data.collections.new(name=name)
        try:
            instances.children.link(asset_group)
            self.data['task'] = get_current_task_name()
            lib.imprint(asset_group, self.data)
            prepare_rendering(asset_group)
        except Exception:
            # Remove the instance if there was an error
            bpy.data.collections.remove(asset_group)
            raise
        # TODO: this is undesirable, but it's the only way to be sure that
        # the file is saved before the render starts.
        # Blender, by design, doesn't set the file as dirty if modifications
        # happen by script. So, when creating the instance and setting the
        # render settings, the file is not marked as dirty. This means that
        # there is the risk of sending to deadline a file without the right
        # settings. Even the validator to check that the file is saved will
        # detect the file as saved, even if it isn't. The only solution for
        # now it is to force the file to be saved.
        bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)
        return asset_group

View file

@ -244,7 +244,7 @@ class BlendLoader(plugin.AssetLoader):
for parent in parent_containers:
parent.get(AVALON_PROPERTY)["members"] = list(filter(
lambda i: i not in members,
parent.get(AVALON_PROPERTY)["members"]))
parent.get(AVALON_PROPERTY).get("members", [])))
for attr in attrs:
for data in getattr(bpy.data, attr):

View file

@ -0,0 +1,123 @@
# -*- coding: utf-8 -*-
"""Collect render data."""
import os
import re
import bpy
from openpype.hosts.blender.api import colorspace
import pyblish.api
class CollectBlenderRender(pyblish.api.InstancePlugin):
    """Gather all publishable render layers from renderSetup."""

    order = pyblish.api.CollectorOrder + 0.01
    hosts = ["blender"]
    families = ["render"]
    label = "Collect Render Layers"
    sync_workfile_version = False

    @staticmethod
    def generate_expected_beauty(
        render_product, frame_start, frame_end, frame_step, ext
    ):
        """
        Generate the expected files for the render product for the beauty
        render. This returns a list of files that should be rendered. It
        replaces the sequence of `#` with the frame number.

        Returns:
            dict: ``{"beauty": [forward-slash file paths]}``
        """
        path = os.path.dirname(render_product)
        file = os.path.basename(render_product)
        expected_files = []
        for frame in range(frame_start, frame_end + 1, frame_step):
            # Zero-pad to four digits, matching the `####` placeholder.
            frame_str = str(frame).rjust(4, "0")
            filename = re.sub("#+", frame_str, file)
            expected_file = f"{os.path.join(path, filename)}.{ext}"
            expected_files.append(expected_file.replace("\\", "/"))
        return {
            "beauty": expected_files
        }

    @staticmethod
    def generate_expected_aovs(
        aov_file_product, frame_start, frame_end, frame_step, ext
    ):
        """
        Generate the expected files for each AOV file product. This
        returns a dict mapping each AOV name to the list of files that
        should be rendered for it. It replaces the sequence of `#` with
        the frame number.
        """
        expected_files = {}
        for aov_name, aov_file in aov_file_product:
            path = os.path.dirname(aov_file)
            file = os.path.basename(aov_file)
            aov_files = []
            for frame in range(frame_start, frame_end + 1, frame_step):
                frame_str = str(frame).rjust(4, "0")
                filename = re.sub("#+", frame_str, file)
                expected_file = f"{os.path.join(path, filename)}.{ext}"
                aov_files.append(expected_file.replace("\\", "/"))
            expected_files[aov_name] = aov_files
        return expected_files

    def process(self, instance):
        """Collect render data imprinted on the instance collection."""
        context = instance.context
        # `render_data` is imprinted on the collection by
        # `prepare_rendering` when the instance is created.
        render_data = bpy.data.collections[str(instance)].get("render_data")
        assert render_data, "No render data found."
        self.log.info(f"render_data: {dict(render_data)}")
        render_product = render_data.get("render_product")
        aov_file_product = render_data.get("aov_file_product")
        ext = render_data.get("image_format")
        multilayer = render_data.get("multilayer_exr")
        frame_start = context.data["frameStart"]
        frame_end = context.data["frameEnd"]
        frame_handle_start = context.data["frameStartHandle"]
        frame_handle_end = context.data["frameEndHandle"]
        expected_beauty = self.generate_expected_beauty(
            render_product, int(frame_start), int(frame_end),
            int(bpy.context.scene.frame_step), ext)
        expected_aovs = self.generate_expected_aovs(
            aov_file_product, int(frame_start), int(frame_end),
            int(bpy.context.scene.frame_step), ext)
        # Merge beauty and AOV expected files (dict union, Python 3.9+).
        expected_files = expected_beauty | expected_aovs
        instance.data.update({
            "family": "render.farm",
            "frameStart": frame_start,
            "frameEnd": frame_end,
            "frameStartHandle": frame_handle_start,
            "frameEndHandle": frame_handle_end,
            "fps": context.data["fps"],
            "byFrameStep": bpy.context.scene.frame_step,
            "review": render_data.get("review", False),
            "multipartExr": ext == "exr" and multilayer,
            "farm": True,
            "expectedFiles": [expected_files],
            # OCIO not currently implemented in Blender, but the following
            # settings are required by the schema, so it is hardcoded.
            # TODO: Implement OCIO in Blender
            "colorspaceConfig": "",
            "colorspaceDisplay": "sRGB",
            "colorspaceView": "ACES 1.0 SDR-video",
            "renderProducts": colorspace.ARenderProduct(),
        })
        self.log.info(f"data: {instance.data}")

View file

@ -9,7 +9,8 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
label = "Increment Workfile Version"
optional = True
hosts = ["blender"]
families = ["animation", "model", "rig", "action", "layout", "blendScene"]
families = ["animation", "model", "rig", "action", "layout", "blendScene",
"render"]
def process(self, context):

View file

@ -0,0 +1,47 @@
import os
import bpy
import pyblish.api
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError,
OptionalPyblishPluginMixin
)
from openpype.hosts.blender.api.render_lib import prepare_rendering
class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
                              OptionalPyblishPluginMixin):
    """Validates Render File Directory is
    not the same in every submission
    """

    order = ValidateContentsOrder
    families = ["render.farm"]
    hosts = ["blender"]
    label = "Validate Render Output for Deadline"
    optional = True
    actions = [RepairAction]

    def process(self, instance):
        """Check the render output path contains the scene file name.

        Raises:
            PublishValidationError: When the workfile name is not part
                of the configured render filepath.
        """
        if not self.is_active(instance.data):
            return
        filepath = bpy.data.filepath
        file = os.path.basename(filepath)
        filename, ext = os.path.splitext(file)
        if filename not in bpy.context.scene.render.filepath:
            raise PublishValidationError(
                "Render output folder "
                "doesn't match the blender scene name! "
                "Use Repair action to "
                "fix the folder file path.."
            )

    @classmethod
    def repair(cls, instance):
        # Rebuild the render settings for the instance collection and
        # save the file so the fix persists in the workfile.
        container = bpy.data.collections[str(instance)]
        prepare_rendering(container)
        bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)
        cls.log.debug("Reset the render output folder...")

View file

@ -0,0 +1,20 @@
import bpy
import pyblish.api
class ValidateFileSaved(pyblish.api.InstancePlugin):
    """Validate that the workfile has been saved."""

    order = pyblish.api.ValidatorOrder - 0.01
    hosts = ["blender"]
    label = "Validate File Saved"
    optional = False
    exclude_families = []

    def process(self, instance):
        # Skip validation for instances whose family is excluded.
        family = instance.data["family"]
        if any(family in excluded for excluded in self.exclude_families):
            return
        if bpy.data.is_dirty:
            raise RuntimeError("Workfile is not saved.")

View file

@ -0,0 +1,17 @@
import bpy
import pyblish.api
class ValidateRenderCameraIsSet(pyblish.api.InstancePlugin):
    """Validate that there is a camera set as active for rendering."""

    order = pyblish.api.ValidatorOrder
    hosts = ["blender"]
    families = ["render"]
    label = "Validate Render Camera Is Set"
    optional = False

    def process(self, instance):
        # The scene's active camera is what Blender renders through.
        active_camera = bpy.context.scene.camera
        if not active_camera:
            raise RuntimeError("No camera is active for rendering.")

View file

@ -123,6 +123,9 @@ class CreateSaver(NewCreator):
def _imprint(self, tool, data):
# Save all data in a "openpype.{key}" = value data
# Instance id is the tool's name so we don't need to imprint as data
data.pop("instance_id", None)
active = data.pop("active", None)
if active is not None:
# Use active value to set the passthrough state
@ -188,6 +191,10 @@ class CreateSaver(NewCreator):
passthrough = attrs["TOOLB_PassThrough"]
data["active"] = not passthrough
# Override publisher's UUID generation because tool names are
# already unique in Fusion in a comp
data["instance_id"] = tool.Name
return data
def get_pre_create_attr_defs(self):

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import sys
import os
import errno
import re
import uuid
import logging
@ -9,10 +10,15 @@ import json
import six
from openpype.lib import StringTemplate
from openpype.client import get_asset_by_name
from openpype.settings import get_current_project_settings
from openpype.pipeline import get_current_project_name, get_current_asset_name
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.pipeline.context_tools import (
get_current_context_template_data,
get_current_project_asset
)
from openpype.widgets import popup
import hou
@ -160,8 +166,6 @@ def validate_fps():
if current_fps != fps:
from openpype.widgets import popup
# Find main window
parent = hou.ui.mainQtWindow()
if parent is None:
@ -747,3 +751,99 @@ def get_camera_from_container(container):
assert len(cameras) == 1, "Camera instance must have only one camera"
return cameras[0]
def get_context_var_changes():
    """Return the Houdini context variables that need updating.

    Compares current hscript variable values against values resolved
    from the project-settings templates.

    Returns:
        dict: ``{VAR: (current_value, new_value, is_directory)}`` for
            every variable whose value differs; empty when the feature
            is disabled or no variables are configured.
    """
    houdini_vars_to_update = {}
    project_settings = get_current_project_settings()
    houdini_vars_settings = \
        project_settings["houdini"]["general"]["update_houdini_var_context"]
    if not houdini_vars_settings["enabled"]:
        return houdini_vars_to_update
    houdini_vars = houdini_vars_settings["houdini_vars"]
    # No vars specified - nothing to do
    if not houdini_vars:
        return houdini_vars_to_update
    # Get Template data
    template_data = get_current_context_template_data()
    # Set Houdini Vars
    for item in houdini_vars:
        # For consistency reasons we always force all vars to be uppercase
        # Also remove any leading, and trailing whitespaces.
        var = item["var"].strip().upper()
        # get and resolve template in value
        item_value = StringTemplate.format_template(
            item["value"],
            template_data
        )
        if var == "JOB" and item_value == "":
            # sync $JOB to $HIP if $JOB is empty
            item_value = os.environ["HIP"]
        if item["is_directory"]:
            item_value = item_value.replace("\\", "/")
        # Read the variable's current value via hscript.
        current_value = hou.hscript("echo -n `${}`".format(var))[0]
        if current_value != item_value:
            houdini_vars_to_update[var] = (
                current_value, item_value, item["is_directory"]
            )
    return houdini_vars_to_update
def update_houdini_vars_context():
    """Update asset context variables.

    Applies every change reported by ``get_context_var_changes``:
    creates the directory for directory-type variables, sets the
    hscript variable and mirrors it into the process environment.
    """
    for var, (_old, new, is_directory) in get_context_var_changes().items():
        if is_directory:
            try:
                # exist_ok replaces the old errno.EEXIST check.
                os.makedirs(new, exist_ok=True)
            except OSError:
                # Best-effort: failing to create the directory should
                # not prevent the variable itself from being updated.
                print(
                    "Failed to create ${} dir. Maybe due to "
                    "insufficient permissions.".format(var)
                )
        hou.hscript("set {}={}".format(var, new))
        os.environ[var] = new
        print("Updated ${} to {}".format(var, new))
def update_houdini_vars_context_dialog():
    """Show pop-up to update asset context variables.

    Does nothing (beyond a console note) when all variables are already
    up to date; otherwise shows a modal dialog listing the pending
    changes with a "Fix" button that applies them.
    """
    update_vars = get_context_var_changes()
    if not update_vars:
        # Nothing to change
        print("Nothing to change, Houdini vars are already up to date.")
        return
    message = "\n".join(
        "${}: {} -> {}".format(var, old or "None", new or "None")
        for var, (old, new, _is_directory) in update_vars.items()
    )
    # TODO: Use better UI!
    parent = hou.ui.mainQtWindow()
    dialog = popup.Popup(parent=parent)
    dialog.setModal(True)
    dialog.setWindowTitle("Houdini scene has outdated asset variables")
    dialog.setMessage(message)
    dialog.setButtonText("Fix")
    # on_show is the Fix button clicked callback
    dialog.on_clicked.connect(update_houdini_vars_context)
    dialog.show()

View file

@ -300,6 +300,9 @@ def on_save():
log.info("Running callback on save..")
# update houdini vars
lib.update_houdini_vars_context_dialog()
nodes = lib.get_id_required_nodes()
for node, new_id in lib.generate_ids(nodes):
lib.set_id(node, new_id, overwrite=False)
@ -335,6 +338,9 @@ def on_open():
log.info("Running callback on open..")
# update houdini vars
lib.update_houdini_vars_context_dialog()
# Validate FPS after update_task_from_path to
# ensure it is using correct FPS for the asset
lib.validate_fps()
@ -399,6 +405,7 @@ def _set_context_settings():
"""
lib.reset_framerange()
lib.update_houdini_vars_context()
def on_pyblish_instance_toggled(instance, new_value, old_value):

View file

@ -187,13 +187,14 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
self.customize_node_look(instance_node)
instance_data["instance_node"] = instance_node.path()
instance_data["instance_id"] = instance_node.path()
instance = CreatedInstance(
self.family,
subset_name,
instance_data,
self)
self._add_instance_to_context(instance)
imprint(instance_node, instance.data_to_store())
self.imprint(instance_node, instance.data_to_store())
return instance
except hou.Error as er:
@ -222,25 +223,41 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
self.cache_subsets(self.collection_shared_data)
for instance in self.collection_shared_data[
"houdini_cached_subsets"].get(self.identifier, []):
node_data = read(instance)
# Node paths are always the full node path since that is unique
# Because it's the node's path it's not written into attributes
# but explicitly collected
node_path = instance.path()
node_data["instance_id"] = node_path
node_data["instance_node"] = node_path
created_instance = CreatedInstance.from_existing(
read(instance), self
node_data, self
)
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, changes in update_list:
instance_node = hou.node(created_inst.get("instance_node"))
new_values = {
key: changes[key].new_value
for key in changes.changed_keys
}
imprint(
self.imprint(
instance_node,
new_values,
update=True
)
def imprint(self, node, values, update=False):
# Never store instance node and instance id since that data comes
# from the node's path
values.pop("instance_node", None)
values.pop("instance_id", None)
imprint(node, values, update=update)
def remove_instances(self, instances):
"""Remove specified instance from the scene.

View file

@ -86,6 +86,14 @@ openpype.hosts.houdini.api.lib.reset_framerange()
]]></scriptCode>
</scriptItem>
<scriptItem id="update_context_vars">
<label>Update Houdini Vars</label>
<scriptCode><![CDATA[
import openpype.hosts.houdini.api.lib
openpype.hosts.houdini.api.lib.update_houdini_vars_context_dialog()
]]></scriptCode>
</scriptItem>
<separatorItem/>
<scriptItem id="experimental_tools">
<label>Experimental tools...</label>

View file

@ -1,15 +1,35 @@
# -*- coding: utf-8 -*-
"""Library of functions useful for 3dsmax pipeline."""
import contextlib
import logging
import json
from typing import Any, Dict, Union
import six
from openpype.pipeline import get_current_project_name, colorspace
from openpype.settings import get_project_settings
from openpype.pipeline.context_tools import (
get_current_project, get_current_project_asset)
from openpype.style import load_stylesheet
from pymxs import runtime as rt
JSON_PREFIX = "JSON::"
log = logging.getLogger("openpype.hosts.max")
def get_main_window():
    """Acquire Max's main window.

    Returns:
        QtWidgets.QWidget: The 3dsMax main application window.

    Raises:
        RuntimeError: When the main window cannot be found.
    """
    from qtpy import QtWidgets
    top_widgets = QtWidgets.QApplication.topLevelWidgets()
    name = "QmaxApplicationWindow"
    for widget in top_widgets:
        if (
            widget.inherits("QMainWindow")
            and widget.metaObject().className() == name
        ):
            return widget
    # Fixed typo in the original message ("Count not find").
    raise RuntimeError('Could not find 3dsMax main window.')
def imprint(node_name: str, data: dict) -> bool:
@ -277,6 +297,7 @@ def set_context_setting():
"""
reset_scene_resolution()
reset_frame_range()
reset_colorspace()
def get_max_version():
@ -292,6 +313,14 @@ def get_max_version():
return max_info[7]
def is_headless():
    """Check if 3dsMax runs in batch mode.

    If it returns True, it runs in 3dsbatch.exe
    If it returns False, it runs in 3dsmax.exe

    Returns:
        bool: True when running non-interactively.
    """
    return rt.maxops.isInNonInteractiveMode()
@contextlib.contextmanager
def viewport_camera(camera):
original = rt.viewport.getCamera()
@ -314,6 +343,51 @@ def set_timeline(frameStart, frameEnd):
return rt.animationRange
def reset_colorspace():
    """Set the OCIO configuration from the project settings.

    Supported in 3dsMax 2024+; earlier versions are a no-op.
    """
    if int(get_max_version()) < 2024:
        return
    project_name = get_current_project_name()
    project_settings = get_project_settings(project_name)
    max_config_data = colorspace.get_imageio_config(
        project_name, "max", project_settings)
    if max_config_data:
        ocio_config_path = max_config_data["path"]
        # Removed the dead pre-assignment of `colorspace_mgr` before the
        # settings lookup and the duplicated OCIOConfigPath assignment
        # that were in the original.
        colorspace_mgr = rt.ColorPipelineMgr
        colorspace_mgr.Mode = rt.Name("OCIO_Custom")
        colorspace_mgr.OCIOConfigPath = ocio_config_path
def check_colorspace():
    """Warn the artist when the scene's OCIO mode is not the custom one.

    In interactive 3dsMax 2024+ sessions, shows a "Fix" pop-up wired to
    :func:`reset_colorspace` when the OCIO mode differs from the
    project configuration.
    """
    parent = get_main_window()
    if parent is None:
        log.info("Skipping outdated pop-up "
                 "because Max main window can't be found.")
        # The original fell through here and would have used a None
        # parent for the dialog; returning matches the logged intent.
        return
    if int(get_max_version()) >= 2024:
        color_mgr = rt.ColorPipelineMgr
        project_name = get_current_project_name()
        project_settings = get_project_settings(project_name)
        max_config_data = colorspace.get_imageio_config(
            project_name, "max", project_settings)
        if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"):
            if not is_headless():
                from openpype.widgets import popup
                dialog = popup.Popup(parent=parent)
                dialog.setWindowTitle("Warning: Wrong OCIO Mode")
                dialog.setMessage("This scene has wrong OCIO "
                                  "Mode setting.")
                dialog.setButtonText("Fix")
                dialog.setStyleSheet(load_stylesheet())
                dialog.on_clicked.connect(reset_colorspace)
                dialog.show()
def unique_namespace(namespace, format="%02d",
prefix="", suffix="", con_suffix="CON"):
"""Return unique namespace

View file

@ -119,6 +119,10 @@ class OpenPypeMenu(object):
frame_action.triggered.connect(self.frame_range_callback)
openpype_menu.addAction(frame_action)
colorspace_action = QtWidgets.QAction("Set Colorspace", openpype_menu)
colorspace_action.triggered.connect(self.colorspace_callback)
openpype_menu.addAction(colorspace_action)
return openpype_menu
def load_callback(self):
@ -148,3 +152,7 @@ class OpenPypeMenu(object):
def frame_range_callback(self):
"""Callback to reset frame range"""
return lib.reset_frame_range()
def colorspace_callback(self):
"""Callback to reset colorspace"""
return lib.reset_colorspace()

View file

@ -57,6 +57,9 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
rt.callbacks.addScript(rt.Name('systemPostNew'),
context_setting)
rt.callbacks.addScript(rt.Name('filePostOpen'),
lib.check_colorspace)
def has_unsaved_changes(self):
# TODO: how to get it from 3dsmax?
return True

View file

@ -34,6 +34,12 @@ class CollectRender(pyblish.api.InstancePlugin):
files_by_aov.update(aovs)
camera = rt.viewport.GetCamera()
if instance.data.get("members"):
camera_list = [member for member in instance.data["members"]
if rt.ClassOf(member) == rt.Camera.Classes]
if camera_list:
camera = camera_list[-1]
instance.data["cameras"] = [camera.name] if camera else None # noqa
if "expectedFiles" not in instance.data:
@ -63,6 +69,17 @@ class CollectRender(pyblish.api.InstancePlugin):
instance.data["colorspaceConfig"] = ""
instance.data["colorspaceDisplay"] = "sRGB"
instance.data["colorspaceView"] = "ACES 1.0 SDR-video"
if int(get_max_version()) >= 2024:
colorspace_mgr = rt.ColorPipelineMgr # noqa
display = next(
(display for display in colorspace_mgr.GetDisplayList()))
view_transform = next(
(view for view in colorspace_mgr.GetViewList(display)))
instance.data["colorspaceConfig"] = colorspace_mgr.OCIOConfigPath
instance.data["colorspaceDisplay"] = display
instance.data["colorspaceView"] = view_transform
instance.data["renderProducts"] = colorspace.ARenderProduct()
instance.data["publishJobState"] = "Suspended"
instance.data["attachTo"] = []

View file

@ -4,6 +4,7 @@ import pyblish.api
from pymxs import runtime as rt
from openpype.lib import BoolDef
from openpype.hosts.max.api.lib import get_max_version
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
@ -43,6 +44,17 @@ class CollectReview(pyblish.api.InstancePlugin,
"dspSafeFrame": attr_values.get("dspSafeFrame"),
"dspFrameNums": attr_values.get("dspFrameNums")
}
if int(get_max_version()) >= 2024:
colorspace_mgr = rt.ColorPipelineMgr # noqa
display = next(
(display for display in colorspace_mgr.GetDisplayList()))
view_transform = next(
(view for view in colorspace_mgr.GetViewList(display)))
instance.data["colorspaceConfig"] = colorspace_mgr.OCIOConfigPath
instance.data["colorspaceDisplay"] = display
instance.data["colorspaceView"] = view_transform
# Enable ftrack functionality
instance.data.setdefault("families", []).append('ftrack')
@ -54,7 +66,6 @@ class CollectReview(pyblish.api.InstancePlugin,
@classmethod
def get_attribute_defs(cls):
return [
BoolDef("dspGeometry",
label="Geometry",

View file

@ -6,6 +6,7 @@ from pyblish.api import Instance
from maya import cmds # noqa
import maya.mel as mel # noqa
from openpype.hosts.maya.api.lib import maintained_selection
class FBXExtractor:
@ -53,7 +54,6 @@ class FBXExtractor:
"bakeComplexEnd": int,
"bakeComplexStep": int,
"bakeResampleAnimation": bool,
"animationOnly": bool,
"useSceneName": bool,
"quaternion": str, # "euler"
"shapes": bool,
@ -63,7 +63,10 @@ class FBXExtractor:
"embeddedTextures": bool,
"inputConnections": bool,
"upAxis": str, # x, y or z,
"triangulate": bool
"triangulate": bool,
"fileVersion": str,
"skeletonDefinitions": bool,
"referencedAssetsContent": bool
}
@property
@ -94,7 +97,6 @@ class FBXExtractor:
"bakeComplexEnd": end_frame,
"bakeComplexStep": 1,
"bakeResampleAnimation": True,
"animationOnly": False,
"useSceneName": False,
"quaternion": "euler",
"shapes": True,
@ -104,7 +106,10 @@ class FBXExtractor:
"embeddedTextures": False,
"inputConnections": True,
"upAxis": "y",
"triangulate": False
"triangulate": False,
"fileVersion": "FBX202000",
"skeletonDefinitions": False,
"referencedAssetsContent": False
}
def __init__(self, log=None):
@ -198,5 +203,9 @@ class FBXExtractor:
path (str): Path to use for export.
"""
cmds.select(members, r=True, noExpand=True)
mel.eval('FBXExport -f "{}" -s'.format(path))
# The export requires forward slashes because we need
# to format it into a string in a mel expression
path = path.replace("\\", "/")
with maintained_selection():
cmds.select(members, r=True, noExpand=True)
mel.eval('FBXExport -f "{}" -s'.format(path))

View file

@ -183,6 +183,51 @@ def maintained_selection():
cmds.select(clear=True)
def get_namespace(node):
    """Return the namespace of the given node, or an empty string.

    Args:
        node (str): Node name or full DAG path.

    Returns:
        str: Namespace of the node's short name ("" when none).
    """
    # Work on the short name only; parent path segments may carry
    # their own namespaces.
    leaf = node.rsplit("|", 1)[-1]
    if ":" not in leaf:
        return ""
    return leaf.rsplit(":", 1)[0]
def strip_namespace(node, namespace):
    """Strip given namespace from the start of each name in a node path.

    Only names that begin with the namespace are affected; a matching
    namespace nested inside another namespace is left untouched.

    Examples:
        >>> strip_namespace("namespace:node", namespace="namespace:")
        "node"
        >>> strip_namespace("hello:world:node", namespace="hello:world")
        "node"
        >>> strip_namespace("hello:world:node", namespace="hello")
        "world:node"
        >>> strip_namespace("hello:world:node", namespace="world")
        "hello:world:node"
        >>> strip_namespace("ns:group|ns:node", namespace="ns")
        "group|node"

    Returns:
        str: Node name without given starting namespace.
    """
    # Normalize so the prefix test includes the trailing separator.
    prefix = namespace if namespace.endswith(":") else namespace + ":"

    # A long path can repeat the namespace on every parent, so strip it
    # from each path segment individually.
    stripped = []
    for name in node.split("|"):
        if name.startswith(prefix):
            name = name[len(prefix):]
        stripped.append(name)
    return "|".join(stripped)
def get_custom_namespace(custom_namespace):
"""Return unique namespace.
@ -922,7 +967,7 @@ def no_display_layers(nodes):
@contextlib.contextmanager
def namespaced(namespace, new=True):
def namespaced(namespace, new=True, relative_names=None):
"""Work inside namespace during context
Args:
@ -934,15 +979,19 @@ def namespaced(namespace, new=True):
"""
original = cmds.namespaceInfo(cur=True, absoluteName=True)
original_relative_names = cmds.namespace(query=True, relativeNames=True)
if new:
namespace = unique_namespace(namespace)
cmds.namespace(add=namespace)
if relative_names is not None:
cmds.namespace(relativeNames=relative_names)
try:
cmds.namespace(set=namespace)
yield namespace
finally:
cmds.namespace(set=original)
if relative_names is not None:
cmds.namespace(relativeNames=original_relative_names)
@contextlib.contextmanager
@ -4100,14 +4149,19 @@ def create_rig_animation_instance(
"""
if options is None:
options = {}
name = context["representation"]["name"]
output = next((node for node in nodes if
node.endswith("out_SET")), None)
controls = next((node for node in nodes if
node.endswith("controls_SET")), None)
if name != "fbx":
assert output, "No out_SET in rig, this is a bug."
assert controls, "No controls_SET in rig, this is a bug."
assert output, "No out_SET in rig, this is a bug."
assert controls, "No controls_SET in rig, this is a bug."
anim_skeleton = next((node for node in nodes if
node.endswith("skeletonAnim_SET")), None)
skeleton_mesh = next((node for node in nodes if
node.endswith("skeletonMesh_SET")), None)
# Find the roots amongst the loaded nodes
roots = (
@ -4119,9 +4173,7 @@ def create_rig_animation_instance(
custom_subset = options.get("animationSubsetName")
if custom_subset:
formatting_data = {
# TODO remove 'asset_type' and replace 'asset_name' with 'asset'
"asset_name": context['asset']['name'],
"asset_type": context['asset']['type'],
"asset": context["asset"],
"subset": context['subset']['name'],
"family": (
context['subset']['data'].get('family') or
@ -4142,10 +4194,12 @@ def create_rig_animation_instance(
host = registered_host()
create_context = CreateContext(host)
# Create the animation instance
rig_sets = [output, controls, anim_skeleton, skeleton_mesh]
# Remove sets that this particular rig does not have
rig_sets = [s for s in rig_sets if s is not None]
with maintained_selection():
cmds.select([output, controls] + roots, noExpand=True)
cmds.select(rig_sets + roots, noExpand=True)
create_context.create(
creator_identifier=creator_identifier,
variant=namespace,

View file

@ -1,14 +1,13 @@
import os
import logging
from functools import partial
from qtpy import QtWidgets, QtGui
import maya.utils
import maya.cmds as cmds
from openpype.settings import get_project_settings
from openpype.pipeline import (
get_current_project_name,
get_current_asset_name,
get_current_task_name
)
@ -46,12 +45,12 @@ def get_context_label():
)
def install():
def install(project_settings):
if cmds.about(batch=True):
log.info("Skipping openpype.menu initialization in batch mode..")
return
def deferred():
def add_menu():
pyblish_icon = host_tools.get_pyblish_icon()
parent_widget = get_main_window()
cmds.menu(
@ -191,7 +190,7 @@ def install():
cmds.setParent(MENU_NAME, menu=True)
def add_scripts_menu():
def add_scripts_menu(project_settings):
try:
import scriptsmenu.launchformaya as launchformaya
except ImportError:
@ -201,9 +200,6 @@ def install():
)
return
# load configuration of custom menu
project_name = get_current_project_name()
project_settings = get_project_settings(project_name)
config = project_settings["maya"]["scriptsmenu"]["definition"]
_menu = project_settings["maya"]["scriptsmenu"]["name"]
@ -225,8 +221,9 @@ def install():
# so that it only gets called after Maya UI has initialized too.
# This is crucial with Maya 2020+ which initializes without UI
# first as a QCoreApplication
maya.utils.executeDeferred(deferred)
cmds.evalDeferred(add_scripts_menu, lowestPriority=True)
maya.utils.executeDeferred(add_menu)
cmds.evalDeferred(partial(add_scripts_menu, project_settings),
lowestPriority=True)
def uninstall():

View file

@ -28,8 +28,6 @@ from openpype.lib import (
from openpype.pipeline import (
legacy_io,
get_current_project_name,
get_current_asset_name,
get_current_task_name,
register_loader_plugin_path,
register_inventory_action_path,
register_creator_plugin_path,
@ -108,7 +106,7 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
_set_project()
self._register_callbacks()
menu.install()
menu.install(project_settings)
register_event_callback("save", on_save)
register_event_callback("open", on_open)

View file

@ -151,6 +151,7 @@ class MayaCreatorBase(object):
# We never store the instance_node as value on the node since
# it's the node name itself
data.pop("instance_node", None)
data.pop("instance_id", None)
# Don't store `families` since it's up to the creator itself
# to define the initial publish families - not a stored attribute of
@ -227,6 +228,7 @@ class MayaCreatorBase(object):
# Explicitly re-parse the node name
node_data["instance_node"] = node
node_data["instance_id"] = node
# If the creator plug-in specifies
families = self.get_publish_families()
@ -601,6 +603,13 @@ class RenderlayerCreator(NewCreator, MayaCreatorBase):
class Loader(LoaderPlugin):
hosts = ["maya"]
load_settings = {} # defined in settings
@classmethod
def apply_settings(cls, project_settings, system_settings):
super(Loader, cls).apply_settings(project_settings, system_settings)
cls.load_settings = project_settings['maya']['load']
def get_custom_namespace_and_group(self, context, options, loader_key):
"""Queries Settings to get custom template for namespace and group.
@ -613,12 +622,9 @@ class Loader(LoaderPlugin):
loader_key (str): key to get separate configuration from Settings
('reference_loader'|'import_loader')
"""
options["attach_to_root"] = True
asset = context['asset']
subset = context['subset']
settings = get_project_settings(context['project']['name'])
custom_naming = settings['maya']['load'][loader_key]
options["attach_to_root"] = True
custom_naming = self.load_settings[loader_key]
if not custom_naming['namespace']:
raise LoadError("No namespace specified in "
@ -627,6 +633,8 @@ class Loader(LoaderPlugin):
self.log.debug("No custom group_name, no group will be created.")
options["attach_to_root"] = False
asset = context['asset']
subset = context['subset']
formatting_data = {
"asset_name": asset['name'],
"asset_type": asset['type'],

View file

@ -20,6 +20,13 @@ class CreateRig(plugin.MayaCreator):
instance_node = instance.get("instance_node")
self.log.info("Creating Rig instance set up ...")
# TODOchange name (_controls_SET -> _rigs_SET)
controls = cmds.sets(name=subset_name + "_controls_SET", empty=True)
# TODOchange name (_out_SET -> _geo_SET)
pointcache = cmds.sets(name=subset_name + "_out_SET", empty=True)
cmds.sets([controls, pointcache], forceElement=instance_node)
skeleton = cmds.sets(
name=subset_name + "_skeletonAnim_SET", empty=True)
skeleton_mesh = cmds.sets(
name=subset_name + "_skeletonMesh_SET", empty=True)
cmds.sets([controls, pointcache,
skeleton, skeleton_mesh], forceElement=instance_node)

View file

@ -1,4 +1,46 @@
import openpype.hosts.maya.api.plugin
import maya.cmds as cmds
def _process_reference(file_url, name, namespace, options):
    """Load files by referencing scene in Maya.

    Args:
        file_url (str): Filepath of the objects to be loaded.
        name (str): Subset name.
        namespace (str): Namespace (replaced by a freshly generated
            unique namespace).
        options (dict): Loader options; reads "attach_to_root" and
            "group_name".

    Returns:
        list: Newly created reference nodes.
    """
    from openpype.hosts.maya.api.lib import unique_namespace

    # Use the subset base name (number suffix split off) so the
    # generated namespace is unique per loaded asset.
    base_name = name.split("_")[0]
    extension = file_url.split(".")[-1]
    namespace = unique_namespace(
        "{}_".format(base_name),
        format="%03d",
        suffix="_{}".format(extension)
    )

    attach_to_root = options.get("attach_to_root", True)
    group_name = options["group_name"]
    if not attach_to_root:
        # No group shall be created; fall back to the namespace.
        group_name = namespace

    return cmds.file(
        file_url,
        namespace=namespace,
        sharedReferenceFile=False,
        groupReference=attach_to_root,
        groupName=group_name,
        reference=True,
        returnNewNodes=True,
    )
class AbcLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
@ -16,44 +58,42 @@ class AbcLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
def process_reference(self, context, name, namespace, options):
import maya.cmds as cmds
from openpype.hosts.maya.api.lib import unique_namespace
cmds.loadPlugin("AbcImport.mll", quiet=True)
# Prevent identical alembic nodes from being shared
# Create unique namespace for the cameras
# Get name from asset being loaded
# Assuming name is subset name from the animation, we split the number
# suffix from the name to ensure the namespace is unique
name = name.split("_")[0]
namespace = unique_namespace(
"{}_".format(name),
format="%03d",
suffix="_abc"
)
attach_to_root = options.get("attach_to_root", True)
group_name = options["group_name"]
# no group shall be created
if not attach_to_root:
group_name = namespace
# hero_001 (abc)
# asset_counter{optional}
path = self.filepath_from_context(context)
file_url = self.prepare_root_value(path,
context["project"]["name"])
nodes = cmds.file(file_url,
namespace=namespace,
sharedReferenceFile=False,
groupReference=attach_to_root,
groupName=group_name,
reference=True,
returnNewNodes=True)
nodes = _process_reference(file_url, name, namespace, options)
# load colorbleed ID attribute
self[:] = nodes
return nodes
class FbxLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
    """Reference FBX files of animation or camera subsets."""

    families = ["animation",
                "camera"]
    representations = ["fbx"]

    label = "Reference animation"
    order = -10
    icon = "code-fork"
    color = "orange"

    def process_reference(self, context, name, namespace, options):
        # The FBX plug-in may not be loaded yet in this session.
        cmds.loadPlugin("fbx4maya.mll", quiet=True)

        path = self.filepath_from_context(context)
        file_url = self.prepare_root_value(
            path, context["project"]["name"])

        nodes = _process_reference(file_url, name, namespace, options)
        self[:] = nodes
        return nodes

View file

@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
from maya import cmds # noqa
import pyblish.api
from openpype.pipeline import OptionalPyblishPluginMixin
class CollectFbxAnimation(pyblish.api.InstancePlugin,
                          OptionalPyblishPluginMixin):
    """Collect Animated Rig Data for FBX Extractor.

    Looks for object sets whose name ends with "skeletonAnim_SET";
    when found, tags the instance with the "animation.fbx" family and
    stores the set members in ``instance.data["animated_skeleton"]``.
    """
    order = pyblish.api.CollectorOrder + 0.2
    label = "Collect Fbx Animation"
    hosts = ["maya"]
    families = ["animation"]
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return
        skeleton_sets = [
            i for i in instance
            if i.endswith("skeletonAnim_SET")
        ]
        if not skeleton_sets:
            return

        instance.data["families"].append("animation.fbx")
        animated_skeleton = []
        for skeleton_set in skeleton_sets:
            skeleton_content = cmds.sets(skeleton_set, query=True)
            self.log.debug(
                "Collected animated skeleton data: {}".format(
                    skeleton_content
                ))
            if skeleton_content:
                # Accumulate members instead of assigning, so multiple
                # skeletonAnim_SET nodes are all collected (previously
                # only the last set's members survived the loop).
                animated_skeleton.extend(skeleton_content)
        instance.data["animated_skeleton"] = animated_skeleton

View file

@ -22,7 +22,8 @@ class CollectRigSets(pyblish.api.InstancePlugin):
def process(self, instance):
# Find required sets by suffix
searching = {"controls_SET", "out_SET"}
searching = {"controls_SET", "out_SET",
"skeletonAnim_SET", "skeletonMesh_SET"}
found = {}
for node in cmds.ls(instance, exactType="objectSet"):
for suffix in searching:

View file

@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
from maya import cmds # noqa
import pyblish.api
class CollectSkeletonMesh(pyblish.api.InstancePlugin):
    """Collect Static Rig Data for FBX Extractor.

    Reads the "skeletonMesh_SET" from the instance rig sets and, when
    it has members, adds the "rig.fbx" family and stores the members
    in ``instance.data["skeleton_mesh"]``.
    """
    order = pyblish.api.CollectorOrder + 0.2
    label = "Collect Skeleton Mesh"
    hosts = ["maya"]
    families = ["rig"]

    def process(self, instance):
        skeleton_mesh_set = instance.data["rig_sets"].get(
            "skeletonMesh_SET")
        if not skeleton_mesh_set:
            self.log.debug(
                "No skeletonMesh_SET found. "
                "Skipping collecting of skeleton mesh..."
            )
            return

        # Pin start/end to the current frame to ensure a single frame
        # export.
        current_frame = cmds.currentTime(query=True)
        instance.data["frameStart"] = current_frame
        instance.data["frameEnd"] = current_frame

        instance.data["skeleton_mesh"] = []
        members = cmds.sets(skeleton_mesh_set, query=True) or []
        if not members:
            self.log.debug(
                "No object nodes in skeletonMesh_SET. "
                "Skipping collecting of skeleton mesh..."
            )
            return

        instance.data["families"] += ["rig.fbx"]
        instance.data["skeleton_mesh"] = members
        self.log.debug(
            "Collected skeletonMesh_SET members: {}".format(
                members
            ))

View file

@ -0,0 +1,65 @@
# -*- coding: utf-8 -*-
import os
from maya import cmds # noqa
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.maya.api import fbx
from openpype.hosts.maya.api.lib import (
namespaced, get_namespace, strip_namespace
)
class ExtractFBXAnimation(publish.Extractor):
    """Extract animated rig members in FBX format from Maya.

    Exports the collected "animated_skeleton" members from within the
    rig's namespace so the exported FBX does not include the namespace
    but preserves the node names as existing in the rig workfile.
    """

    order = pyblish.api.ExtractorOrder
    label = "Extract Animation (FBX)"
    hosts = ["maya"]
    families = ["animation.fbx"]

    def process(self, instance):
        # Define output path
        staging_dir = self.staging_dir(instance)
        filename = "{0}.fbx".format(instance.name)
        path = os.path.join(staging_dir, filename)
        path = path.replace("\\", "/")

        out_members = instance.data.get("animated_skeleton", [])
        if not out_members:
            # Guard: the collector can tag the family while the set has
            # no members; `out_members[0]` below would raise IndexError.
            self.log.debug(
                "No animated skeleton members found. Skipping export.")
            return

        fbx_exporter = fbx.FBXExtractor(log=self.log)
        # Export options: keep constraints, skeleton definitions and
        # referenced asset content in the FBX.
        instance.data["constraints"] = True
        instance.data["skeletonDefinitions"] = True
        instance.data["referencedAssetsContent"] = True
        fbx_exporter.set_options_from_instance(instance)

        # Export from the rig's namespace so that the exported
        # FBX does not include the namespace but preserves the node
        # names as existing in the rig workfile
        namespace = get_namespace(out_members[0])
        relative_out_members = [
            strip_namespace(node, namespace) for node in out_members
        ]
        with namespaced(
            ":" + namespace,
            new=False,
            relative_names=True
        ) as namespace:
            fbx_exporter.export(relative_out_members, path)

        representations = instance.data.setdefault("representations", [])
        representations.append({
            'name': 'fbx',
            'ext': 'fbx',
            'files': filename,
            "stagingDir": staging_dir
        })

        self.log.debug(
            "Extracted FBX animation to: {0}".format(path))

View file

@ -0,0 +1,54 @@
# -*- coding: utf-8 -*-
import os
from maya import cmds # noqa
import pyblish.api
from openpype.pipeline import publish
from openpype.pipeline.publish import OptionalPyblishPluginMixin
from openpype.hosts.maya.api import fbx
class ExtractSkeletonMesh(publish.Extractor,
                          OptionalPyblishPluginMixin):
    """Extract the collected skeleton mesh set in FBX format from Maya.

    Exports the "skeleton_mesh" members with constraints and skeleton
    definitions included.
    """

    order = pyblish.api.ExtractorOrder
    label = "Extract Skeleton Mesh"
    hosts = ["maya"]
    families = ["rig.fbx"]

    def process(self, instance):
        if not self.is_active(instance.data):
            return
        # Build the target file path inside the staging directory.
        staging_dir = self.staging_dir(instance)
        filename = "{0}.fbx".format(instance.name)
        path = os.path.join(staging_dir, filename)

        exporter = fbx.FBXExtractor(log=self.log)
        members = instance.data.get("skeleton_mesh", [])

        # Bake constraints and skeleton definitions into the export.
        instance.data["constraints"] = True
        instance.data["skeletonDefinitions"] = True
        exporter.set_options_from_instance(instance)

        # Export
        exporter.export(members, path)

        representations = instance.data.setdefault("representations", [])
        representations.append({
            'name': 'fbx',
            'ext': 'fbx',
            'files': filename,
            "stagingDir": staging_dir
        })

        self.log.debug("Extract FBX to: {0}".format(path))

View file

@ -0,0 +1,66 @@
import pyblish.api
import openpype.hosts.maya.api.action
from openpype.pipeline.publish import (
PublishValidationError,
ValidateContentsOrder
)
from maya import cmds
class ValidateAnimatedReferenceRig(pyblish.api.InstancePlugin):
    """Validate all nodes in skeletonAnim_SET are referenced transforms."""

    order = ValidateContentsOrder
    hosts = ["maya"]
    families = ["animation.fbx"]
    label = "Animated Reference Rig"
    # Node types allowed as members of skeletonAnim_SET.
    accepted_controllers = ["transform", "locator"]
    actions = [openpype.hosts.maya.api.action.SelectInvalidAction]

    def process(self, instance):
        animated_sets = instance.data.get("animated_skeleton", [])
        if not animated_sets:
            self.log.debug(
                "No nodes found in skeletonAnim_SET. "
                "Skipping validation of animated reference rig..."
            )
            return

        for animated_reference in animated_sets:
            # referenceQuery returns a boolean already; no cast needed.
            if not cmds.referenceQuery(animated_reference,
                                       isNodeReferenced=True):
                raise PublishValidationError(
                    "All the content in skeletonAnim_SET"
                    " should be referenced nodes"
                )
        invalid_controls = self.validate_controls(animated_sets)
        if invalid_controls:
            raise PublishValidationError(
                "All the content in skeletonAnim_SET"
                " should be transforms"
            )

    @classmethod
    def validate_controls(cls, set_members):
        """Return set members whose node type is not accepted.

        Args:
            set_members (list): Nodes of the skeletonAnim_SET.

        Returns:
            list: Nodes whose node type is not in
                ``accepted_controllers``.
        """
        # Validate control types
        invalid = []
        set_members = cmds.ls(set_members, long=True)
        for node in set_members:
            if cmds.nodeType(node) not in cls.accepted_controllers:
                invalid.append(node)
        return invalid

View file

@ -30,18 +30,21 @@ class ValidatePluginPathAttributes(pyblish.api.InstancePlugin):
def get_invalid(cls, instance):
invalid = list()
file_attr = cls.attribute
if not file_attr:
file_attrs = cls.attribute
if not file_attrs:
return invalid
# Consider only valid node types to avoid "Unknown object type" warning
all_node_types = set(cmds.allNodeTypes())
node_types = [key for key in file_attr.keys() if key in all_node_types]
node_types = [
key for key in file_attrs.keys()
if key in all_node_types
]
for node, node_type in pairwise(cmds.ls(type=node_types,
showType=True)):
# get the filepath
file_attr = "{}.{}".format(node, file_attr[node_type])
file_attr = "{}.{}".format(node, file_attrs[node_type])
filepath = cmds.getAttr(file_attr)
if filepath and not os.path.exists(filepath):

View file

@ -1,6 +1,6 @@
import pyblish.api
from maya import cmds
import openpype.hosts.maya.api.action
from openpype.pipeline.publish import (
PublishValidationError,
ValidateContentsOrder
@ -20,33 +20,27 @@ class ValidateRigContents(pyblish.api.InstancePlugin):
label = "Rig Contents"
hosts = ["maya"]
families = ["rig"]
action = [openpype.hosts.maya.api.action.SelectInvalidAction]
accepted_output = ["mesh", "transform"]
accepted_controllers = ["transform"]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise PublishValidationError(
"Invalid rig content. See log for details.")
@classmethod
def get_invalid(cls, instance):
# Find required sets by suffix
required = ["controls_SET", "out_SET"]
missing = [
key for key in required if key not in instance.data["rig_sets"]
]
if missing:
raise PublishValidationError(
"%s is missing sets: %s" % (instance, ", ".join(missing))
)
required, rig_sets = cls.get_nodes(instance)
controls_set = instance.data["rig_sets"]["controls_SET"]
out_set = instance.data["rig_sets"]["out_SET"]
cls.validate_missing_objectsets(instance, required, rig_sets)
# Ensure there are at least some transforms or dag nodes
# in the rig instance
set_members = instance.data['setMembers']
if not cmds.ls(set_members, type="dagNode", long=True):
raise PublishValidationError(
"No dag nodes in the pointcache instance. "
"(Empty instance?)"
)
controls_set = rig_sets["controls_SET"]
out_set = rig_sets["out_SET"]
# Ensure contents in sets and retrieve long path for all objects
output_content = cmds.sets(out_set, query=True) or []
@ -61,49 +55,92 @@ class ValidateRigContents(pyblish.api.InstancePlugin):
)
controls_content = cmds.ls(controls_content, long=True)
# Validate members are inside the hierarchy from root node
root_nodes = cmds.ls(set_members, assemblies=True, long=True)
hierarchy = cmds.listRelatives(root_nodes, allDescendents=True,
fullPath=True) + root_nodes
hierarchy = set(hierarchy)
invalid_hierarchy = []
for node in output_content:
if node not in hierarchy:
invalid_hierarchy.append(node)
for node in controls_content:
if node not in hierarchy:
invalid_hierarchy.append(node)
rig_content = output_content + controls_content
invalid_hierarchy = cls.invalid_hierarchy(instance, rig_content)
# Additional validations
invalid_geometry = self.validate_geometry(output_content)
invalid_controls = self.validate_controls(controls_content)
invalid_geometry = cls.validate_geometry(output_content)
invalid_controls = cls.validate_controls(controls_content)
error = False
if invalid_hierarchy:
self.log.error("Found nodes which reside outside of root group "
cls.log.error("Found nodes which reside outside of root group "
"while they are set up for publishing."
"\n%s" % invalid_hierarchy)
error = True
if invalid_controls:
self.log.error("Only transforms can be part of the controls_SET."
cls.log.error("Only transforms can be part of the controls_SET."
"\n%s" % invalid_controls)
error = True
if invalid_geometry:
self.log.error("Only meshes can be part of the out_SET\n%s"
cls.log.error("Only meshes can be part of the out_SET\n%s"
% invalid_geometry)
error = True
if error:
return invalid_hierarchy + invalid_controls + invalid_geometry
@classmethod
def validate_missing_objectsets(cls, instance,
required_objsets, rig_sets):
"""Validate missing objectsets in rig sets
Args:
instance (str): instance
required_objsets (list): list of objectset names
rig_sets (list): list of rig sets
Raises:
PublishValidationError: When the error is raised, it will show
which instance has the missing object sets
"""
missing = [
key for key in required_objsets if key not in rig_sets
]
if missing:
raise PublishValidationError(
"Invalid rig content. See log for details.")
"%s is missing sets: %s" % (instance, ", ".join(missing))
)
def validate_geometry(self, set_members):
"""Check if the out set passes the validations
@classmethod
def invalid_hierarchy(cls, instance, content):
"""
Check if all rig set members are within the hierarchy of the rig root
Checks if all its set members are within the hierarchy of the root
Args:
instance (str): instance
content (list): list of content from rig sets
Raises:
PublishValidationError: It means no dag nodes in
the rig instance
Returns:
list: invalid hierarchy
"""
# Ensure there are at least some transforms or dag nodes
# in the rig instance
set_members = instance.data['setMembers']
if not cmds.ls(set_members, type="dagNode", long=True):
raise PublishValidationError(
"No dag nodes in the rig instance. "
"(Empty instance?)"
)
# Validate members are inside the hierarchy from root node
root_nodes = cmds.ls(set_members, assemblies=True, long=True)
hierarchy = cmds.listRelatives(root_nodes, allDescendents=True,
fullPath=True) + root_nodes
hierarchy = set(hierarchy)
invalid_hierarchy = []
for node in content:
if node not in hierarchy:
invalid_hierarchy.append(node)
return invalid_hierarchy
@classmethod
def validate_geometry(cls, set_members):
"""
Checks if the node types of the set members valid
Args:
@ -122,15 +159,13 @@ class ValidateRigContents(pyblish.api.InstancePlugin):
fullPath=True) or []
all_shapes = cmds.ls(set_members + shapes, long=True, shapes=True)
for shape in all_shapes:
if cmds.nodeType(shape) not in self.accepted_output:
if cmds.nodeType(shape) not in cls.accepted_output:
invalid.append(shape)
return invalid
def validate_controls(self, set_members):
"""Check if the controller set passes the validations
Checks if all its set members are within the hierarchy of the root
@classmethod
def validate_controls(cls, set_members):
"""
Checks if the control set members are allowed node types.
Checks if the node types of the set members valid
Args:
@ -144,7 +179,80 @@ class ValidateRigContents(pyblish.api.InstancePlugin):
# Validate control types
invalid = []
for node in set_members:
if cmds.nodeType(node) not in self.accepted_controllers:
if cmds.nodeType(node) not in cls.accepted_controllers:
invalid.append(node)
return invalid
@classmethod
def get_nodes(cls, instance):
"""Get the target objectsets and rig sets nodes
Args:
instance (str): instance
Returns:
tuple: 2-tuple of list of objectsets,
list of rig sets nodes
"""
objectsets = ["controls_SET", "out_SET"]
rig_sets_nodes = instance.data.get("rig_sets", [])
return objectsets, rig_sets_nodes
class ValidateSkeletonRigContents(ValidateRigContents):
    """Ensure skeleton rigs contains pipeline-critical content

    The rigs optionally contain at least two object sets:
        "skeletonMesh_SET" - Set of the skinned meshes
                             with bone hierarchies
    """

    order = ValidateContentsOrder
    label = "Skeleton Rig Contents"
    hosts = ["maya"]
    families = ["rig.fbx"]

    @classmethod
    def get_invalid(cls, instance):
        """Return invalid nodes of the skeleton mesh set, if any.

        Args:
            instance (pyblish.api.Instance): Publish instance.

        Returns:
            list or None: Offending nodes, or None when valid.
        """
        objectsets, skeleton_mesh_nodes = cls.get_nodes(instance)
        cls.validate_missing_objectsets(
            instance, objectsets, instance.data["rig_sets"])

        # Ensure contents in sets and retrieve long path for all objects
        # (the previous redundant read of "skeleton_mesh" was dead code;
        # get_nodes() already supplies those members).
        output_content = cmds.ls(skeleton_mesh_nodes, long=True)

        invalid_hierarchy = cls.invalid_hierarchy(
            instance, output_content)
        invalid_geometry = cls.validate_geometry(output_content)

        error = False
        if invalid_hierarchy:
            cls.log.error("Found nodes which reside outside of root group "
                          "while they are set up for publishing."
                          "\n%s" % invalid_hierarchy)
            error = True
        if invalid_geometry:
            # Report the offending geometry; previously this branch
            # repeated the hierarchy message and list by mistake.
            cls.log.error("Found nodes of unsupported type in the "
                          "skeletonMesh_SET.\n%s" % invalid_geometry)
            error = True
        if error:
            return invalid_hierarchy + invalid_geometry

    @classmethod
    def get_nodes(cls, instance):
        """Get the target objectsets and rig sets nodes

        Args:
            instance (pyblish.api.Instance): Publish instance.

        Returns:
            tuple: 2-tuple of list of objectsets,
                list of rig sets nodes
        """
        objectsets = ["skeletonMesh_SET"]
        skeleton_mesh_nodes = instance.data.get("skeleton_mesh", [])
        return objectsets, skeleton_mesh_nodes

View file

@ -59,7 +59,7 @@ class ValidateRigControllers(pyblish.api.InstancePlugin):
@classmethod
def get_invalid(cls, instance):
controls_set = instance.data["rig_sets"].get("controls_SET")
controls_set = cls.get_node(instance)
if not controls_set:
cls.log.error(
"Must have 'controls_SET' in rig instance"
@ -189,7 +189,7 @@ class ValidateRigControllers(pyblish.api.InstancePlugin):
@classmethod
def repair(cls, instance):
controls_set = instance.data["rig_sets"].get("controls_SET")
controls_set = cls.get_node(instance)
if not controls_set:
cls.log.error(
"Unable to repair because no 'controls_SET' found in rig "
@ -228,3 +228,64 @@ class ValidateRigControllers(pyblish.api.InstancePlugin):
default = cls.CONTROLLER_DEFAULTS[attr]
cls.log.info("Setting %s to %s" % (plug, default))
cmds.setAttr(plug, default)
    @classmethod
    def get_node(cls, instance):
        """Return the "controls_SET" object set node of the instance.

        Args:
            instance (pyblish.api.Instance): Publish instance.

        Returns:
            str or None: Name of the controls_SET node if collected;
                ``rig_sets`` maps set suffix to the set node name.
        """
        return instance.data["rig_sets"].get("controls_SET")
class ValidateSkeletonRigControllers(ValidateRigControllers):
    """Validate rig controller for skeletonAnim_SET

    Controls must have the transformation attributes on their default
    values of translate zero, rotate zero and scale one when they are
    unlocked attributes.

    Unlocked keyable attributes may not have any incoming connections. If
    these connections are required for the rig then lock the attributes.

    The visibility attribute must be locked.

    Note that `repair` will:
        - Lock all visibility attributes
        - Reset all default values for translate, rotate, scale
        - Break all incoming connections to keyable attributes

    Inherits the validation/repair logic from ValidateRigControllers and
    only redirects the target set via ``get_node``.
    """
    order = ValidateContentsOrder + 0.05
    label = "Skeleton Rig Controllers"
    hosts = ["maya"]
    families = ["rig.fbx"]

    # Default controller values expected on unlocked transform attributes.
    CONTROLLER_DEFAULTS = {
        "translateX": 0,
        "translateY": 0,
        "translateZ": 0,
        "rotateX": 0,
        "rotateY": 0,
        "rotateZ": 0,
        "scaleX": 1,
        "scaleY": 1,
        "scaleZ": 1
    }

    @classmethod
    def get_node(cls, instance):
        """Return the "skeletonMesh_SET" object set node of the instance.

        Args:
            instance (pyblish.api.Instance): Publish instance.

        Returns:
            str or None: Name of the skeletonMesh_SET node, if present.
        """
        return instance.data["rig_sets"].get("skeletonMesh_SET")

View file

@ -46,7 +46,7 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin):
def get_invalid(cls, instance):
"""Get all nodes which do not match the criteria"""
out_set = instance.data["rig_sets"].get("out_SET")
out_set = cls.get_node(instance)
if not out_set:
return []
@ -85,3 +85,45 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin):
continue
lib.set_id(node, sibling_id, overwrite=True)
    @classmethod
    def get_node(cls, instance):
        """Return the "out_SET" object set node of the instance.

        Args:
            instance (pyblish.api.Instance): Publish instance.

        Returns:
            str or None: Name of the out_SET node if collected;
                ``rig_sets`` maps set suffix to the set node name.
        """
        return instance.data["rig_sets"].get("out_SET")
class ValidateSkeletonRigOutSetNodeIds(ValidateRigOutSetNodeIds):
    """Validate if deformed shapes have related IDs to the original shapes
    from skeleton set.

    When a deformer is applied in the scene on a referenced mesh that already
    had deformers then Maya will create a new shape node for the mesh that
    does not have the original id. This validator checks whether the ids are
    valid on all the shape nodes in the instance.

    Inherits all checking/repair logic from ValidateRigOutSetNodeIds and
    only redirects the target set via ``get_node``.
    """
    order = ValidateContentsOrder
    families = ["rig.fbx"]
    hosts = ['maya']
    label = 'Skeleton Rig Out Set Node Ids'

    @classmethod
    def get_node(cls, instance):
        """Return the "skeletonMesh_SET" object set node of the instance.

        Args:
            instance (pyblish.api.Instance): Publish instance.

        Returns:
            str or None: Name of the skeletonMesh_SET node, if present.
        """
        return instance.data["rig_sets"].get(
            "skeletonMesh_SET")

View file

@ -47,7 +47,7 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin):
invalid = {}
if compute:
out_set = instance.data["rig_sets"].get("out_SET")
out_set = cls.get_node(instance)
if not out_set:
instance.data["mismatched_output_ids"] = invalid
return invalid
@ -115,3 +115,40 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin):
"Multiple matched ids found. Please repair manually: "
"{}".format(multiple_ids_match)
)
    @classmethod
    def get_node(cls, instance):
        """Return the "out_SET" object set node of the instance.

        Args:
            instance (pyblish.api.Instance): Publish instance.

        Returns:
            str or None: Name of the out_SET node if collected;
                ``rig_sets`` maps set suffix to the set node name.
        """
        return instance.data["rig_sets"].get("out_SET")
class ValidateSkeletonRigOutputIds(ValidateRigOutputIds):
"""Validate rig output ids from the skeleton sets.
Ids must share the same id as similarly named nodes in the scene. This is
to ensure the id from the model is preserved through animation.
"""
order = ValidateContentsOrder + 0.05
label = "Skeleton Rig Output Ids"
hosts = ["maya"]
families = ["rig.fbx"]
@classmethod
def get_node(cls, instance):
"""Get target object nodes from skeletonMesh_SET
Args:
instance (str): instance
Returns:
list: list of object nodes from skeletonMesh_SET
"""
return instance.data["rig_sets"].get("skeletonMesh_SET")

View file

@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
"""Plugin for validating naming conventions."""
from maya import cmds
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder,
OptionalPyblishPluginMixin,
PublishValidationError
)
class ValidateSkeletonTopGroupHierarchy(pyblish.api.InstancePlugin,
                                        OptionalPyblishPluginMixin):
    """Validate the top group hierarchy in the skeleton SETs.

    Makes sure the objects collected from ``skeletonMesh_SET`` are always
    at the top of the scene hierarchy.

    Raises:
        PublishValidationError: If any collected node lives deeper in the
            hierarchy than allowed.
    """
    order = ValidateContentsOrder + 0.05
    label = "Skeleton Rig Top Group Hierarchy"
    families = ["rig.fbx"]
    # Declared optional so the OptionalPyblishPluginMixin toggle from the
    # project settings actually takes effect.
    optional = True

    def process(self, instance):
        # BUGFIX: honor the optional/active toggle; the mixin was inherited
        # but never consulted, so disabling the plugin had no effect.
        if not self.is_active(instance.data):
            return

        invalid = []
        # BUGFIX: pyblish `instance.data` is a dict; calling it like a
        # function raised TypeError. Use `.get()` with a default instead.
        skeleton_mesh_data = instance.data.get("skeleton_mesh", [])
        if skeleton_mesh_data:
            invalid = self.get_top_hierarchy(skeleton_mesh_data)
            if invalid:
                raise PublishValidationError(
                    "The skeletonMesh_SET includes the object which "
                    "is not at the top hierarchy: {}".format(invalid))

    def get_top_hierarchy(self, targets):
        """Return targets that sit too deep in the DAG hierarchy.

        NOTE(review): nodes up to two path components deep
        (``|group|node``) are accepted as "top" — confirm this matches
        the intended contract.

        Args:
            targets (list): Maya node names to check.

        Returns:
            list: Long names of nodes deeper than the allowed level.
        """
        targets = cmds.ls(targets, long=True)  # ensure long names
        return [
            target for target in targets if target.count("|") > 2
        ]

View file

@ -69,11 +69,8 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin,
invalid = []
project_settings = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
collision_prefixes = (
project_settings
instance.context.data["project_settings"]
["maya"]
["create"]
["CreateUnrealStaticMesh"]

View file

@ -3425,34 +3425,6 @@ def create_viewer_profile_string(viewer, display=None, path_like=False):
return "{} ({})".format(viewer, display)
def get_head_filename_without_hashes(original_path, name):
"""Function to get the renamed head filename without frame hashes
To avoid the system being confused on finding the filename with
frame hashes if the head of the filename has the hashed symbol
Examples:
>>> get_head_filename_without_hashes("render.####.exr", "baking")
render.baking.####.exr
>>> get_head_filename_without_hashes("render.%04d.exr", "tag")
render.tag.%d.exr
>>> get_head_filename_without_hashes("exr.####.exr", "foo")
exr.foo.%04d.exr
Args:
original_path (str): the filename with frame hashes
name (str): the name of the tags
Returns:
str: the renamed filename with the tag
"""
filename = os.path.basename(original_path)
def insert_name(matchobj):
return "{}.{}".format(name, matchobj.group(0))
return re.sub(r"(%\d*d)|#+", insert_name, filename)
def get_filenames_without_hash(filename, frame_start, frame_end):
"""Get filenames without frame hash
i.e. "renderCompositingMain.baking.0001.exr"

View file

@ -39,7 +39,6 @@ from .lib import (
get_view_process_node,
get_viewer_config_from_string,
deprecated,
get_head_filename_without_hashes,
get_filenames_without_hash
)
from .pipeline import (
@ -816,19 +815,20 @@ class ExporterReviewMov(ExporterReview):
self.log.info("File info was set...")
self.file = self.fhead + self.name + ".{}".format(self.ext)
if ".{}".format(self.ext) not in VIDEO_EXTENSIONS:
# filename would be with frame hashes if
# the file extension is not in video format
filename = get_head_filename_without_hashes(
self.path_in, self.name)
self.file = filename
# make sure the filename are in
# correct image output format
if ".{}".format(self.ext) not in self.file:
filename_no_ext, _ = os.path.splitext(filename)
self.file = "{}.{}".format(filename_no_ext, self.ext)
if ".{}".format(self.ext) in VIDEO_EXTENSIONS:
self.file = "{}{}.{}".format(
self.fhead, self.name, self.ext)
else:
# Output is image (or image sequence)
# When the file is an image it's possible it
# has extra information after the `fhead` that
# we want to preserve, e.g. like frame numbers
# or frames hashes like `####`
filename_no_ext = os.path.splitext(
os.path.basename(self.path_in))[0]
after_head = filename_no_ext[len(self.fhead):]
self.file = "{}{}.{}.{}".format(
self.fhead, self.name, after_head, self.ext)
self.path = os.path.join(
self.staging_dir, self.file).replace("\\", "/")

View file

@ -2,7 +2,7 @@ import nuke
import pyblish.api
class CollectNukeInstanceData(pyblish.api.InstancePlugin):
class CollectInstanceData(pyblish.api.InstancePlugin):
"""Collect Nuke instance data
"""

View file

@ -33,11 +33,13 @@ class ExtractReviewIntermediates(publish.Extractor):
"""
nuke_publish = project_settings["nuke"]["publish"]
deprecated_setting = nuke_publish["ExtractReviewDataMov"]
current_setting = nuke_publish["ExtractReviewIntermediates"]
current_setting = nuke_publish.get("ExtractReviewIntermediates")
if deprecated_setting["enabled"]:
# Use deprecated settings if they are still enabled
cls.viewer_lut_raw = deprecated_setting["viewer_lut_raw"]
cls.outputs = deprecated_setting["outputs"]
elif current_setting is None:
pass
elif current_setting["enabled"]:
cls.viewer_lut_raw = current_setting["viewer_lut_raw"]
cls.outputs = current_setting["outputs"]

View file

@ -6,13 +6,10 @@ from .utils import (
)
from .pipeline import (
install,
uninstall,
ResolveHost,
ls,
containerise,
update_container,
publish,
launch_workfiles_app,
maintained_selection,
remove_instance,
list_instances
@ -76,14 +73,10 @@ __all__ = [
"bmdvf",
# pipeline
"install",
"uninstall",
"ResolveHost",
"ls",
"containerise",
"update_container",
"reload_pipeline",
"publish",
"launch_workfiles_app",
"maintained_selection",
"remove_instance",
"list_instances",

View file

@ -5,11 +5,6 @@ from qtpy import QtWidgets, QtCore
from openpype.tools.utils import host_tools
from .pipeline import (
publish,
launch_workfiles_app
)
def load_stylesheet():
path = os.path.join(os.path.dirname(__file__), "menu_style.qss")
@ -113,7 +108,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
def on_workfile_clicked(self):
print("Clicked Workfile")
launch_workfiles_app()
host_tools.show_workfiles()
def on_create_clicked(self):
print("Clicked Create")
@ -121,7 +116,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
def on_publish_clicked(self):
print("Clicked Publish")
publish(None)
host_tools.show_publish(parent=None)
def on_load_clicked(self):
print("Clicked Load")

View file

@ -12,14 +12,24 @@ from openpype.pipeline import (
schema,
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from openpype.tools.utils import host_tools
from openpype.host import (
HostBase,
IWorkfileHost,
ILoadHost
)
from . import lib
from .utils import get_resolve_module
from .workio import (
open_file,
save_file,
file_extensions,
has_unsaved_changes,
work_root,
current_file
)
log = Logger.get_logger(__name__)
@ -32,53 +42,56 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
def install():
"""Install resolve-specific functionality of avalon-core.
class ResolveHost(HostBase, IWorkfileHost, ILoadHost):
name = "resolve"
This is where you install menus and register families, data
and loaders into resolve.
def install(self):
"""Install resolve-specific functionality of avalon-core.
It is called automatically when installing via `api.install(resolve)`.
This is where you install menus and register families, data
and loaders into resolve.
See the Maya equivalent for inspiration on how to implement this.
It is called automatically when installing via `api.install(resolve)`.
"""
See the Maya equivalent for inspiration on how to implement this.
log.info("openpype.hosts.resolve installed")
"""
pyblish.register_host("resolve")
pyblish.register_plugin_path(PUBLISH_PATH)
log.info("Registering DaVinci Resovle plug-ins..")
log.info("openpype.hosts.resolve installed")
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
pyblish.register_host(self.name)
pyblish.register_plugin_path(PUBLISH_PATH)
print("Registering DaVinci Resolve plug-ins..")
# register callback for switching publishable
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
get_resolve_module()
# register callback for switching publishable
pyblish.register_callback("instanceToggled",
on_pyblish_instance_toggled)
get_resolve_module()
def uninstall():
"""Uninstall all that was installed
def open_workfile(self, filepath):
return open_file(filepath)
This is where you undo everything that was done in `install()`.
That means, removing menus, deregistering families and data
and everything. It should be as though `install()` was never run,
because odds are calling this function means the user is interested
in re-installing shortly afterwards. If, for example, he has been
modifying the menu or registered families.
def save_workfile(self, filepath=None):
return save_file(filepath)
"""
pyblish.deregister_host("resolve")
pyblish.deregister_plugin_path(PUBLISH_PATH)
log.info("Deregistering DaVinci Resovle plug-ins..")
def work_root(self, session):
return work_root(session)
deregister_loader_plugin_path(LOAD_PATH)
deregister_creator_plugin_path(CREATE_PATH)
def get_current_workfile(self):
return current_file()
# register callback for switching publishable
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
def workfile_has_unsaved_changes(self):
return has_unsaved_changes()
def get_workfile_extensions(self):
return file_extensions()
def get_containers(self):
return ls()
def containerise(timeline_item,
@ -206,15 +219,6 @@ def update_container(timeline_item, data=None):
return bool(lib.set_timeline_item_pype_tag(timeline_item, container))
def launch_workfiles_app(*args):
host_tools.show_workfiles()
def publish(parent):
"""Shorthand to publish from within host"""
return host_tools.show_publish()
@contextlib.contextmanager
def maintained_selection():
"""Maintain selection during context

View file

@ -17,7 +17,7 @@ def get_resolve_module():
# dont run if already loaded
if api.bmdvr:
log.info(("resolve module is assigned to "
f"`pype.hosts.resolve.api.bmdvr`: {api.bmdvr}"))
f"`openpype.hosts.resolve.api.bmdvr`: {api.bmdvr}"))
return api.bmdvr
try:
"""
@ -41,6 +41,10 @@ def get_resolve_module():
)
elif sys.platform.startswith("linux"):
expected_path = "/opt/resolve/libs/Fusion/Modules"
else:
raise NotImplementedError(
"Unsupported platform: {}".format(sys.platform)
)
# check if the default path has it...
print(("Unable to find module DaVinciResolveScript from "
@ -74,6 +78,6 @@ def get_resolve_module():
api.bmdvr = bmdvr
api.bmdvf = bmdvf
log.info(("Assigning resolve module to "
f"`pype.hosts.resolve.api.bmdvr`: {api.bmdvr}"))
f"`openpype.hosts.resolve.api.bmdvr`: {api.bmdvr}"))
log.info(("Assigning resolve module to "
f"`pype.hosts.resolve.api.bmdvf`: {api.bmdvf}"))
f"`openpype.hosts.resolve.api.bmdvf`: {api.bmdvf}"))

View file

@ -27,7 +27,8 @@ def ensure_installed_host():
if host:
return host
install_host(openpype.hosts.resolve.api)
host = openpype.hosts.resolve.api.ResolveHost()
install_host(host)
return registered_host()
@ -37,10 +38,10 @@ def launch_menu():
openpype.hosts.resolve.api.launch_pype_menu()
def open_file(path):
def open_workfile(path):
# Avoid the need to "install" the host
host = ensure_installed_host()
host.open_file(path)
host.open_workfile(path)
def main():
@ -49,7 +50,7 @@ def main():
if workfile_path and os.path.exists(workfile_path):
log.info(f"Opening last workfile: {workfile_path}")
open_file(workfile_path)
open_workfile(workfile_path)
else:
log.info("No last workfile set to open. Skipping..")

View file

@ -8,12 +8,13 @@ log = Logger.get_logger(__name__)
def main(env):
import openpype.hosts.resolve.api as bmdvr
from openpype.hosts.resolve.api import ResolveHost, launch_pype_menu
# activate resolve from openpype
install_host(bmdvr)
host = ResolveHost()
install_host(host)
bmdvr.launch_pype_menu()
launch_pype_menu()
if __name__ == "__main__":

View file

@ -0,0 +1,181 @@
# -*- coding: utf-8 -*-
"""Submitting render job to Deadline."""
import os
import getpass
import attr
from datetime import datetime
import bpy
from openpype.lib import is_running_from_build
from openpype.pipeline import legacy_io
from openpype.pipeline.farm.tools import iter_expected_files
from openpype.tests.lib import is_in_tests
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
@attr.s
class BlenderPluginInfo():
    """Deadline PluginInfo payload for the Blender plugin."""
    SceneFile = attr.ib(default=None)  # Input scene (.blend) file path
    Version = attr.ib(default=None)  # Blender version; mandatory for Deadline
    SaveFile = attr.ib(default=True)  # SaveFile flag forwarded to the Deadline Blender plugin
class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
    """Submit a Blender render instance as a job to Deadline.

    Builds the Deadline JobInfo/PluginInfo payloads from the publish
    instance and its context, forwards the pipeline environment to the
    farm and registers the expected output files on the job.
    """

    label = "Submit Render to Deadline"
    hosts = ["blender"]
    families = ["render.farm"]

    # Defaults; overridable from project settings.
    use_published = True
    priority = 50
    chunk_size = 1
    jobInfo = {}
    pluginInfo = {}
    group = None

    def get_job_info(self):
        """Build and return the Deadline JobInfo for the current instance."""
        job_info = DeadlineJobInfo(Plugin="Blender")
        job_info.update(self.jobInfo)

        instance = self._instance
        context = instance.context

        # Always use the original work file name for the Job name even when
        # rendering is done from the published Work File. The original work
        # file name is clearer because it can also have subversion strings,
        # etc. which are stripped for the published file.
        src_filepath = context.data["currentFile"]
        src_filename = os.path.basename(src_filepath)

        if is_in_tests():
            src_filename += datetime.now().strftime("%d%m%Y%H%M%S")

        job_info.Name = f"{src_filename} - {instance.name}"
        job_info.BatchName = src_filename
        # BUGFIX: the result of this lookup was previously discarded (a
        # statement with no assignment). Assign it so instances can request
        # a different Deadline Blender plugin, falling back to the current
        # value — mirrors the other host submitters. TODO confirm intent.
        job_info.Plugin = instance.data.get(
            "blenderRenderPlugin", job_info.Plugin)
        job_info.UserName = context.data.get("deadlineUser", getpass.getuser())

        # Deadline requires integers in frame range
        frames = "{start}-{end}x{step}".format(
            start=int(instance.data["frameStartHandle"]),
            end=int(instance.data["frameEndHandle"]),
            step=int(instance.data["byFrameStep"]),
        )
        job_info.Frames = frames

        job_info.Pool = instance.data.get("primaryPool")
        job_info.SecondaryPool = instance.data.get("secondaryPool")
        job_info.Comment = context.data.get("comment")
        job_info.Priority = instance.data.get("priority", self.priority)

        if self.group != "none" and self.group:
            job_info.Group = self.group

        attr_values = self.get_attr_values_from_data(instance.data)
        render_globals = instance.data.setdefault("renderGlobals", {})
        machine_list = attr_values.get("machineList", "")
        if machine_list:
            if attr_values.get("whitelist", True):
                machine_list_key = "Whitelist"
            else:
                machine_list_key = "Blacklist"
            render_globals[machine_list_key] = machine_list

        job_info.Priority = attr_values.get("priority")
        job_info.ChunkSize = attr_values.get("chunkSize")

        # Add options from RenderGlobals
        render_globals = instance.data.get("renderGlobals", {})
        job_info.update(render_globals)

        # Environment variables to forward to the farm job.
        keys = [
            "FTRACK_API_KEY",
            "FTRACK_API_USER",
            "FTRACK_SERVER",
            "OPENPYPE_SG_USER",
            "AVALON_PROJECT",
            "AVALON_ASSET",
            "AVALON_TASK",
            "AVALON_APP_NAME",
            # BUGFIX: these two entries were missing a separating comma,
            # which silently concatenated them into one bogus key
            # "OPENPYPE_DEVIS_TEST" so neither variable was forwarded.
            "OPENPYPE_DEV",
            "IS_TEST",
        ]

        # Add OpenPype version if we are running from build.
        if is_running_from_build():
            keys.append("OPENPYPE_VERSION")

        # Add mongo url if it's enabled
        if self._instance.context.data.get("deadlinePassMongoUrl"):
            keys.append("OPENPYPE_MONGO")

        environment = dict({key: os.environ[key] for key in keys
                            if key in os.environ}, **legacy_io.Session)

        for key in keys:
            value = environment.get(key)
            if not value:
                continue
            job_info.EnvironmentKeyValue[key] = value

        # to recognize job from PYPE for turning Event On/Off
        job_info.add_render_job_env_var()
        job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1"

        # Adding file dependencies.
        # NOTE(review): `asset_dependencies` is presumably provided by the
        # abstract base class / settings — confirm it is always defined.
        if self.asset_dependencies:
            dependencies = instance.context.data["fileDependencies"]
            for dependency in dependencies:
                job_info.AssetDependency += dependency

        # Add list of expected files to job
        # ---------------------------------
        exp = instance.data.get("expectedFiles")
        for filepath in iter_expected_files(exp):
            job_info.OutputDirectory += os.path.dirname(filepath)
            job_info.OutputFilename += os.path.basename(filepath)

        return job_info

    def get_plugin_info(self):
        """Return the Deadline PluginInfo payload as a plain dict."""
        plugin_info = BlenderPluginInfo(
            SceneFile=self.scene_path,
            Version=bpy.app.version_string,
            SaveFile=True,
        )

        plugin_payload = attr.asdict(plugin_info)

        # Patching with pluginInfo from settings
        for key, value in self.pluginInfo.items():
            plugin_payload[key] = value

        return plugin_payload

    def process_submission(self):
        """Validate expected files, store the output dir and submit the job.

        Raises:
            RuntimeError: If the instance has no expected output files.
        """
        instance = self._instance

        expected_files = instance.data["expectedFiles"]
        if not expected_files:
            raise RuntimeError("No Render Elements found!")

        first_file = next(iter_expected_files(expected_files))
        output_dir = os.path.dirname(first_file)
        instance.data["outputDir"] = output_dir
        instance.data["toBeRenderedOn"] = "deadline"

        payload = self.assemble_payload()
        return self.submit(payload)

    def from_published_scene(self):
        """Return the published scene path without path replacement.

        This is needed to set the correct path for the json metadata. Because
        the rendering path is set in the blend file during the collection,
        and the path is adjusted to use the published scene, this ensures that
        the metadata and the rendered files are in the same location.
        """
        return super().from_published_scene(False)

View file

@ -6,6 +6,7 @@ import requests
import pyblish.api
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import legacy_io
from openpype.pipeline.publish import (
OpenPypePyblishPluginMixin
@ -34,6 +35,8 @@ class FusionSubmitDeadline(
targets = ["local"]
# presets
plugin = None
priority = 50
chunk_size = 1
concurrent_tasks = 1
@ -173,7 +176,7 @@ class FusionSubmitDeadline(
"SecondaryPool": instance.data.get("secondaryPool"),
"Group": self.group,
"Plugin": "Fusion",
"Plugin": self.plugin,
"Frames": "{start}-{end}".format(
start=int(instance.data["frameStartHandle"]),
end=int(instance.data["frameEndHandle"])
@ -216,16 +219,29 @@ class FusionSubmitDeadline(
# Include critical variables with submission
keys = [
# TODO: This won't work if the slaves don't have access to
# these paths, such as if slaves are running Linux and the
# submitter is on Windows.
"PYTHONPATH",
"OFX_PLUGIN_PATH",
"FUSION9_MasterPrefs"
"FTRACK_API_KEY",
"FTRACK_API_USER",
"FTRACK_SERVER",
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_DEV",
"OPENPYPE_LOG_NO_COLORS",
"IS_TEST"
]
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **legacy_io.Session)
# to recognize render jobs
if AYON_SERVER_ENABLED:
environment["AYON_BUNDLE_NAME"] = os.environ["AYON_BUNDLE_NAME"]
render_job_label = "AYON_RENDER_JOB"
else:
render_job_label = "OPENPYPE_RENDER_JOB"
environment[render_job_label] = "1"
payload["JobInfo"].update({
"EnvironmentKeyValue%d" % index: "{key}={value}".format(
key=key,

View file

@ -238,9 +238,10 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
plugin_data["redshift_SeparateAovFiles"] = instance.data.get(
"separateAovFiles")
if instance.data["cameras"]:
plugin_info["Camera0"] = None
plugin_info["Camera"] = instance.data["cameras"][0]
plugin_info["Camera1"] = instance.data["cameras"][0]
camera = instance.data["cameras"][0]
plugin_info["Camera0"] = camera
plugin_info["Camera"] = camera
plugin_info["Camera1"] = camera
self.log.debug("plugin data:{}".format(plugin_data))
plugin_info.update(plugin_data)

View file

@ -96,7 +96,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
targets = ["local"]
hosts = ["fusion", "max", "maya", "nuke", "houdini",
"celaction", "aftereffects", "harmony"]
"celaction", "aftereffects", "harmony", "blender"]
families = ["render.farm", "render.frames_farm",
"prerender.farm", "prerender.frames_farm",
@ -107,6 +107,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
"redshift_rop"]
aov_filter = {"maya": [r".*([Bb]eauty).*"],
"blender": [r".*([Bb]eauty).*"],
"aftereffects": [r".*"], # for everything from AE
"harmony": [r".*"], # for everything from AE
"celaction": [r".*"],

View file

@ -96,7 +96,7 @@ class AyonDeadlinePlugin(DeadlinePlugin):
for path in exe_list.split(";"):
if path.startswith("~"):
path = os.path.expanduser(path)
expanded_paths.append(path)
expanded_paths.append(path)
exe = FileUtils.SearchFileList(";".join(expanded_paths))
if exe == "":

View file

@ -25,6 +25,7 @@ def _get_template_id(renderer):
:rtype: int
"""
# TODO: Use settings from context?
templates = get_system_settings()["modules"]["muster"]["templates_mapping"]
if not templates:
raise RuntimeError(("Muster template mapping missing in "

View file

@ -25,7 +25,10 @@ from openpype.tests.lib import is_in_tests
from .publish.lib import filter_pyblish_plugins
from .anatomy import Anatomy
from .template_data import get_template_data_with_names
from .template_data import (
get_template_data_with_names,
get_template_data
)
from .workfile import (
get_workfile_template_key,
get_custom_workfile_template_by_string_context,
@ -658,3 +661,70 @@ def get_process_id():
if _process_id is None:
_process_id = str(uuid.uuid4())
return _process_id
def get_current_context_template_data():
"""Template data for template fill from current context
Returns:
Dict[str, Any] of the following tokens and their values
Supported Tokens:
- Regular Tokens
- app
- user
- asset
- parent
- hierarchy
- folder[name]
- root[work, ...]
- studio[code, name]
- project[code, name]
- task[type, name, short]
- Context Specific Tokens
- assetData[frameStart]
- assetData[frameEnd]
- assetData[handleStart]
- assetData[handleEnd]
- assetData[frameStartHandle]
- assetData[frameEndHandle]
- assetData[resolutionHeight]
- assetData[resolutionWidth]
"""
# pre-prepare get_template_data args
current_context = get_current_context()
project_name = current_context["project_name"]
asset_name = current_context["asset_name"]
anatomy = Anatomy(project_name)
# prepare get_template_data args
project_doc = get_project(project_name)
asset_doc = get_asset_by_name(project_name, asset_name)
task_name = current_context["task_name"]
host_name = get_current_host_name()
# get regular template data
template_data = get_template_data(
project_doc, asset_doc, task_name, host_name
)
template_data["root"] = anatomy.roots
# get context specific vars
asset_data = asset_doc["data"].copy()
# compute `frameStartHandle` and `frameEndHandle`
if "frameStart" in asset_data and "handleStart" in asset_data:
asset_data["frameStartHandle"] = \
asset_data["frameStart"] - asset_data["handleStart"]
if "frameEnd" in asset_data and "handleEnd" in asset_data:
asset_data["frameEndHandle"] = \
asset_data["frameEnd"] + asset_data["handleEnd"]
# add assetData
template_data["assetData"] = asset_data
return template_data

View file

@ -83,10 +83,6 @@ class OpenTaskPath(LauncherAction):
if os.path.exists(valid_workdir):
return valid_workdir
# If task was selected, try to find asset path only to asset
if not task_name:
raise AssertionError("Folder does not exist.")
data.pop("task", None)
workdir = anatomy.templates_obj["work"]["folder"].format(data)
valid_workdir = self._find_first_filled_path(workdir)
@ -95,7 +91,7 @@ class OpenTaskPath(LauncherAction):
valid_workdir = os.path.normpath(valid_workdir)
if os.path.exists(valid_workdir):
return valid_workdir
raise AssertionError("Folder does not exist.")
raise AssertionError("Folder does not exist yet.")
@staticmethod
def open_in_explorer(path):

View file

@ -213,7 +213,8 @@ class PypeCommands:
pass
def run_tests(self, folder, mark, pyargs,
test_data_folder, persist, app_variant, timeout, setup_only):
test_data_folder, persist, app_variant, timeout, setup_only,
mongo_url):
"""
Runs tests from 'folder'
@ -226,6 +227,10 @@ class PypeCommands:
end
app_variant (str): variant (eg 2020 for AE), empty if use
latest installed version
timeout (int): explicit timeout for single test
setup_only (bool): if only preparation steps should be
triggered, no tests (useful for debugging/development)
mongo_url (str): url to Openpype Mongo database
"""
print("run_tests")
if folder:
@ -264,6 +269,9 @@ class PypeCommands:
if setup_only:
args.extend(["--setup_only", setup_only])
if mongo_url:
args.extend(["--mongo_url", mongo_url])
print("run_tests args: {}".format(args))
import pytest
pytest.main(args)

View file

@ -748,15 +748,17 @@ def _convert_nuke_project_settings(ayon_settings, output):
)
new_review_data_outputs = {}
outputs_settings = None
outputs_settings = []
# Check deprecated ExtractReviewDataMov
# settings for backwards compatibility
deprecrated_review_settings = ayon_publish["ExtractReviewDataMov"]
current_review_settings = (
ayon_publish["ExtractReviewIntermediates"]
ayon_publish.get("ExtractReviewIntermediates")
)
if deprecrated_review_settings["enabled"]:
outputs_settings = deprecrated_review_settings["outputs"]
elif current_review_settings is None:
pass
elif current_review_settings["enabled"]:
outputs_settings = current_review_settings["outputs"]

View file

@ -17,6 +17,14 @@
"rules": {}
}
},
"RenderSettings": {
"default_render_image_folder": "renders/blender",
"aov_separator": "underscore",
"image_format": "exr",
"multilayer_exr": true,
"aov_list": [],
"custom_passes": []
},
"workfile_builder": {
"create_first_version": false,
"custom_templates": []
@ -27,6 +35,22 @@
"optional": true,
"active": true
},
"ValidateFileSaved": {
"enabled": true,
"optional": false,
"active": true,
"exclude_families": []
},
"ValidateRenderCameraIsSet": {
"enabled": true,
"optional": false,
"active": true
},
"ValidateDeadlinePublish": {
"enabled": true,
"optional": false,
"active": true
},
"ValidateMeshHasUvs": {
"enabled": true,
"optional": true,

View file

@ -52,7 +52,8 @@
"priority": 50,
"chunk_size": 10,
"concurrent_tasks": 1,
"group": ""
"group": "",
"plugin": "Fusion"
},
"NukeSubmitDeadline": {
"enabled": true,
@ -99,6 +100,15 @@
"deadline_chunk_size": 10,
"deadline_job_delay": "00:00:00:00"
},
"BlenderSubmitDeadline": {
"enabled": true,
"optional": false,
"active": true,
"use_published": true,
"priority": 50,
"chunk_size": 10,
"group": "none"
},
"ProcessSubmittedJobOnFarm": {
"enabled": true,
"deadline_department": "",
@ -112,6 +122,9 @@
"maya": [
".*([Bb]eauty).*"
],
"blender": [
".*([Bb]eauty).*"
],
"aftereffects": [
".*"
],

View file

@ -1,4 +1,16 @@
{
"general": {
"update_houdini_var_context": {
"enabled": true,
"houdini_vars":[
{
"var": "JOB",
"value": "{root[work]}/{project[name]}/{hierarchy}/{asset}/work/{task[name]}",
"is_directory": true
}
]
}
},
"imageio": {
"activate_host_color_management": true,
"ocio_config": {

View file

@ -707,6 +707,9 @@
"CollectMayaRender": {
"sync_workfile_version": false
},
"CollectFbxAnimation": {
"enabled": true
},
"CollectFbxCamera": {
"enabled": false
},
@ -1120,6 +1123,11 @@
"optional": true,
"active": true
},
"ValidateAnimatedReferenceRig": {
"enabled": true,
"optional": false,
"active": true
},
"ValidateAnimationContent": {
"enabled": true,
"optional": false,
@ -1140,6 +1148,16 @@
"optional": false,
"active": true
},
"ValidateSkeletonRigContents": {
"enabled": true,
"optional": true,
"active": true
},
"ValidateSkeletonRigControllers": {
"enabled": false,
"optional": true,
"active": true
},
"ValidateSkinclusterDeformerSet": {
"enabled": true,
"optional": false,
@ -1150,6 +1168,21 @@
"optional": false,
"allow_history_only": false
},
"ValidateSkeletonRigOutSetNodeIds": {
"enabled": false,
"optional": false,
"allow_history_only": false
},
"ValidateSkeletonRigOutputIds": {
"enabled": false,
"optional": true,
"active": true
},
"ValidateSkeletonTopGroupHierarchy": {
"enabled": true,
"optional": true,
"active": true
},
"ValidateCameraAttributes": {
"enabled": false,
"optional": true,

View file

@ -54,6 +54,110 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "RenderSettings",
"label": "Render Settings",
"children": [
{
"type": "text",
"key": "default_render_image_folder",
"label": "Default render image folder"
},
{
"key": "aov_separator",
"label": "AOV Separator Character",
"type": "enum",
"multiselection": false,
"defaults": "underscore",
"enum_items": [
{"dash": "- (dash)"},
{"underscore": "_ (underscore)"},
{"dot": ". (dot)"}
]
},
{
"key": "image_format",
"label": "Output Image Format",
"type": "enum",
"multiselection": false,
"defaults": "exr",
"enum_items": [
{"exr": "OpenEXR"},
{"bmp": "BMP"},
{"rgb": "Iris"},
{"png": "PNG"},
{"jpg": "JPEG"},
{"jp2": "JPEG 2000"},
{"tga": "Targa"},
{"tif": "TIFF"}
]
},
{
"key": "multilayer_exr",
"type": "boolean",
"label": "Multilayer (EXR)"
},
{
"type": "label",
"label": "Note: Multilayer EXR is only used when output format type set to EXR."
},
{
"key": "aov_list",
"label": "AOVs to create",
"type": "enum",
"multiselection": true,
"defaults": "empty",
"enum_items": [
{"empty": "< empty >"},
{"combined": "Combined"},
{"z": "Z"},
{"mist": "Mist"},
{"normal": "Normal"},
{"diffuse_light": "Diffuse Light"},
{"diffuse_color": "Diffuse Color"},
{"specular_light": "Specular Light"},
{"specular_color": "Specular Color"},
{"volume_light": "Volume Light"},
{"emission": "Emission"},
{"environment": "Environment"},
{"shadow": "Shadow"},
{"ao": "Ambient Occlusion"},
{"denoising": "Denoising"},
{"volume_direct": "Direct Volumetric Scattering"},
{"volume_indirect": "Indirect Volumetric Scattering"}
]
},
{
"type": "label",
"label": "Add custom AOVs. They are added to the view layer and in the Compositing Nodetree,\nbut they need to be added manually to the Shader Nodetree."
},
{
"type": "dict-modifiable",
"store_as_list": true,
"key": "custom_passes",
"label": "Custom Passes",
"use_label_wrap": true,
"object_type": {
"type": "dict",
"children": [
{
"key": "type",
"label": "Type",
"type": "enum",
"multiselection": false,
"default": "COLOR",
"enum_items": [
{"COLOR": "Color"},
{"VALUE": "Value"}
]
}
]
}
}
]
},
{
"type": "schema_template",
"name": "template_workfile_options",

View file

@ -289,6 +289,15 @@
"type": "text",
"key": "group",
"label": "Group Name"
},
{
"type": "enum",
"key": "plugin",
"label": "Deadline Plugin",
"enum_items": [
{"Fusion": "Fusion"},
{"FusionCmd": "FusionCmd"}
]
}
]
},
@ -531,6 +540,50 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "BlenderSubmitDeadline",
"label": "Blender Submit to Deadline",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
},
{
"type": "boolean",
"key": "use_published",
"label": "Use Published scene"
},
{
"type": "number",
"key": "priority",
"label": "Priority"
},
{
"type": "number",
"key": "chunk_size",
"label": "Frame per Task"
},
{
"type": "text",
"key": "group",
"label": "Group Name"
}
]
},
{
"type": "dict",
"collapsible": true,

View file

@ -5,6 +5,10 @@
"label": "Houdini",
"is_file": true,
"children": [
{
"type": "schema",
"name": "schema_houdini_general"
},
{
"key": "imageio",
"type": "dict",

View file

@ -18,6 +18,39 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ValidateFileSaved",
"label": "Validate File Saved",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
},
{
"type": "splitter"
},
{
"key": "exclude_families",
"label": "Exclude Families",
"type": "list",
"object_type": "text"
}
]
},
{
"type": "collapsible-wrap",
"label": "Model",
@ -46,6 +79,66 @@
}
]
},
{
"type": "collapsible-wrap",
"label": "Render",
"children": [
{
"type": "schema_template",
"name": "template_publish_plugin",
"template_data": [
{
"type": "dict",
"collapsible": true,
"key": "ValidateRenderCameraIsSet",
"label": "Validate Render Camera Is Set",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ValidateDeadlinePublish",
"label": "Validate Render Output for Deadline",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
}
]
}
]
}
]
},
{
"type": "splitter"
},

View file

@ -0,0 +1,53 @@
{
"type": "dict",
"key": "general",
"label": "General",
"collapsible": true,
"is_group": true,
"children": [
{
"type": "dict",
"collapsible": true,
"checkbox_key": "enabled",
"key": "update_houdini_var_context",
"label": "Update Houdini Vars on context change",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "label",
"label": "Sync vars with context changes.<br>If a value is treated as a directory on update it will be ensured the folder exists"
},
{
"type": "list",
"key": "houdini_vars",
"label": "Houdini Vars",
"collapsible": false,
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "var",
"label": "Var"
},
{
"type": "text",
"key": "value",
"label": "Value"
},
{
"type": "boolean",
"key": "is_directory",
"label": "Treat as directory"
}
]
}
}
]
}
]
}

View file

@ -21,6 +21,20 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "CollectFbxAnimation",
"label": "Collect Fbx Animation",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
}
]
},
{
"type": "dict",
"collapsible": true,
@ -793,6 +807,10 @@
"key": "ValidateRigControllers",
"label": "Validate Rig Controllers"
},
{
"key": "ValidateAnimatedReferenceRig",
"label": "Validate Animated Reference Rig"
},
{
"key": "ValidateAnimationContent",
"label": "Validate Animation Content"
@ -809,9 +827,51 @@
"key": "ValidateSkeletalMeshHierarchy",
"label": "Validate Skeletal Mesh Top Node"
},
{
{
"key": "ValidateSkeletonRigContents",
"label": "Validate Skeleton Rig Contents"
},
{
"key": "ValidateSkeletonRigControllers",
"label": "Validate Skeleton Rig Controllers"
},
{
"key": "ValidateSkinclusterDeformerSet",
"label": "Validate Skincluster Deformer Relationships"
},
{
"key": "ValidateSkeletonRigOutputIds",
"label": "Validate Skeleton Rig Output Ids"
},
{
"key": "ValidateSkeletonTopGroupHierarchy",
"label": "Validate Skeleton Top Group Hierarchy"
}
]
},
{
"type": "dict",
"collapsible": true,
"checkbox_key": "enabled",
"key": "ValidateRigOutSetNodeIds",
"label": "Validate Rig Out Set Node Ids",
"is_group": true,
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "allow_history_only",
"label": "Allow history only"
}
]
},
@ -819,8 +879,8 @@
"type": "dict",
"collapsible": true,
"checkbox_key": "enabled",
"key": "ValidateRigOutSetNodeIds",
"label": "Validate Rig Out Set Node Ids",
"key": "ValidateSkeletonRigOutSetNodeIds",
"label": "Validate Skeleton Rig Out Set Node Ids",
"is_group": true,
"children": [
{

View file

@ -272,7 +272,7 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
@abstractmethod
def set_application_force_not_open_workfile(
self, project_name, folder_id, task_id, action_id, enabled
self, project_name, folder_id, task_id, action_ids, enabled
):
"""This is application action related to force not open last workfile.
@ -280,7 +280,7 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
project_name (Union[str, None]): Project name.
folder_id (Union[str, None]): Folder id.
task_id (Union[str, None]): Task id.
action_id (str): Action identifier.
action_id (Iterable[str]): Action identifiers.
enabled (bool): New value of force not open workfile.
"""
@ -295,3 +295,13 @@ class AbstractLauncherFrontEnd(AbstractLauncherCommon):
"""
pass
@abstractmethod
def refresh_actions(self):
"""Refresh actions and all related data.
Triggers 'controller.refresh.actions.started' event at the beginning
and 'controller.refresh.actions.finished' at the end.
"""
pass

View file

@ -121,10 +121,10 @@ class BaseLauncherController(
project_name, folder_id, task_id)
def set_application_force_not_open_workfile(
self, project_name, folder_id, task_id, action_id, enabled
self, project_name, folder_id, task_id, action_ids, enabled
):
self._actions_model.set_application_force_not_open_workfile(
project_name, folder_id, task_id, action_id, enabled
project_name, folder_id, task_id, action_ids, enabled
)
def trigger_action(self, project_name, folder_id, task_id, identifier):
@ -145,5 +145,17 @@ class BaseLauncherController(
self._emit_event("controller.refresh.finished")
def refresh_actions(self):
self._emit_event("controller.refresh.actions.started")
# Refresh project settings (used for actions discovery)
self._project_settings = {}
# Refresh projects - they define applications
self._projects_model.reset()
# Refresh actions
self._actions_model.refresh()
self._emit_event("controller.refresh.actions.finished")
def _emit_event(self, topic, data=None):
self.emit_event(topic, data, "controller")

View file

@ -326,13 +326,14 @@ class ActionsModel:
return output
def set_application_force_not_open_workfile(
self, project_name, folder_id, task_id, action_id, enabled
self, project_name, folder_id, task_id, action_ids, enabled
):
no_workfile_reg_data = self._get_no_last_workfile_reg_data()
project_data = no_workfile_reg_data.setdefault(project_name, {})
folder_data = project_data.setdefault(folder_id, {})
task_data = folder_data.setdefault(task_id, {})
task_data[action_id] = enabled
for action_id in action_ids:
task_data[action_id] = enabled
self._launcher_tool_reg.set_item(
self._not_open_workfile_reg_key, no_workfile_reg_data
)
@ -359,7 +360,10 @@ class ActionsModel:
project_name, folder_id, task_id
)
force_not_open_workfile = per_action.get(identifier, False)
action.data["start_last_workfile"] = force_not_open_workfile
if force_not_open_workfile:
action.data["start_last_workfile"] = False
else:
action.data.pop("start_last_workfile", None)
action.process(session)
except Exception as exc:
self.log.warning("Action trigger failed.", exc_info=True)

View file

@ -19,6 +19,21 @@ ANIMATION_STATE_ROLE = QtCore.Qt.UserRole + 6
FORCE_NOT_OPEN_WORKFILE_ROLE = QtCore.Qt.UserRole + 7
def _variant_label_sort_getter(action_item):
"""Get variant label value for sorting.
Make sure the output value is a string.
Args:
action_item (ActionItem): Action item.
Returns:
str: Variant label or empty string.
"""
return action_item.variant_label or ""
class ActionsQtModel(QtGui.QStandardItemModel):
"""Qt model for actions.
@ -31,10 +46,6 @@ class ActionsQtModel(QtGui.QStandardItemModel):
def __init__(self, controller):
super(ActionsQtModel, self).__init__()
controller.register_event_callback(
"controller.refresh.finished",
self._on_controller_refresh_finished,
)
controller.register_event_callback(
"selection.project.changed",
self._on_selection_project_changed,
@ -51,6 +62,7 @@ class ActionsQtModel(QtGui.QStandardItemModel):
self._controller = controller
self._items_by_id = {}
self._action_items_by_id = {}
self._groups_by_id = {}
self._selected_project_name = None
@ -72,8 +84,12 @@ class ActionsQtModel(QtGui.QStandardItemModel):
def get_item_by_id(self, action_id):
return self._items_by_id.get(action_id)
def get_action_item_by_id(self, action_id):
return self._action_items_by_id.get(action_id)
def _clear_items(self):
self._items_by_id = {}
self._action_items_by_id = {}
self._groups_by_id = {}
root = self.invisibleRootItem()
root.removeRows(0, root.rowCount())
@ -101,12 +117,14 @@ class ActionsQtModel(QtGui.QStandardItemModel):
groups_by_id = {}
for action_items in items_by_label.values():
action_items.sort(key=_variant_label_sort_getter, reverse=True)
first_item = next(iter(action_items))
all_action_items_info.append((first_item, len(action_items) > 1))
groups_by_id[first_item.identifier] = action_items
new_items = []
items_by_id = {}
action_items_by_id = {}
for action_item_info in all_action_items_info:
action_item, is_group = action_item_info
icon = get_qt_icon(action_item.icon)
@ -132,6 +150,7 @@ class ActionsQtModel(QtGui.QStandardItemModel):
action_item.force_not_open_workfile,
FORCE_NOT_OPEN_WORKFILE_ROLE)
items_by_id[action_item.identifier] = item
action_items_by_id[action_item.identifier] = action_item
if new_items:
root_item.appendRows(new_items)
@ -139,19 +158,14 @@ class ActionsQtModel(QtGui.QStandardItemModel):
to_remove = set(self._items_by_id.keys()) - set(items_by_id.keys())
for identifier in to_remove:
item = self._items_by_id.pop(identifier)
self._action_items_by_id.pop(identifier)
root_item.removeRow(item.row())
self._groups_by_id = groups_by_id
self._items_by_id = items_by_id
self._action_items_by_id = action_items_by_id
self.refreshed.emit()
def _on_controller_refresh_finished(self):
context = self._controller.get_selected_context()
self._selected_project_name = context["project_name"]
self._selected_folder_id = context["folder_id"]
self._selected_task_id = context["task_id"]
self.refresh()
def _on_selection_project_changed(self, event):
self._selected_project_name = event["project_name"]
self._selected_folder_id = None
@ -336,6 +350,9 @@ class ActionsWidget(QtWidgets.QWidget):
self._set_row_height(1)
def refresh(self):
self._model.refresh()
def _set_row_height(self, rows):
self.setMinimumHeight(rows * 75)
@ -387,9 +404,15 @@ class ActionsWidget(QtWidgets.QWidget):
checkbox.setChecked(True)
action_id = index.data(ACTION_ID_ROLE)
is_group = index.data(ACTION_IS_GROUP_ROLE)
if is_group:
action_items = self._model.get_group_items(action_id)
else:
action_items = [self._model.get_action_item_by_id(action_id)]
action_ids = {action_item.identifier for action_item in action_items}
checkbox.stateChanged.connect(
lambda: self._on_checkbox_changed(
action_id, checkbox.isChecked()
action_ids, checkbox.isChecked()
)
)
action = QtWidgets.QWidgetAction(menu)
@ -402,7 +425,7 @@ class ActionsWidget(QtWidgets.QWidget):
menu.exec_(global_point)
self._context_menu = None
def _on_checkbox_changed(self, action_id, is_checked):
def _on_checkbox_changed(self, action_ids, is_checked):
if self._context_menu is not None:
self._context_menu.close()
@ -410,7 +433,7 @@ class ActionsWidget(QtWidgets.QWidget):
folder_id = self._model.get_selected_folder_id()
task_id = self._model.get_selected_task_id()
self._controller.set_application_force_not_open_workfile(
project_name, folder_id, task_id, action_id, is_checked)
project_name, folder_id, task_id, action_ids, is_checked)
self._model.refresh()
def _on_clicked(self, index):

View file

@ -92,6 +92,10 @@ class HierarchyPage(QtWidgets.QWidget):
if visible and project_name:
self._projects_combobox.set_selection(project_name)
def refresh(self):
self._folders_widget.refresh()
self._tasks_widget.refresh()
def _on_back_clicked(self):
self._controller.set_selected_project(None)

View file

@ -73,6 +73,9 @@ class ProjectIconView(QtWidgets.QListView):
class ProjectsWidget(QtWidgets.QWidget):
"""Projects Page"""
refreshed = QtCore.Signal()
def __init__(self, controller, parent=None):
super(ProjectsWidget, self).__init__(parent=parent)
@ -104,6 +107,7 @@ class ProjectsWidget(QtWidgets.QWidget):
main_layout.addWidget(projects_view, 1)
projects_view.clicked.connect(self._on_view_clicked)
projects_model.refreshed.connect(self.refreshed)
projects_filter_text.textChanged.connect(
self._on_project_filter_change)
refresh_btn.clicked.connect(self._on_refresh_clicked)
@ -119,6 +123,15 @@ class ProjectsWidget(QtWidgets.QWidget):
self._projects_model = projects_model
self._projects_proxy_model = projects_proxy_model
def has_content(self):
"""Model has at least one project.
Returns:
bool: True if there is any content in the model.
"""
return self._projects_model.has_content()
def _on_view_clicked(self, index):
if index.isValid():
project_name = index.data(QtCore.Qt.DisplayRole)

View file

@ -99,8 +99,8 @@ class LauncherWindow(QtWidgets.QWidget):
message_timer.setInterval(self.message_interval)
message_timer.setSingleShot(True)
refresh_timer = QtCore.QTimer()
refresh_timer.setInterval(self.refresh_interval)
actions_refresh_timer = QtCore.QTimer()
actions_refresh_timer.setInterval(self.refresh_interval)
page_slide_anim = QtCore.QVariantAnimation(self)
page_slide_anim.setDuration(self.page_side_anim_interval)
@ -108,8 +108,10 @@ class LauncherWindow(QtWidgets.QWidget):
page_slide_anim.setEndValue(1.0)
page_slide_anim.setEasingCurve(QtCore.QEasingCurve.OutQuad)
projects_page.refreshed.connect(self._on_projects_refresh)
message_timer.timeout.connect(self._on_message_timeout)
refresh_timer.timeout.connect(self._on_refresh_timeout)
actions_refresh_timer.timeout.connect(
self._on_actions_refresh_timeout)
page_slide_anim.valueChanged.connect(
self._on_page_slide_value_changed)
page_slide_anim.finished.connect(self._on_page_slide_finished)
@ -132,6 +134,7 @@ class LauncherWindow(QtWidgets.QWidget):
self._is_on_projects_page = True
self._window_is_active = False
self._refresh_on_activate = False
self._selected_project_name = None
self._pages_widget = pages_widget
self._pages_layout = pages_layout
@ -143,7 +146,7 @@ class LauncherWindow(QtWidgets.QWidget):
# self._action_history = action_history
self._message_timer = message_timer
self._refresh_timer = refresh_timer
self._actions_refresh_timer = actions_refresh_timer
self._page_slide_anim = page_slide_anim
hierarchy_page.setVisible(not self._is_on_projects_page)
@ -152,14 +155,14 @@ class LauncherWindow(QtWidgets.QWidget):
def showEvent(self, event):
super(LauncherWindow, self).showEvent(event)
self._window_is_active = True
if not self._refresh_timer.isActive():
self._refresh_timer.start()
if not self._actions_refresh_timer.isActive():
self._actions_refresh_timer.start()
self._controller.refresh()
def closeEvent(self, event):
super(LauncherWindow, self).closeEvent(event)
self._window_is_active = False
self._refresh_timer.stop()
self._actions_refresh_timer.stop()
def changeEvent(self, event):
if event.type() in (
@ -170,15 +173,15 @@ class LauncherWindow(QtWidgets.QWidget):
self._window_is_active = is_active
if is_active and self._refresh_on_activate:
self._refresh_on_activate = False
self._on_refresh_timeout()
self._refresh_timer.start()
self._on_actions_refresh_timeout()
self._actions_refresh_timer.start()
super(LauncherWindow, self).changeEvent(event)
def _on_refresh_timeout(self):
def _on_actions_refresh_timeout(self):
# Stop timer if widget is not visible
if self._window_is_active:
self._controller.refresh()
self._controller.refresh_actions()
else:
self._refresh_on_activate = True
@ -191,12 +194,26 @@ class LauncherWindow(QtWidgets.QWidget):
def _on_project_selection_change(self, event):
project_name = event["project_name"]
self._selected_project_name = project_name
if not project_name:
self._go_to_projects_page()
elif self._is_on_projects_page:
self._go_to_hierarchy_page(project_name)
def _on_projects_refresh(self):
# There is nothing to do, we're on projects page
if self._is_on_projects_page:
return
# No projects were found -> go back to projects page
if not self._projects_page.has_content():
self._go_to_projects_page()
return
self._hierarchy_page.refresh()
self._actions_widget.refresh()
def _on_action_trigger_started(self, event):
self._echo("Running action: {}".format(event["full_label"]))

View file

@ -199,13 +199,18 @@ class HierarchyModel(object):
Hierarchy items are folders and tasks. Folders can have as parent another
folder or project. Tasks can have as parent only folder.
"""
lifetime = 60 # A minute
def __init__(self, controller):
self._folders_items = NestedCacheItem(levels=1, default_factory=dict)
self._folders_by_id = NestedCacheItem(levels=2, default_factory=dict)
self._folders_items = NestedCacheItem(
levels=1, default_factory=dict, lifetime=self.lifetime)
self._folders_by_id = NestedCacheItem(
levels=2, default_factory=dict, lifetime=self.lifetime)
self._task_items = NestedCacheItem(levels=2, default_factory=dict)
self._tasks_by_id = NestedCacheItem(levels=2, default_factory=dict)
self._task_items = NestedCacheItem(
levels=2, default_factory=dict, lifetime=self.lifetime)
self._tasks_by_id = NestedCacheItem(
levels=2, default_factory=dict, lifetime=self.lifetime)
self._folders_refreshing = set()
self._tasks_refreshing = set()

View file

@ -56,11 +56,21 @@ class FoldersModel(QtGui.QStandardItemModel):
return self._has_content
def clear(self):
def refresh(self):
"""Refresh folders for last selected project.
Force to update folders model from controller. This may or may not
trigger query from server, that's based on controller's cache.
"""
self.set_project_name(self._last_project_name)
def _clear_items(self):
self._items_by_id = {}
self._parent_id_by_id = {}
self._has_content = False
super(FoldersModel, self).clear()
root_item = self.invisibleRootItem()
root_item.removeRows(0, root_item.rowCount())
def get_index_by_id(self, item_id):
"""Get index by folder id.
@ -90,7 +100,7 @@ class FoldersModel(QtGui.QStandardItemModel):
self._is_refreshing = True
if self._last_project_name != project_name:
self.clear()
self._clear_items()
self._last_project_name = project_name
thread = self._refresh_threads.get(project_name)
@ -135,7 +145,7 @@ class FoldersModel(QtGui.QStandardItemModel):
def _fill_items(self, folder_items_by_id):
if not folder_items_by_id:
if folder_items_by_id is not None:
self.clear()
self._clear_items()
self._is_refreshing = False
self.refreshed.emit()
return
@ -247,6 +257,7 @@ class FoldersWidget(QtWidgets.QWidget):
folders_model = FoldersModel(controller)
folders_proxy_model = RecursiveSortFilterProxyModel()
folders_proxy_model.setSourceModel(folders_model)
folders_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
folders_view.setModel(folders_proxy_model)
@ -293,6 +304,14 @@ class FoldersWidget(QtWidgets.QWidget):
self._folders_proxy_model.setFilterFixedString(name)
def refresh(self):
"""Refresh folders model.
Force to update folders model from controller.
"""
self._folders_model.refresh()
def _on_project_selection_change(self, event):
project_name = event["project_name"]
self._set_project_name(project_name)
@ -300,9 +319,6 @@ class FoldersWidget(QtWidgets.QWidget):
def _set_project_name(self, project_name):
self._folders_model.set_project_name(project_name)
def _clear(self):
self._folders_model.clear()
def _on_folders_refresh_finished(self, event):
if event["sender"] != SENDER_NAME:
self._set_project_name(event["project_name"])

View file

@ -44,14 +44,20 @@ class TasksModel(QtGui.QStandardItemModel):
# Initial state
self._add_invalid_selection_item()
def clear(self):
def _clear_items(self):
self._items_by_name = {}
self._has_content = False
self._remove_invalid_items()
super(TasksModel, self).clear()
root_item = self.invisibleRootItem()
root_item.removeRows(0, root_item.rowCount())
def refresh(self, project_name, folder_id):
"""Refresh tasks for folder.
def refresh(self):
"""Refresh tasks for last project and folder."""
self._refresh(self._last_project_name, self._last_folder_id)
def set_context(self, project_name, folder_id):
"""Set context for which should be tasks showed.
Args:
project_name (Union[str]): Name of project.
@ -121,7 +127,7 @@ class TasksModel(QtGui.QStandardItemModel):
return self._empty_tasks_item
def _add_invalid_item(self, item):
self.clear()
self._clear_items()
root_item = self.invisibleRootItem()
root_item.appendRow(item)
@ -299,6 +305,7 @@ class TasksWidget(QtWidgets.QWidget):
tasks_model = TasksModel(controller)
tasks_proxy_model = QtCore.QSortFilterProxyModel()
tasks_proxy_model.setSourceModel(tasks_model)
tasks_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
tasks_view.setModel(tasks_proxy_model)
@ -334,8 +341,14 @@ class TasksWidget(QtWidgets.QWidget):
self._handle_expected_selection = handle_expected_selection
self._expected_selection_data = None
def _clear(self):
self._tasks_model.clear()
def refresh(self):
"""Refresh folders for last selected project.
Force to update folders model from controller. This may or may not
trigger query from server, that's based on controller's cache.
"""
self._tasks_model.refresh()
def _on_tasks_refresh_finished(self, event):
"""Tasks were refreshed in controller.
@ -353,13 +366,13 @@ class TasksWidget(QtWidgets.QWidget):
or event["folder_id"] != self._selected_folder_id
):
return
self._tasks_model.refresh(
self._tasks_model.set_context(
event["project_name"], self._selected_folder_id
)
def _folder_selection_changed(self, event):
self._selected_folder_id = event["folder_id"]
self._tasks_model.refresh(
self._tasks_model.set_context(
event["project_name"], self._selected_folder_id
)

View file

@ -914,10 +914,12 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon):
# Controller actions
@abstractmethod
def open_workfile(self, filepath):
"""Open a workfile.
def open_workfile(self, folder_id, task_id, filepath):
"""Open a workfile for context.
Args:
folder_id (str): Folder id.
task_id (str): Task id.
filepath (str): Workfile path.
"""

View file

@ -452,12 +452,12 @@ class BaseWorkfileController(
self._emit_event("controller.refresh.finished")
# Controller actions
def open_workfile(self, filepath):
def open_workfile(self, folder_id, task_id, filepath):
self._emit_event("open_workfile.started")
failed = False
try:
self._host_open_workfile(filepath)
self._open_workfile(folder_id, task_id, filepath)
except Exception:
failed = True
@ -575,6 +575,53 @@ class BaseWorkfileController(
self._expected_selection.get_expected_selection_data(),
)
def _get_event_context_data(
self, project_name, folder_id, task_id, folder=None, task=None
):
if folder is None:
folder = self.get_folder_entity(folder_id)
if task is None:
task = self.get_task_entity(task_id)
# NOTE keys should be OpenPype compatible
return {
"project_name": project_name,
"folder_id": folder_id,
"asset_id": folder_id,
"asset_name": folder["name"],
"task_id": task_id,
"task_name": task["name"],
"host_name": self.get_host_name(),
}
def _open_workfile(self, folder_id, task_id, filepath):
project_name = self.get_current_project_name()
event_data = self._get_event_context_data(
project_name, folder_id, task_id
)
event_data["filepath"] = filepath
emit_event("workfile.open.before", event_data, source="workfiles.tool")
# Change context
task_name = event_data["task_name"]
if (
folder_id != self.get_current_folder_id()
or task_name != self.get_current_task_name()
):
# Use OpenPype asset-like object
asset_doc = get_asset_by_id(
event_data["project_name"],
event_data["folder_id"],
)
change_current_context(
asset_doc,
event_data["task_name"]
)
self._host_open_workfile(filepath)
emit_event("workfile.open.after", event_data, source="workfiles.tool")
def _save_as_workfile(
self,
folder_id,
@ -591,18 +638,14 @@ class BaseWorkfileController(
task_name = task["name"]
# QUESTION should the data be different for 'before' and 'after'?
# NOTE keys should be OpenPype compatible
event_data = {
"project_name": project_name,
"folder_id": folder_id,
"asset_id": folder_id,
"asset_name": folder["name"],
"task_id": task_id,
"task_name": task_name,
"host_name": self.get_host_name(),
event_data = self._get_event_context_data(
project_name, folder_id, task_id, folder, task
)
event_data.update({
"filename": filename,
"workdir_path": workdir,
}
})
emit_event("workfile.save.before", event_data, source="workfiles.tool")
# Create workfiles root folder

View file

@ -106,7 +106,8 @@ class FilesWidget(QtWidgets.QWidget):
self._on_published_cancel_clicked)
self._selected_folder_id = None
self._selected_tak_name = None
self._selected_task_id = None
self._selected_task_name = None
self._pre_select_folder_id = None
self._pre_select_task_name = None
@ -178,7 +179,7 @@ class FilesWidget(QtWidgets.QWidget):
# -------------------------------------------------------------
# Workarea workfiles
# -------------------------------------------------------------
def _open_workfile(self, filepath):
def _open_workfile(self, folder_id, task_name, filepath):
if self._controller.has_unsaved_changes():
result = self._save_changes_prompt()
if result is None:
@ -186,12 +187,15 @@ class FilesWidget(QtWidgets.QWidget):
if result:
self._controller.save_current_workfile()
self._controller.open_workfile(filepath)
self._controller.open_workfile(folder_id, task_name, filepath)
def _on_workarea_open_clicked(self):
path = self._workarea_widget.get_selected_path()
if path:
self._open_workfile(path)
if not path:
return
folder_id = self._selected_folder_id
task_id = self._selected_task_id
self._open_workfile(folder_id, task_id, path)
def _on_current_open_requests(self):
self._on_workarea_open_clicked()
@ -238,8 +242,12 @@ class FilesWidget(QtWidgets.QWidget):
}
filepath = QtWidgets.QFileDialog.getOpenFileName(**kwargs)[0]
if filepath:
self._open_workfile(filepath)
if not filepath:
return
folder_id = self._selected_folder_id
task_id = self._selected_task_id
self._open_workfile(folder_id, task_id, filepath)
def _on_workarea_save_clicked(self):
result = self._exec_save_as_dialog()
@ -279,10 +287,11 @@ class FilesWidget(QtWidgets.QWidget):
def _on_task_changed(self, event):
self._selected_folder_id = event["folder_id"]
self._selected_tak_name = event["task_name"]
self._selected_task_id = event["task_id"]
self._selected_task_name = event["task_name"]
self._valid_selected_context = (
self._selected_folder_id is not None
and self._selected_tak_name is not None
and self._selected_task_id is not None
)
self._update_published_btns_state()
@ -311,7 +320,7 @@ class FilesWidget(QtWidgets.QWidget):
if enabled:
self._pre_select_folder_id = self._selected_folder_id
self._pre_select_task_name = self._selected_tak_name
self._pre_select_task_name = self._selected_task_name
else:
self._pre_select_folder_id = None
self._pre_select_task_name = None
@ -334,7 +343,7 @@ class FilesWidget(QtWidgets.QWidget):
return True
if self._pre_select_task_name is None:
return False
return self._pre_select_task_name != self._selected_tak_name
return self._pre_select_task_name != self._selected_task_name
def _on_published_cancel_clicked(self):
folder_id = self._pre_select_folder_id

View file

@ -176,11 +176,10 @@ class PublishReportMaker:
self._create_discover_result = None
self._convert_discover_result = None
self._publish_discover_result = None
self._plugin_data = []
self._plugin_data_with_plugin = []
self._stored_plugins = set()
self._current_plugin_data = []
self._plugin_data_by_id = {}
self._current_plugin = None
self._current_plugin_data = {}
self._all_instances_by_id = {}
self._current_context = None
@ -192,9 +191,9 @@ class PublishReportMaker:
create_context.convertor_discover_result
)
self._publish_discover_result = create_context.publish_discover_result
self._plugin_data = []
self._plugin_data_with_plugin = []
self._stored_plugins = set()
self._plugin_data_by_id = {}
self._current_plugin = None
self._current_plugin_data = {}
self._all_instances_by_id = {}
self._current_context = context
@ -211,18 +210,11 @@ class PublishReportMaker:
if self._current_plugin_data:
self._current_plugin_data["passed"] = True
self._current_plugin = plugin
self._current_plugin_data = self._add_plugin_data_item(plugin)
def _get_plugin_data_item(self, plugin):
store_item = None
for item in self._plugin_data_with_plugin:
if item["plugin"] is plugin:
store_item = item["data"]
break
return store_item
def _add_plugin_data_item(self, plugin):
if plugin in self._stored_plugins:
if plugin.id in self._plugin_data_by_id:
# A plugin would be processed more than once. What can cause it:
# - there is a bug in controller
# - plugin class is imported into multiple files
@ -230,15 +222,9 @@ class PublishReportMaker:
raise ValueError(
"Plugin '{}' is already stored".format(str(plugin)))
self._stored_plugins.add(plugin)
plugin_data_item = self._create_plugin_data_item(plugin)
self._plugin_data_by_id[plugin.id] = plugin_data_item
self._plugin_data_with_plugin.append({
"plugin": plugin,
"data": plugin_data_item
})
self._plugin_data.append(plugin_data_item)
return plugin_data_item
def _create_plugin_data_item(self, plugin):
@ -279,7 +265,7 @@ class PublishReportMaker:
"""Add result of single action."""
plugin = result["plugin"]
store_item = self._get_plugin_data_item(plugin)
store_item = self._plugin_data_by_id.get(plugin.id)
if store_item is None:
store_item = self._add_plugin_data_item(plugin)
@ -301,14 +287,24 @@ class PublishReportMaker:
instance, instance in self._current_context
)
plugins_data = copy.deepcopy(self._plugin_data)
if plugins_data and not plugins_data[-1]["passed"]:
plugins_data[-1]["passed"] = True
plugins_data_by_id = copy.deepcopy(
self._plugin_data_by_id
)
# Ensure the current plug-in is marked as `passed` in the result
# so that it shows on reports for paused publishes
if self._current_plugin is not None:
current_plugin_data = plugins_data_by_id.get(
self._current_plugin.id
)
if current_plugin_data and not current_plugin_data["passed"]:
current_plugin_data["passed"] = True
if publish_plugins:
for plugin in publish_plugins:
if plugin not in self._stored_plugins:
plugins_data.append(self._create_plugin_data_item(plugin))
if plugin.id not in plugins_data_by_id:
plugins_data_by_id[plugin.id] = \
self._create_plugin_data_item(plugin)
reports = []
if self._create_discover_result is not None:
@ -329,7 +325,7 @@ class PublishReportMaker:
)
return {
"plugins_data": plugins_data,
"plugins_data": list(plugins_data_by_id.values()),
"instances": instances_details,
"context": self._extract_context_data(self._current_context),
"crashed_file_paths": crashed_file_paths,

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.17.2-nightly.1"
__version__ = "3.17.2-nightly.3"

View file

@ -237,6 +237,7 @@
},
{
"name": "13-0",
"label": "13.0",
"use_python_2": false,
"executables": {
"windows": [
@ -319,6 +320,7 @@
},
{
"name": "13-0",
"label": "13.0",
"use_python_2": false,
"executables": {
"windows": [
@ -405,6 +407,7 @@
},
{
"name": "13-0",
"label": "13.0",
"use_python_2": false,
"executables": {
"windows": [
@ -491,6 +494,7 @@
},
{
"name": "13-0",
"label": "13.0",
"use_python_2": false,
"executables": {
"windows": [
@ -577,6 +581,7 @@
},
{
"name": "13-0",
"label": "13.0",
"use_python_2": false,
"executables": {
"windows": [

View file

@ -9,6 +9,10 @@ from .publish_plugins import (
PublishPuginsModel,
DEFAULT_BLENDER_PUBLISH_SETTINGS
)
from .render_settings import (
RenderSettingsModel,
DEFAULT_RENDER_SETTINGS
)
class UnitScaleSettingsModel(BaseSettingsModel):
@ -37,6 +41,8 @@ class BlenderSettings(BaseSettingsModel):
default_factory=BlenderImageIOModel,
title="Color Management (ImageIO)"
)
render_settings: RenderSettingsModel = Field(
default_factory=RenderSettingsModel, title="Render Settings")
workfile_builder: TemplateWorkfileBaseOptions = Field(
default_factory=TemplateWorkfileBaseOptions,
title="Workfile Builder"
@ -55,6 +61,7 @@ DEFAULT_VALUES = {
},
"set_frames_startup": True,
"set_resolution_startup": True,
"render_settings": DEFAULT_RENDER_SETTINGS,
"publish": DEFAULT_BLENDER_PUBLISH_SETTINGS,
"workfile_builder": {
"create_first_version": False,

View file

@ -26,6 +26,16 @@ class ValidatePluginModel(BaseSettingsModel):
active: bool = Field(title="Active")
class ValidateFileSavedModel(BaseSettingsModel):
enabled: bool = Field(title="ValidateFileSaved")
optional: bool = Field(title="Optional")
active: bool = Field(title="Active")
exclude_families: list[str] = Field(
default_factory=list,
title="Exclude product types"
)
class ExtractBlendModel(BaseSettingsModel):
enabled: bool = Field(True)
optional: bool = Field(title="Optional")
@ -53,6 +63,21 @@ class PublishPuginsModel(BaseSettingsModel):
title="Validate Camera Zero Keyframe",
section="Validators"
)
ValidateFileSaved: ValidateFileSavedModel = Field(
default_factory=ValidateFileSavedModel,
title="Validate File Saved",
section="Validators"
)
ValidateRenderCameraIsSet: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Render Camera Is Set",
section="Validators"
)
ValidateDeadlinePublish: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Render Output for Deadline",
section="Validators"
)
ValidateMeshHasUvs: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Mesh Has Uvs"
@ -118,6 +143,22 @@ DEFAULT_BLENDER_PUBLISH_SETTINGS = {
"optional": True,
"active": True
},
"ValidateFileSaved": {
"enabled": True,
"optional": False,
"active": True,
"exclude_families": []
},
"ValidateRenderCameraIsSet": {
"enabled": True,
"optional": False,
"active": True
},
"ValidateDeadlinePublish": {
"enabled": True,
"optional": False,
"active": True
},
"ValidateMeshHasUvs": {
"enabled": True,
"optional": True,

View file

@ -0,0 +1,109 @@
"""Providing models and values for Blender Render Settings."""
from pydantic import Field
from ayon_server.settings import BaseSettingsModel
def aov_separators_enum():
    """Return value/label enum options for the AOV separator character."""
    separators = (
        ("dash", "- (dash)"),
        ("underscore", "_ (underscore)"),
        ("dot", ". (dot)"),
    )
    return [
        {"value": value, "label": label}
        for value, label in separators
    ]
def image_format_enum():
    """Return value/label enum options for the render image file format."""
    formats = (
        ("exr", "OpenEXR"),
        ("bmp", "BMP"),
        ("rgb", "Iris"),
        ("png", "PNG"),
        ("jpg", "JPEG"),
        ("jp2", "JPEG 2000"),
        ("tga", "Targa"),
        ("tif", "TIFF"),
    )
    return [{"value": value, "label": label} for value, label in formats]
def aov_list_enum():
    """Return value/label enum options for the selectable AOV passes."""
    passes = (
        ("empty", "< none >"),
        ("combined", "Combined"),
        ("z", "Z"),
        ("mist", "Mist"),
        ("normal", "Normal"),
        ("diffuse_light", "Diffuse Light"),
        ("diffuse_color", "Diffuse Color"),
        ("specular_light", "Specular Light"),
        ("specular_color", "Specular Color"),
        ("volume_light", "Volume Light"),
        ("emission", "Emission"),
        ("environment", "Environment"),
        ("shadow", "Shadow"),
        ("ao", "Ambient Occlusion"),
        ("denoising", "Denoising"),
        ("volume_direct", "Direct Volumetric Scattering"),
        ("volume_indirect", "Indirect Volumetric Scattering"),
    )
    return [{"value": value, "label": label} for value, label in passes]
def custom_passes_types_enum():
    """Return value/label enum options for the custom pass data type."""
    return [
        {"value": value, "label": label}
        for value, label in (("COLOR", "Color"), ("VALUE", "Value"))
    ]
class CustomPassesModel(BaseSettingsModel):
    """A single custom AOV pass: attribute name plus its data type."""
    _layout = "compact"
    attribute: str = Field("", title="Attribute name")
    # Data type of the pass; one of the values from
    # custom_passes_types_enum ("COLOR" or "VALUE").
    value: str = Field(
        "COLOR",
        title="Type",
        enum_resolver=custom_passes_types_enum
    )
class RenderSettingsModel(BaseSettingsModel):
    """Settings model for Blender render output configuration."""

    # Folder (relative to the work area) where rendered images are written.
    default_render_image_folder: str = Field(
        title="Default Render Image Folder"
    )
    # Separator character used in output names; see aov_separators_enum.
    aov_separator: str = Field(
        "underscore",
        title="AOV Separator Character",
        enum_resolver=aov_separators_enum
    )
    image_format: str = Field(
        "exr",
        title="Image Format",
        enum_resolver=image_format_enum
    )
    # NOTE(review): presumably only relevant when image_format is "exr" —
    # confirm against the addon code consuming these settings.
    multilayer_exr: bool = Field(
        title="Multilayer (EXR)"
    )
    # Built-in AOV passes to create; values come from aov_list_enum.
    aov_list: list[str] = Field(
        default_factory=list,
        enum_resolver=aov_list_enum,
        title="AOVs to create"
    )
    custom_passes: list[CustomPassesModel] = Field(
        default_factory=list,
        title="Custom Passes",
        description=(
            "Add custom AOVs. They are added to the view layer and in the "
            "Compositing Nodetree,\nbut they need to be added manually to "
            "the Shader Nodetree."
        )
    )
# Factory default values for ``RenderSettingsModel``.
DEFAULT_RENDER_SETTINGS = {
    "default_render_image_folder": "renders/blender",
    "aov_separator": "underscore",
    "image_format": "exr",
    "multilayer_exr": True,
    "aov_list": [],
    "custom_passes": []
}

View file

@ -1 +1 @@
__version__ = "0.1.1"
__version__ = "0.1.3"

View file

@ -124,6 +124,24 @@ class LimitGroupsSubmodel(BaseSettingsModel):
)
def fusion_deadline_plugin_enum():
    """Return a list of value/label dicts for the enumerator.

    Returning a list of dicts is used to allow for a custom label to be
    displayed in the UI.
    """
    plugin_names = ("Fusion", "FusionCmd")
    return [{"value": name, "label": name} for name in plugin_names]
class FusionSubmitDeadlineModel(BaseSettingsModel):
enabled: bool = Field(True, title="Enabled")
optional: bool = Field(False, title="Optional")
@ -132,6 +150,9 @@ class FusionSubmitDeadlineModel(BaseSettingsModel):
chunk_size: int = Field(10, title="Frame per Task")
concurrent_tasks: int = Field(1, title="Number of concurrent tasks")
group: str = Field("", title="Group Name")
plugin: str = Field("Fusion",
enum_resolver=fusion_deadline_plugin_enum,
title="Deadline Plugin")
class NukeSubmitDeadlineModel(BaseSettingsModel):
@ -208,6 +229,16 @@ class CelactionSubmitDeadlineModel(BaseSettingsModel):
)
class BlenderSubmitDeadlineModel(BaseSettingsModel):
    """Settings for submitting Blender render jobs to Deadline."""

    enabled: bool = Field(True)
    optional: bool = Field(title="Optional")
    active: bool = Field(title="Active")
    # Submit the published scene instead of the live work scene.
    use_published: bool = Field(title="Use Published scene")
    priority: int = Field(title="Priority")
    # Number of frames rendered per Deadline task.
    chunk_size: int = Field(title="Frame per Task")
    group: str = Field("", title="Group Name")
class AOVFilterSubmodel(BaseSettingsModel):
_layout = "expanded"
name: str = Field(title="Host")
@ -276,8 +307,10 @@ class PublishPluginsModel(BaseSettingsModel):
title="After Effects to deadline")
CelactionSubmitDeadline: CelactionSubmitDeadlineModel = Field(
default_factory=CelactionSubmitDeadlineModel,
title="Celaction Submit Deadline"
)
title="Celaction Submit Deadline")
BlenderSubmitDeadline: BlenderSubmitDeadlineModel = Field(
default_factory=BlenderSubmitDeadlineModel,
title="Blender Submit Deadline")
ProcessSubmittedJobOnFarm: ProcessSubmittedJobOnFarmModel = Field(
default_factory=ProcessSubmittedJobOnFarmModel,
title="Process submitted job on farm.")
@ -384,6 +417,15 @@ DEFAULT_DEADLINE_PLUGINS_SETTINGS = {
"deadline_chunk_size": 10,
"deadline_job_delay": "00:00:00:00"
},
"BlenderSubmitDeadline": {
"enabled": True,
"optional": False,
"active": True,
"use_published": True,
"priority": 50,
"chunk_size": 10,
"group": "none"
},
"ProcessSubmittedJobOnFarm": {
"enabled": True,
"deadline_department": "",
@ -400,6 +442,12 @@ DEFAULT_DEADLINE_PLUGINS_SETTINGS = {
".*([Bb]eauty).*"
]
},
{
"name": "blender",
"value": [
".*([Bb]eauty).*"
]
},
{
"name": "aftereffects",
"value": [

View file

@ -1 +1 @@
__version__ = "0.1.1"
__version__ = "0.1.2"

View file

@ -0,0 +1,45 @@
from pydantic import Field
from ayon_server.settings import BaseSettingsModel
class HoudiniVarModel(BaseSettingsModel):
    """A single Houdini environment variable: name, value and dir flag."""
    _layout = "expanded"
    var: str = Field("", title="Var")
    value: str = Field("", title="Value")
    # When True, the value is treated as a directory path (see
    # UpdateHoudiniVarcontextModel: the folder is ensured on update).
    is_directory: bool = Field(False, title="Treat as directory")
class UpdateHoudiniVarcontextModel(BaseSettingsModel):
    """Sync vars with context changes.

    If a value is treated as a directory on update
    it will be ensured the folder exists.
    """
    enabled: bool = Field(title="Enabled")
    # TODO this was dynamic dictionary '{var: path}'
    # List of variable definitions to keep in sync with the context.
    houdini_vars: list[HoudiniVarModel] = Field(
        default_factory=list,
        title="Houdini Vars"
    )
class GeneralSettingsModel(BaseSettingsModel):
    """Top-level "General" section of the Houdini addon settings."""

    update_houdini_var_context: UpdateHoudiniVarcontextModel = Field(
        default_factory=UpdateHoudiniVarcontextModel,
        title="Update Houdini Vars on context change"
    )
# Factory default values for ``GeneralSettingsModel``.
# By default only the JOB variable is synced, pointing at the task work
# directory (anatomy template), and created on disk if missing.
DEFAULT_GENERAL_SETTINGS = {
    "update_houdini_var_context": {
        "enabled": True,
        "houdini_vars": [
            {
                "var": "JOB",
                "value": "{root[work]}/{project[name]}/{hierarchy}/{asset}/work/{task[name]}",  # noqa
                "is_directory": True
            }
        ]
    }
}

View file

@ -4,7 +4,10 @@ from ayon_server.settings import (
MultiplatformPathModel,
MultiplatformPathListModel,
)
from .general import (
GeneralSettingsModel,
DEFAULT_GENERAL_SETTINGS
)
from .imageio import HoudiniImageIOModel
from .publish_plugins import (
PublishPluginsModel,
@ -52,6 +55,10 @@ class ShelvesModel(BaseSettingsModel):
class HoudiniSettings(BaseSettingsModel):
general: GeneralSettingsModel = Field(
default_factory=GeneralSettingsModel,
title="General"
)
imageio: HoudiniImageIOModel = Field(
default_factory=HoudiniImageIOModel,
title="Color Management (ImageIO)"
@ -73,6 +80,7 @@ class HoudiniSettings(BaseSettingsModel):
DEFAULT_VALUES = {
"general": DEFAULT_GENERAL_SETTINGS,
"shelves": [],
"create": DEFAULT_HOUDINI_CREATE_SETTINGS,
"publish": DEFAULT_HOUDINI_PUBLISH_SETTINGS

View file

@ -1 +1 @@
__version__ = "0.1.3"
__version__ = "0.1.4"

View file

@ -129,6 +129,10 @@ class CollectMayaRenderModel(BaseSettingsModel):
)
class CollectFbxAnimationModel(BaseSettingsModel):
    """Settings for the "Collect Fbx Animation" collector plugin."""

    enabled: bool = Field(title="Collect Fbx Animation")
class CollectFbxCameraModel(BaseSettingsModel):
    """Settings for the "Collect Camera for FBX export" collector plugin."""

    enabled: bool = Field(title="CollectFbxCamera")
@ -364,6 +368,10 @@ class PublishersModel(BaseSettingsModel):
title="Collect Render Layers",
section="Collectors"
)
CollectFbxAnimation: CollectFbxAnimationModel = Field(
default_factory=CollectFbxAnimationModel,
title="Collect FBX Animation",
)
CollectFbxCamera: CollectFbxCameraModel = Field(
default_factory=CollectFbxCameraModel,
title="Collect Camera for FBX export",
@ -644,6 +652,10 @@ class PublishersModel(BaseSettingsModel):
default_factory=BasicValidateModel,
title="Validate Rig Controllers",
)
ValidateAnimatedReferenceRig: BasicValidateModel = Field(
default_factory=BasicValidateModel,
title="Validate Animated Reference Rig",
)
ValidateAnimationContent: BasicValidateModel = Field(
default_factory=BasicValidateModel,
title="Validate Animation Content",
@ -660,14 +672,34 @@ class PublishersModel(BaseSettingsModel):
default_factory=BasicValidateModel,
title="Validate Skeletal Mesh Top Node",
)
ValidateSkeletonRigContents: BasicValidateModel = Field(
default_factory=BasicValidateModel,
title="Validate Skeleton Rig Contents"
)
ValidateSkeletonRigControllers: BasicValidateModel = Field(
default_factory=BasicValidateModel,
title="Validate Skeleton Rig Controllers"
)
ValidateSkinclusterDeformerSet: BasicValidateModel = Field(
default_factory=BasicValidateModel,
title="Validate Skincluster Deformer Relationships",
)
ValidateSkeletonRigOutputIds: BasicValidateModel = Field(
default_factory=BasicValidateModel,
title="Validate Skeleton Rig Output Ids"
)
ValidateSkeletonTopGroupHierarchy: BasicValidateModel = Field(
default_factory=BasicValidateModel,
title="Validate Skeleton Top Group Hierarchy",
)
ValidateRigOutSetNodeIds: ValidateRigOutSetNodeIdsModel = Field(
default_factory=ValidateRigOutSetNodeIdsModel,
title="Validate Rig Out Set Node Ids",
)
ValidateSkeletonRigOutSetNodeIds: ValidateRigOutSetNodeIdsModel = Field(
default_factory=ValidateRigOutSetNodeIdsModel,
title="Validate Skeleton Rig Out Set Node Ids",
)
# Rig - END
ValidateCameraAttributes: BasicValidateModel = Field(
default_factory=BasicValidateModel,
@ -748,6 +780,9 @@ DEFAULT_PUBLISH_SETTINGS = {
"CollectMayaRender": {
"sync_workfile_version": False
},
"CollectFbxAnimation": {
"enabled": True
},
"CollectFbxCamera": {
"enabled": False
},
@ -1143,6 +1178,11 @@ DEFAULT_PUBLISH_SETTINGS = {
"optional": True,
"active": True
},
"ValidateAnimatedReferenceRig": {
"enabled": True,
"optional": False,
"active": True
},
"ValidateAnimationContent": {
"enabled": True,
"optional": False,
@ -1163,6 +1203,16 @@ DEFAULT_PUBLISH_SETTINGS = {
"optional": False,
"active": True
},
"ValidateSkeletonRigContents": {
"enabled": True,
"optional": True,
"active": True
},
"ValidateSkeletonRigControllers": {
"enabled": False,
"optional": True,
"active": True
},
"ValidateSkinclusterDeformerSet": {
"enabled": True,
"optional": False,
@ -1173,6 +1223,21 @@ DEFAULT_PUBLISH_SETTINGS = {
"optional": False,
"allow_history_only": False
},
"ValidateSkeletonRigOutSetNodeIds": {
"enabled": False,
"optional": False,
"allow_history_only": False
},
"ValidateSkeletonRigOutputIds": {
"enabled": False,
"optional": True,
"active": True
},
"ValidateSkeletonTopGroupHierarchy": {
"enabled": True,
"optional": True,
"active": True
},
"ValidateCameraAttributes": {
"enabled": False,
"optional": True,

Some files were not shown because too many files have changed in this diff Show more