Merge branch 'enhancement/usd_workflow' of https://github.com/BigRoy/ayon-core into enhancement/usd_workflow

This commit is contained in:
Roy Nieterau 2024-05-23 12:19:51 +02:00
commit 00bd27e1b1
297 changed files with 10154 additions and 4947 deletions

View file

@ -50,7 +50,7 @@ IGNORED_MODULES_IN_AYON = set()
# When addon was moved from ayon-core codebase
# - this is used to log the missing addon
MOVED_ADDON_MILESTONE_VERSIONS = {
"applications": VersionInfo(2, 0, 0),
"applications": VersionInfo(0, 2, 0),
}
# Inherit from `object` for Python 2 hosts

View file

@ -1,7 +1,7 @@
from ayon_applications import PreLaunchHook
from ayon_core.pipeline.colorspace import get_imageio_config
from ayon_core.pipeline.template_data import get_template_data_with_names
from ayon_core.pipeline.colorspace import get_imageio_config_preset
from ayon_core.pipeline.template_data import get_template_data
class OCIOEnvHook(PreLaunchHook):
@ -26,32 +26,38 @@ class OCIOEnvHook(PreLaunchHook):
def execute(self):
"""Hook entry method."""
template_data = get_template_data_with_names(
project_name=self.data["project_name"],
folder_path=self.data["folder_path"],
task_name=self.data["task_name"],
folder_entity = self.data["folder_entity"]
template_data = get_template_data(
self.data["project_entity"],
folder_entity=folder_entity,
task_entity=self.data["task_entity"],
host_name=self.host_name,
settings=self.data["project_settings"]
settings=self.data["project_settings"],
)
config_data = get_imageio_config(
project_name=self.data["project_name"],
host_name=self.host_name,
project_settings=self.data["project_settings"],
anatomy_data=template_data,
config_data = get_imageio_config_preset(
self.data["project_name"],
self.data["folder_path"],
self.data["task_name"],
self.host_name,
anatomy=self.data["anatomy"],
project_settings=self.data["project_settings"],
template_data=template_data,
env=self.launch_context.env,
folder_id=folder_entity["id"],
)
if config_data:
ocio_path = config_data["path"]
if self.host_name in ["nuke", "hiero"]:
ocio_path = ocio_path.replace("\\", "/")
self.log.info(
f"Setting OCIO environment to config path: {ocio_path}")
self.launch_context.env["OCIO"] = ocio_path
else:
if not config_data:
self.log.debug("OCIO not set or enabled")
return
ocio_path = config_data["path"]
if self.host_name in ["nuke", "hiero"]:
ocio_path = ocio_path.replace("\\", "/")
self.log.info(
f"Setting OCIO environment to config path: {ocio_path}")
self.launch_context.env["OCIO"] = ocio_path

View file

@ -60,7 +60,7 @@ def main(*subprocess_args):
)
)
elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True):
elif os.environ.get("AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH", True):
save = False
if os.getenv("WORKFILES_SAVE_AS"):
save = True

View file

@ -8,14 +8,11 @@ from ayon_core.lib import Logger, register_event_callback
from ayon_core.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
register_workfile_build_plugin_path,
AVALON_CONTAINER_ID,
AVALON_INSTANCE_ID,
AYON_INSTANCE_ID,
)
from ayon_core.hosts.aftereffects.api.workfile_template_builder import (
AEPlaceholderLoadPlugin,
AEPlaceholderCreatePlugin
)
from ayon_core.pipeline.load import any_outdated_containers
import ayon_core.hosts.aftereffects
@ -40,6 +37,7 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
@ -76,6 +74,7 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
register_event_callback("application.launched", application_launch)
@ -118,12 +117,6 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
item["id"] = "publish_context"
self.stub.imprint(item["id"], item)
def get_workfile_build_placeholder_plugins(self):
return [
AEPlaceholderLoadPlugin,
AEPlaceholderCreatePlugin
]
# created instances section
def list_instances(self):
"""List all created instances from current workfile which

View file

@ -1,6 +1,7 @@
import os.path
import uuid
import shutil
from abc import abstractmethod
from ayon_core.pipeline import registered_host
from ayon_core.tools.workfile_template_build import (
@ -9,13 +10,9 @@ from ayon_core.tools.workfile_template_build import (
from ayon_core.pipeline.workfile.workfile_template_builder import (
AbstractTemplateBuilder,
PlaceholderPlugin,
LoadPlaceholderItem,
CreatePlaceholderItem,
PlaceholderLoadMixin,
PlaceholderCreateMixin
PlaceholderItem
)
from ayon_core.hosts.aftereffects.api import get_stub
from ayon_core.hosts.aftereffects.api.lib import set_settings
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
PLACEHOLDER_ID = "openpype.placeholder"
@ -51,6 +48,10 @@ class AETemplateBuilder(AbstractTemplateBuilder):
class AEPlaceholderPlugin(PlaceholderPlugin):
"""Contains generic methods for all PlaceholderPlugins."""
@abstractmethod
def _create_placeholder_item(self, item_data: dict) -> PlaceholderItem:
pass
def collect_placeholders(self):
"""Collect info from file metadata about created placeholders.
@ -63,17 +64,7 @@ class AEPlaceholderPlugin(PlaceholderPlugin):
if item.get("plugin_identifier") != self.identifier:
continue
if isinstance(self, AEPlaceholderLoadPlugin):
item = LoadPlaceholderItem(item["uuid"],
item["data"],
self)
elif isinstance(self, AEPlaceholderCreatePlugin):
item = CreatePlaceholderItem(item["uuid"],
item["data"],
self)
else:
raise NotImplementedError(f"Not implemented for {type(self)}")
item = self._create_placeholder_item(item)
output.append(item)
return output
@ -135,87 +126,6 @@ class AEPlaceholderPlugin(PlaceholderPlugin):
stub.imprint(item_id, container_data)
class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin):
"""Adds Create placeholder.
This adds composition and runs Create
"""
identifier = "aftereffects.create"
label = "AfterEffects create"
def create_placeholder(self, placeholder_data):
stub = get_stub()
name = "CREATEPLACEHOLDER"
item_id = stub.add_item(name, "COMP")
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Replace 'placeholder' with publishable instance.
Renames prepared composition name, creates publishable instance, sets
frame/duration settings according to DB.
"""
pre_create_data = {"use_selection": True}
item_id, item = self._get_item(placeholder)
get_stub().select_items([item_id])
self.populate_create_placeholder(placeholder, pre_create_data)
# apply settings for populated composition
item_id, metadata_item = self._get_item(placeholder)
set_settings(True, True, [item_id])
def get_placeholder_options(self, options=None):
return self.get_create_plugin_options(options)
class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin):
identifier = "aftereffects.load"
label = "AfterEffects load"
def create_placeholder(self, placeholder_data):
"""Creates AE's Placeholder item in Project items list.
Sets dummy resolution/duration/fps settings, will be replaced when
populated.
"""
stub = get_stub()
name = "LOADERPLACEHOLDER"
item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Use Openpype Loader from `placeholder` to create new FootageItems
New FootageItems are created, files are imported.
"""
self.populate_load_placeholder(placeholder)
errors = placeholder.get_errors()
stub = get_stub()
if errors:
stub.print_msg("\n".join(errors))
else:
if not placeholder.data["keep_placeholder"]:
metadata = stub.get_metadata()
for item in metadata:
if not item.get("is_placeholder"):
continue
scene_identifier = item.get("uuid")
if (scene_identifier and
scene_identifier == placeholder.scene_identifier):
stub.delete_item(item["members"][0])
stub.remove_instance(placeholder.scene_identifier, metadata)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def load_succeed(self, placeholder, container):
placeholder_item_id, _ = self._get_item(placeholder)
item_id = container.id
get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)
def build_workfile_template(*args, **kwargs):
builder = AETemplateBuilder(registered_host())
builder.build_template(*args, **kwargs)

View file

@ -24,7 +24,7 @@ class AERenderInstance(RenderInstance):
class CollectAERender(publish.AbstractCollectRender):
order = pyblish.api.CollectorOrder + 0.405
order = pyblish.api.CollectorOrder + 0.100
label = "Collect After Effects Render Layers"
hosts = ["aftereffects"]
@ -145,6 +145,7 @@ class CollectAERender(publish.AbstractCollectRender):
if "review" in instance.families:
# to skip ExtractReview locally
instance.families.remove("review")
instance.deadline = inst.data.get("deadline")
instances.append(instance)

View file

@ -0,0 +1,49 @@
from ayon_core.pipeline.workfile.workfile_template_builder import (
CreatePlaceholderItem,
PlaceholderCreateMixin
)
from ayon_core.hosts.aftereffects.api import get_stub
from ayon_core.hosts.aftereffects.api.lib import set_settings
import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb
class AEPlaceholderCreatePlugin(wtb.AEPlaceholderPlugin,
PlaceholderCreateMixin):
"""Adds Create placeholder.
This adds composition and runs Create
"""
identifier = "aftereffects.create"
label = "AfterEffects create"
def _create_placeholder_item(self, item_data) -> CreatePlaceholderItem:
return CreatePlaceholderItem(
scene_identifier=item_data["uuid"],
data=item_data["data"],
plugin=self
)
def create_placeholder(self, placeholder_data):
stub = get_stub()
name = "CREATEPLACEHOLDER"
item_id = stub.add_item(name, "COMP")
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Replace 'placeholder' with publishable instance.
Renames the prepared composition, creates a publishable instance, and sets
frame/duration settings according to the DB.
"""
pre_create_data = {"use_selection": True}
item_id, item = self._get_item(placeholder)
get_stub().select_items([item_id])
self.populate_create_placeholder(placeholder, pre_create_data)
# apply settings for populated composition
item_id, metadata_item = self._get_item(placeholder)
set_settings(True, True, [item_id])
def get_placeholder_options(self, options=None):
return self.get_create_plugin_options(options)

View file

@ -0,0 +1,60 @@
from ayon_core.pipeline.workfile.workfile_template_builder import (
LoadPlaceholderItem,
PlaceholderLoadMixin
)
from ayon_core.hosts.aftereffects.api import get_stub
import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb
class AEPlaceholderLoadPlugin(wtb.AEPlaceholderPlugin, PlaceholderLoadMixin):
identifier = "aftereffects.load"
label = "AfterEffects load"
def _create_placeholder_item(self, item_data) -> LoadPlaceholderItem:
return LoadPlaceholderItem(
scene_identifier=item_data["uuid"],
data=item_data["data"],
plugin=self
)
def create_placeholder(self, placeholder_data):
"""Creates AE's Placeholder item in Project items list.
Sets dummy resolution/duration/fps settings, will be replaced when
populated.
"""
stub = get_stub()
name = "LOADERPLACEHOLDER"
item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Use Openpype Loader from `placeholder` to create new FootageItems
New FootageItems are created, files are imported.
"""
self.populate_load_placeholder(placeholder)
errors = placeholder.get_errors()
stub = get_stub()
if errors:
stub.print_msg("\n".join(errors))
else:
if not placeholder.data["keep_placeholder"]:
metadata = stub.get_metadata()
for item in metadata:
if not item.get("is_placeholder"):
continue
scene_identifier = item.get("uuid")
if (scene_identifier and
scene_identifier == placeholder.scene_identifier):
stub.delete_item(item["members"][0])
stub.remove_instance(placeholder.scene_identifier, metadata)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def load_succeed(self, placeholder, container):
placeholder_item_id, _ = self._get_item(placeholder)
item_id = container.id
get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)

View file

@ -33,7 +33,7 @@ def load_scripts(paths):
if register:
try:
register()
except:
except: # noqa E722
traceback.print_exc()
else:
print("\nWarning! '%s' has no register function, "
@ -45,7 +45,7 @@ def load_scripts(paths):
if unregister:
try:
unregister()
except:
except: # noqa E722
traceback.print_exc()
def test_reload(mod):
@ -57,7 +57,7 @@ def load_scripts(paths):
try:
return importlib.reload(mod)
except:
except: # noqa E722
traceback.print_exc()
def test_register(mod):

View file

@ -143,13 +143,19 @@ def deselect_all():
if obj.mode != 'OBJECT':
modes.append((obj, obj.mode))
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode='OBJECT')
context_override = create_blender_context(active=obj)
with bpy.context.temp_override(**context_override):
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
context_override = create_blender_context()
with bpy.context.temp_override(**context_override):
bpy.ops.object.select_all(action='DESELECT')
for p in modes:
bpy.context.view_layer.objects.active = p[0]
bpy.ops.object.mode_set(mode=p[1])
context_override = create_blender_context(active=p[0])
with bpy.context.temp_override(**context_override):
bpy.ops.object.mode_set(mode=p[1])
bpy.context.view_layer.objects.active = active
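The change above wraps each `bpy.ops` call in Blender's `Context.temp_override()` (available since Blender 3.2), which scopes context members to the enclosed calls. A minimal standalone sketch of the pattern, with an assumed override instead of the `create_blender_context` helper:

import bpy

obj = bpy.context.view_layer.objects.active
# The override applies only inside the `with` block; the global
# context is restored on exit.
with bpy.context.temp_override(active_object=obj, selected_objects=[obj]):
    bpy.ops.object.mode_set(mode='OBJECT')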

View file

@ -43,7 +43,10 @@ class AbcCameraLoader(plugin.AssetLoader):
def _process(self, libpath, asset_group, group_name):
plugin.deselect_all()
bpy.ops.wm.alembic_import(filepath=libpath)
# Force the creation of the transform cache even if the camera
# doesn't have an animation. We use the cache to update the camera.
bpy.ops.wm.alembic_import(
filepath=libpath, always_add_cache_reader=True)
objects = lib.get_selection()
@ -178,12 +181,33 @@ class AbcCameraLoader(plugin.AssetLoader):
self.log.info("Library already loaded, not updating...")
return
mat = asset_group.matrix_basis.copy()
for obj in asset_group.children:
found = False
for constraint in obj.constraints:
if constraint.type == "TRANSFORM_CACHE":
constraint.cache_file.filepath = libpath.as_posix()
found = True
break
if not found:
# This is to keep compatibility with cameras loaded with
# the old loader
# Create a new constraint for the cache file
constraint = obj.constraints.new("TRANSFORM_CACHE")
bpy.ops.cachefile.open(filepath=libpath.as_posix())
constraint.cache_file = bpy.data.cache_files[-1]
constraint.cache_file.scale = 1.0
self._remove(asset_group)
self._process(str(libpath), asset_group, object_name)
# Workaround to set the object path: Blender doesn't load the
# list of object paths until the object is evaluated, so force
# an evaluation here.
# The modifier doesn't need to be removed because camera
# objects don't have modifiers.
obj.modifiers.new(
name='MeshSequenceCache', type='MESH_SEQUENCE_CACHE')
bpy.context.evaluated_depsgraph_get()
asset_group.matrix_basis = mat
constraint.object_path = (
constraint.cache_file.object_paths[0].path)
metadata["libpath"] = str(libpath)
metadata["representation"] = repre_entity["id"]

View file

@ -2,6 +2,7 @@ import os
import bpy
from ayon_core.lib import BoolDef
from ayon_core.pipeline import publish
from ayon_core.hosts.blender.api import plugin
@ -17,6 +18,8 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
if not self.is_active(instance.data):
return
attr_values = self.get_attr_values_from_data(instance.data)
# Define extract output file path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["folderEntity"]["name"]
@ -46,7 +49,8 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
bpy.ops.wm.alembic_export(
filepath=filepath,
selected=True,
flatten=False
flatten=False,
subdiv_schema=attr_values.get("subdiv_schema", False)
)
plugin.deselect_all()
@ -65,6 +69,21 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
self.log.debug("Extracted instance '%s' to: %s",
instance.name, representation)
@classmethod
def get_attribute_defs(cls):
return [
BoolDef(
"subdiv_schema",
label="Alembic Mesh Subdiv Schema",
tooltip="Export Meshes using Alembic's subdivision schema.\n"
"Enabling this includes creases with the export but "
"excludes the mesh's normals.\n"
"Enabling this usually result in smaller file size "
"due to lack of normals.",
default=False
)
]
class ExtractModelABC(ExtractABC):
"""Extract model as ABC."""

View file

@ -58,3 +58,55 @@ class SelectInvalidAction(pyblish.api.Action):
self.log.info(
"Selecting invalid tools: %s" % ", ".join(sorted(names))
)
class SelectToolAction(pyblish.api.Action):
"""Select invalid output tool in Fusion when plug-in failed.
"""
label = "Select saver"
on = "failed" # This action is only available on a failed plug-in
icon = "search" # Icon from Awesome Icon
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(
context,
plugin=plugin,
)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
tools = []
for instance in errored_instances:
tool = instance.data.get("tool")
if tool is not None:
tools.append(tool)
else:
self.log.warning(
"Plug-in returned to be invalid, "
f"but has no saver for instance {instance.name}."
)
if not tools:
# Assume relevant comp is current comp and clear selection
self.log.info("No invalid tools found.")
comp = get_current_comp()
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
return
# Assume a single comp
first_tool = tools[0]
comp = first_tool.Comp()
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
names = set()
for tool in tools:
flow.Select(tool, True)
comp.SetActiveTool(tool)
names.add(tool.Name)
self.log.info(
"Selecting invalid tools: %s" % ", ".join(sorted(names))
)

View file

@ -169,7 +169,7 @@ def validate_comp_prefs(comp=None, force_repair=False):
def _on_repair():
attributes = dict()
for key, comp_key, _label in validations:
value = folder_value[key]
value = folder_attributes[key]
comp_key_full = "Comp.FrameFormat.{}".format(comp_key)
attributes[comp_key_full] = value
comp.SetPrefs(attributes)
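For context, Fusion's `SetPrefs` accepts a flat dict keyed by preference paths, which is what the repair builds above (the values here are illustrative):

comp = get_current_comp()
comp.SetPrefs({
    "Comp.FrameFormat.Width": 1920,
    "Comp.FrameFormat.Height": 1080,
    "Comp.FrameFormat.Rate": 25.0,
})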

View file

@ -1,5 +1,5 @@
import os
from ayon_core.lib import PreLaunchHook
from ayon_applications import PreLaunchHook
from ayon_core.hosts.fusion import FUSION_HOST_DIR

View file

@ -52,7 +52,7 @@ class CollectFusionRender(
if product_type not in ["render", "image"]:
continue
task_name = context.data["task"]
task_name = inst.data["task"]
tool = inst.data["transientData"]["tool"]
instance_families = inst.data.get("families", [])
@ -115,6 +115,7 @@ class CollectFusionRender(
if "review" in instance.families:
# to skip ExtractReview locally
instance.families.remove("review")
instance.deadline = inst.data.get("deadline")
instances.append(instance)

View file

@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-
"""Validate if instance context is the same as publish context."""
import pyblish.api
from ayon_core.hosts.fusion.api.action import SelectToolAction
from ayon_core.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError,
OptionalPyblishPluginMixin
)
class ValidateInstanceInContextFusion(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Validator to check if instance context matches context of publish.
When working in per-shot style you always publish data in context of
current asset (shot). This validator checks if this is so. It is optional
so it can be disabled when needed.
"""
# Similar to maya and houdini-equivalent `ValidateInstanceInContext`
order = ValidateContentsOrder
label = "Instance in same Context"
optional = True
hosts = ["fusion"]
actions = [SelectToolAction, RepairAction]
def process(self, instance):
if not self.is_active(instance.data):
return
instance_context = self.get_context(instance.data)
context = self.get_context(instance.context.data)
if instance_context != context:
context_label = "{} > {}".format(*context)
instance_label = "{} > {}".format(*instance_context)
raise PublishValidationError(
message=(
"Instance '{}' publishes to different asset than current "
"context: {}. Current context: {}".format(
instance.name, instance_label, context_label
)
),
description=(
"## Publishing to a different asset\n"
"There are publish instances present which are publishing "
"into a different asset than your current context.\n\n"
"Usually this is not what you want but there can be cases "
"where you might want to publish into another asset or "
"shot. If that's the case you can disable the validation "
"on the instance to ignore it."
)
)
@classmethod
def repair(cls, instance):
create_context = instance.context.data["create_context"]
instance_id = instance.data.get("instance_id")
created_instance = create_context.get_instance_by_id(
instance_id
)
if created_instance is None:
raise RuntimeError(
f"No CreatedInstances found with id '{instance_id} "
f"in {create_context.instances_by_id}"
)
context_asset, context_task = cls.get_context(instance.context.data)
created_instance["folderPath"] = context_asset
created_instance["task"] = context_task
create_context.save_changes()
@staticmethod
def get_context(data):
"""Return asset, task from publishing context data"""
return data["folderPath"], data["task"]

View file

@ -177,7 +177,10 @@ class CollectFarmRender(publish.AbstractCollectRender):
outputFormat=info[1],
outputStartFrame=info[3],
leadingZeros=info[2],
ignoreFrameHandleCheck=True
ignoreFrameHandleCheck=True,
#todo: inst is not available, must be determined, fix when
#reworking to Publisher
# deadline=inst.data.get("deadline")
)
render_instance.context = context

View file

@ -8,6 +8,7 @@ from .lib import (
sync_avalon_data_to_workfile,
launch_workfiles_app,
before_project_save,
apply_colorspace_project
)
from .tags import add_tags_to_workfile
from .menu import update_menu_task_label
@ -44,6 +45,8 @@ def afterNewProjectCreated(event):
# reset workfiles startup not to open any more in session
os.environ["WORKFILES_STARTUP"] = "0"
apply_colorspace_project()
def beforeProjectLoad(event):
log.info("before project load event...")
@ -122,6 +125,7 @@ def register_hiero_events():
except RuntimeError:
pass
def register_events():
"""
Adding all callbacks.

View file

@ -11,7 +11,6 @@ import warnings
import json
import ast
import secrets
import shutil
import hiero
from qtpy import QtWidgets, QtCore
@ -36,9 +35,6 @@ from .constants import (
DEFAULT_SEQUENCE_NAME,
DEFAULT_BIN_NAME
)
from ayon_core.pipeline.colorspace import (
get_imageio_config
)
class _CTX:
@ -105,9 +101,9 @@ def flatten(list_):
def get_current_project(remove_untitled=False):
projects = flatten(hiero.core.projects())
projects = hiero.core.projects()
if not remove_untitled:
return next(iter(projects))
return projects[0]
# if remove_untitled
for proj in projects:
@ -1050,30 +1046,84 @@ def _set_hrox_project_knobs(doc, **knobs):
def apply_colorspace_project():
project_name = get_current_project_name()
# get path the the active projects
project = get_current_project(remove_untitled=True)
current_file = project.path()
# close the active project
project.close()
"""Apply colorspaces from settings.
Due to not being able to set the project settings through the Python API,
we need to use some dubious code to find the widgets and set them. It is
possible to set the project settings without traversing the widgets, but
that involves reading the hrox files from disk as XML, so there is no
in-memory support. See https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python # noqa
for more details.
"""
# get presets for hiero
project_name = get_current_project_name()
imageio = get_project_settings(project_name)["hiero"]["imageio"]
presets = imageio.get("workfile")
# Open Project Settings UI.
for act in hiero.ui.registeredActions():
if act.objectName() == "foundry.project.settings":
act.trigger()
# Find widgets from their sibling label.
labels = {
"Working Space:": "workingSpace",
"Viewer:": "viewerLut",
"Thumbnails:": "thumbnailLut",
"Monitor Out:": "monitorOutLut",
"8 Bit Files:": "eightBitLut",
"16 Bit Files:": "sixteenBitLut",
"Log Files:": "logLut",
"Floating Point Files:": "floatLut"
}
widgets = {x: None for x in labels.values()}
def _recursive_children(widget, labels, widgets):
children = widget.children()
for count, child in enumerate(children):
if isinstance(child, QtWidgets.QLabel):
if child.text() in labels.keys():
widgets[labels[child.text()]] = children[count + 1]
_recursive_children(child, labels, widgets)
app = QtWidgets.QApplication.instance()
title = "Project Settings"
for widget in app.topLevelWidgets():
if isinstance(widget, QtWidgets.QMainWindow):
if widget.windowTitle() != title:
continue
_recursive_children(widget, labels, widgets)
widget.close()
msg = "Setting value \"{}\" is not a valid option for \"{}\""
for key, widget in widgets.items():
options = [widget.itemText(i) for i in range(widget.count())]
setting_value = presets[key]
assert setting_value in options, msg.format(setting_value, key)
widget.setCurrentText(presets[key])
# This code block is for setting up project colorspaces for files on disk.
# Due to not having Python API access to set the project settings, the
# Foundry recommended way is to modify the hrox files on disk with XML. See
# this forum thread for more details;
# https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python # noqa
'''
# backward compatibility layer
# TODO: remove this after some time
config_data = get_imageio_config(
project_name=get_current_project_name(),
host_name="hiero"
)
config_data = get_current_context_imageio_config_preset()
if config_data:
presets.update({
"ocioConfigName": "custom"
})
# get path to the active project
project = get_current_project()
current_file = project.path()
msg = "The project needs to be saved to disk to apply colorspace settings."
assert current_file, msg
# save the workfile as subversion "comment:_colorspaceChange"
split_current_file = os.path.splitext(current_file)
copy_current_file = current_file
@ -1116,6 +1166,7 @@ def apply_colorspace_project():
# open the file as current project
hiero.core.openProject(copy_current_file)
'''
def apply_colorspace_clips():
@ -1125,10 +1176,8 @@ def apply_colorspace_clips():
# get presets for hiero
imageio = get_project_settings(project_name)["hiero"]["imageio"]
from pprint import pprint
presets = imageio.get("regexInputs", {}).get("inputs", {})
pprint(presets)
for clip in clips:
clip_media_source_path = clip.mediaSource().firstpath()
clip_name = clip.name()

View file

@ -144,7 +144,7 @@ def add_tags_to_workfile():
# Get project task types.
project_name = get_current_project_name()
project_entity = ayon_api.get_project(project_name)
task_types = project_entity["taskType"]
task_types = project_entity["taskTypes"]
nks_pres_tags["[Tasks]"] = {}
log.debug("__ tasks: {}".format(task_types))
for task_type in task_types:
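The fix switches to the plural key: `ayon_api.get_project()` returns task types under "taskTypes" as a list of dicts (the exact shape is assumed here for illustration):

import ayon_api

project_entity = ayon_api.get_project(project_name)
# Assumed shape: [{"name": "Compositing", ...}, {"name": "Animation", ...}]
for task_type in project_entity["taskTypes"]:
    print(task_type["name"])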

View file

@ -51,13 +51,12 @@ def open_file(filepath):
project = hiero.core.projects()[-1]
# open project file
hiero.core.openProject(filepath.replace(os.path.sep, "/"))
# close previous project
project.close()
# Close previous project if its different to the current project.
filepath = filepath.replace(os.path.sep, "/")
if project.path().replace(os.path.sep, "/") != filepath:
# open project file
hiero.core.openProject(filepath)
project.close()
return True

View file

@ -833,6 +833,43 @@ def get_current_context_template_data_with_folder_attrs():
return template_data
def set_review_color_space(opengl_node, review_color_space="", log=None):
"""Set ociocolorspace parameter for the given OpenGL node.
Set the `ociocolorspace` parameter of the given OpenGL node
to the given review_color_space value.
If review_color_space is empty, a default colorspace corresponding to
the display & view of the current Houdini session will be used.
Args:
opengl_node (hou.Node): ROP node to set its ociocolorspace parm.
review_color_space (str): Colorspace value for ociocolorspace parm.
log (logging.Logger): Logger to log to.
"""
if log is None:
# Fall back to a module logger; `self` is not available in this
# module-level function (assumes ayon_core.lib's Logger is importable here).
log = Logger.get_logger(__name__)
# Set Color Correction parameter to OpenColorIO
colorcorrect_parm = opengl_node.parm("colorcorrect")
if colorcorrect_parm.eval() != 2:
colorcorrect_parm.set(2)
log.debug(
"'Color Correction' parm on '{}' has been set to"
" 'OpenColorIO'".format(opengl_node.path())
)
opengl_node.setParms(
{"ociocolorspace": review_color_space}
)
log.debug(
"'OCIO Colorspace' parm on '{}' has been set to "
"the view color space '{}'"
.format(opengl_node, review_color_space)
)
def get_context_var_changes():
"""get context var changes."""

View file

@ -0,0 +1,64 @@
from ayon_applications import PreLaunchHook, LaunchTypes
class SetDefaultDisplayView(PreLaunchHook):
"""Set default view and default display for houdini via OpenColorIO.
Houdini's defaultDisplay and defaultView are set by
setting 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS'
environment variables respectively.
More info: https://www.sidefx.com/docs/houdini/io/ocio.html#set-up
"""
app_groups = {"houdini"}
launch_types = {LaunchTypes.local}
def execute(self):
OCIO = self.launch_context.env.get("OCIO")
# This is a cheap way to skip this hook if either global color
# management or houdini color management was disabled because the
# OCIO var would be set by the global OCIOEnvHook
if not OCIO:
return
# workfile settings added in '0.2.13'
houdini_color_settings = \
self.data["project_settings"]["houdini"]["imageio"].get("workfile")
if not houdini_color_settings:
self.log.info("Hook 'SetDefaultDisplayView' requires Houdini "
"addon version >= '0.2.13'")
return
if not houdini_color_settings["enabled"]:
self.log.info(
"Houdini workfile color management is disabled."
)
return
# 'OCIO_ACTIVE_DISPLAYS', 'OCIO_ACTIVE_VIEWS' are checked
# as Admins can add them in Ayon env vars or Ayon tools.
default_display = houdini_color_settings["default_display"]
if default_display:
# get 'OCIO_ACTIVE_DISPLAYS' value if exists.
self._set_context_env("OCIO_ACTIVE_DISPLAYS", default_display)
default_view = houdini_color_settings["default_view"]
if default_view:
# get 'OCIO_ACTIVE_VIEWS' value if exists.
self._set_context_env("OCIO_ACTIVE_VIEWS", default_view)
def _set_context_env(self, env_var, default_value):
env_value = self.launch_context.env.get(env_var, "")
new_value = ":".join(
key for key in [default_value, env_value] if key
)
self.log.info(
"Setting {} environment to: {}"
.format(env_var, new_value)
)
self.launch_context.env[env_var] = new_value
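The `_set_context_env` helper prepends the settings value to whatever an admin already set, joining with ":" (values illustrative):

# OCIO_ACTIVE_VIEWS already holds "sRGB:Raw" and default_view is
# "ACES 1.0 SDR-video"; the hook produces "ACES 1.0 SDR-video:sRGB:Raw".
new_value = ":".join(
    key for key in ["ACES 1.0 SDR-video", "sRGB:Raw"] if key
)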

View file

@ -13,11 +13,17 @@ class CreateArnoldRop(plugin.HoudiniCreator):
# Default extension
ext = "exr"
# Default to split export and render jobs
export_job = True
# Default render target
render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
import hou
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in ["render_target", "review"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
# Remove the active, we are checking the bypass flag of the nodes
instance_data.pop("active", None)
@ -25,8 +31,6 @@ class CreateArnoldRop(plugin.HoudiniCreator):
# Add chunk size attribute
instance_data["chunkSize"] = 1
# Submit for job publishing
instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateArnoldRop, self).create(
product_name,
@ -51,7 +55,7 @@ class CreateArnoldRop(plugin.HoudiniCreator):
"ar_exr_half_precision": 1 # half precision
}
if pre_create_data.get("export_job"):
if pre_create_data.get("render_target") == "farm_split":
ass_filepath = \
"{export_dir}{product_name}/{product_name}.$F4.ass".format(
export_dir=hou.text.expandString("$HIP/pyblish/ass/"),
@ -66,23 +70,41 @@ class CreateArnoldRop(plugin.HoudiniCreator):
to_lock = ["productType", "id"]
self.lock_parameters(instance_node, to_lock)
def get_pre_create_attr_defs(self):
attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs()
def get_instance_attr_defs(self):
"""get instance attribute definitions.
Attributes defined in this method are exposed in
publish tab in the publisher UI.
"""
render_target_items = {
"local": "Local machine rendering",
"local_no_render": "Use existing frames (local)",
"farm": "Farm Rendering",
"farm_split": "Farm Rendering - Split export & render jobs",
}
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
EnumDef("render_target",
items=render_target_items,
label="Render target",
default=self.render_target),
]
def get_pre_create_attr_defs(self):
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("export_job",
label="Split export and render jobs",
default=self.export_job),
attrs = [
EnumDef("image_format",
image_format_enum,
default=self.ext,
label="Image Format Options")
label="Image Format Options"),
]
return attrs + self.get_instance_attr_defs()
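The same pattern repeats in the creators below: pre-create choices are copied into the instance's `creator_attributes`, so a freshly created ROP instance carries data shaped roughly like this (values illustrative):

instance_data["creator_attributes"] = {
    "render_target": "farm_split",  # one of the EnumDef keys above
    "review": True,
}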

View file

@ -11,15 +11,23 @@ class CreateKarmaROP(plugin.HoudiniCreator):
product_type = "karma_rop"
icon = "magic"
# Default render target
render_target = "farm"
def create(self, product_name, instance_data, pre_create_data):
import hou # noqa
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in ["render_target", "review"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "karma"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateKarmaROP, self).create(
product_name,
@ -86,18 +94,40 @@ class CreateKarmaROP(plugin.HoudiniCreator):
to_lock = ["productType", "id"]
self.lock_parameters(instance_node, to_lock)
def get_pre_create_attr_defs(self):
attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
def get_instance_attr_defs(self):
"""get instance attribute definitions.
Attributes defined in this method are exposed in
publish tab in the publisher UI.
"""
render_target_items = {
"local": "Local machine rendering",
"local_no_render": "Use existing frames (local)",
"farm": "Farm Rendering",
}
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
EnumDef("render_target",
items=render_target_items,
label="Render target",
default=self.render_target)
]
def get_pre_create_attr_defs(self):
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
BoolDef("farm",
label="Submitting to Farm",
default=True),
attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
attrs += [
EnumDef("image_format",
image_format_enum,
default="exr",
@ -112,5 +142,6 @@ class CreateKarmaROP(plugin.HoudiniCreator):
decimals=0),
BoolDef("cam_res",
label="Camera Resolution",
default=False)
default=False),
]
return attrs + self.get_instance_attr_defs()

View file

@ -11,18 +11,22 @@ class CreateMantraROP(plugin.HoudiniCreator):
product_type = "mantra_rop"
icon = "magic"
# Default to split export and render jobs
export_job = True
# Default render target
render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
import hou # noqa
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in ["render_target", "review"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "ifd"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateMantraROP, self).create(
product_name,
@ -46,7 +50,7 @@ class CreateMantraROP(plugin.HoudiniCreator):
"vm_picture": filepath,
}
if pre_create_data.get("export_job"):
if pre_create_data.get("render_target") == "farm_split":
ifd_filepath = \
"{export_dir}{product_name}/{product_name}.$F4.ifd".format(
export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),
@ -77,21 +81,40 @@ class CreateMantraROP(plugin.HoudiniCreator):
to_lock = ["productType", "id"]
self.lock_parameters(instance_node, to_lock)
def get_pre_create_attr_defs(self):
attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
def get_instance_attr_defs(self):
"""get instance attribute definitions.
Attributes defined in this method are exposed in
publish tab in the publisher UI.
"""
render_target_items = {
"local": "Local machine rendering",
"local_no_render": "Use existing frames (local)",
"farm": "Farm Rendering",
"farm_split": "Farm Rendering - Split export & render jobs",
}
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
EnumDef("render_target",
items=render_target_items,
label="Render target",
default=self.render_target)
]
def get_pre_create_attr_defs(self):
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("export_job",
label="Split export and render jobs",
default=self.export_job),
attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
attrs += [
EnumDef("image_format",
image_format_enum,
default="exr",
@ -100,5 +123,6 @@ class CreateMantraROP(plugin.HoudiniCreator):
label="Override Camera Resolution",
tooltip="Override the current camera "
"resolution, recommended for IPR.",
default=False)
default=False),
]
return attrs + self.get_instance_attr_defs()

View file

@ -17,17 +17,21 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
ext = "exr"
multi_layered_mode = "No Multi-Layered EXR File"
# Default to split export and render jobs
split_render = True
# Default render target
render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in ["render_target", "review"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "Redshift_ROP"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateRedshiftROP, self).create(
product_name,
@ -99,7 +103,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
rs_filepath = f"{export_dir}{product_name}/{product_name}.$F4.rs"
parms["RS_archive_file"] = rs_filepath
if pre_create_data.get("split_render", self.split_render):
if pre_create_data.get("render_target") == "farm_split":
parms["RS_archive_enable"] = 1
instance_node.setParms(parms)
@ -118,24 +122,44 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
return super(CreateRedshiftROP, self).remove_instances(instances)
def get_instance_attr_defs(self):
"""get instance attribute definitions.
Attributes defined in this method are exposed in
publish tab in the publisher UI.
"""
render_target_items = {
"local": "Local machine rendering",
"local_no_render": "Use existing frames (local)",
"farm": "Farm Rendering",
"farm_split": "Farm Rendering - Split export & render jobs",
}
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
EnumDef("render_target",
items=render_target_items,
label="Render target",
default=self.render_target)
]
def get_pre_create_attr_defs(self):
attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
image_format_enum = [
"exr", "tif", "jpg", "png",
]
multi_layered_mode = [
"No Multi-Layered EXR File",
"Full Multi-Layered EXR File"
]
return attrs + [
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("split_render",
label="Split export and render jobs",
default=self.split_render),
attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
attrs += [
EnumDef("image_format",
image_format_enum,
default=self.ext,
@ -143,5 +167,6 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
EnumDef("multi_layered_mode",
multi_layered_mode,
default=self.multi_layered_mode,
label="Multi-Layered EXR")
label="Multi-Layered EXR"),
]
return attrs + self.get_instance_attr_defs()

View file

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating openGL reviews."""
from ayon_core.hosts.houdini.api import plugin
from ayon_core.hosts.houdini.api import lib, plugin
from ayon_core.lib import EnumDef, BoolDef, NumberDef
import os
@ -14,6 +14,16 @@ class CreateReview(plugin.HoudiniCreator):
label = "Review"
product_type = "review"
icon = "video-camera"
review_color_space = ""
def apply_settings(self, project_settings):
super(CreateReview, self).apply_settings(project_settings)
# workfile settings added in '0.2.13'
color_settings = project_settings["houdini"]["imageio"].get(
"workfile", {}
)
if color_settings.get("enabled"):
self.review_color_space = color_settings.get("review_color_space")
def create(self, product_name, instance_data, pre_create_data):
@ -85,10 +95,20 @@ class CreateReview(plugin.HoudiniCreator):
instance_node.setParms(parms)
# Set OCIO Colorspace to the default output colorspace
# Set OCIO Colorspace to the default colorspace
# if there's OCIO
if os.getenv("OCIO"):
self.set_colorcorrect_to_default_view_space(instance_node)
# Fall back to the default value if cls.review_color_space is empty.
if not self.review_color_space:
# cls.review_color_space is an empty string
# when the imageio/workfile setting is disabled or
# when the Review colorspace setting is empty.
from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
self.review_color_space = get_default_display_view_colorspace()
lib.set_review_color_space(instance_node,
self.review_color_space,
self.log)
to_lock = ["id", "productType"]
@ -131,23 +151,3 @@ class CreateReview(plugin.HoudiniCreator):
minimum=0.0001,
decimals=3)
]
def set_colorcorrect_to_default_view_space(self,
instance_node):
"""Set ociocolorspace to the default output space."""
from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
# set Color Correction parameter to OpenColorIO
instance_node.setParms({"colorcorrect": 2})
# Get default view space for ociocolorspace parm.
default_view_space = get_default_display_view_colorspace()
instance_node.setParms(
{"ociocolorspace": default_view_space}
)
self.log.debug(
"'OCIO Colorspace' parm on '{}' has been set to "
"the default view color space '{}'"
.format(instance_node, default_view_space)
)

View file

@ -16,17 +16,21 @@ class CreateVrayROP(plugin.HoudiniCreator):
icon = "magic"
ext = "exr"
# Default to split export and render jobs
export_job = True
# Default render target
render_target = "farm_split"
def create(self, product_name, instance_data, pre_create_data):
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in ["render_target", "review"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
instance_data.pop("active", None)
instance_data.update({"node_type": "vray_renderer"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
# Submit for job publishing
instance_data["farm"] = pre_create_data.get("farm")
instance = super(CreateVrayROP, self).create(
product_name,
@ -55,7 +59,7 @@ class CreateVrayROP(plugin.HoudiniCreator):
"SettingsEXR_bits_per_channel": "16" # half precision
}
if pre_create_data.get("export_job"):
if pre_create_data.get("render_target") == "farm_split":
scene_filepath = \
"{export_dir}{product_name}/{product_name}.$F4.vrscene".format(
export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
@ -143,20 +147,41 @@ class CreateVrayROP(plugin.HoudiniCreator):
return super(CreateVrayROP, self).remove_instances(instances)
def get_instance_attr_defs(self):
"""get instance attribute definitions.
Attributes defined in this method are exposed in
publish tab in the publisher UI.
"""
render_target_items = {
"local": "Local machine rendering",
"local_no_render": "Use existing frames (local)",
"farm": "Farm Rendering",
"farm_split": "Farm Rendering - Split export & render jobs",
}
return [
BoolDef("review",
label="Review",
tooltip="Mark as reviewable",
default=True),
EnumDef("render_target",
items=render_target_items,
label="Render target",
default=self.render_target)
]
def get_pre_create_attr_defs(self):
attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
image_format_enum = [
"bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
"rad", "rat", "rta", "sgi", "tga", "tif",
]
return attrs + [
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("export_job",
label="Split export and render jobs",
default=self.export_job),
attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
attrs += [
EnumDef("image_format",
image_format_enum,
default=self.ext,
@ -172,3 +197,4 @@ class CreateVrayROP(plugin.HoudiniCreator):
"if enabled",
default=False)
]
return attrs + self.get_instance_attr_defs()

View file

@ -95,7 +95,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
# write workfile information to context container.
op_ctx = hou.node(CONTEXT_CONTAINER)
if not op_ctx:
op_ctx = self.create_context_node()
op_ctx = self.host.create_context_node()
workfile_data = {"workfile": current_instance.data_to_store()}
imprint(op_ctx, workfile_data)

View file

@ -45,33 +45,11 @@ class AbcLoader(load.LoaderPlugin):
alembic = container.createNode("alembic", node_name=node_name)
alembic.setParms({"fileName": file_path})
# Add unpack node
unpack_name = "unpack_{}".format(name)
unpack = container.createNode("unpack", node_name=unpack_name)
unpack.setInput(0, alembic)
unpack.setParms({"transfer_attributes": "path"})
# Position nodes nicely
container.moveToGoodPosition()
container.layoutChildren()
# Add normal to points
# Order of menu ['point', 'vertex', 'prim', 'detail']
normal_name = "normal_{}".format(name)
normal_node = container.createNode("normal", node_name=normal_name)
normal_node.setParms({"type": 0})
normal_node.setInput(0, unpack)
null = container.createNode("null", node_name="OUT")
null.setInput(0, normal_node)
# Ensure display flag is on the Alembic input node and not on the OUT
# node to optimize "debug" displaying in the viewport.
alembic.setDisplayFlag(True)
# Set new position for unpack node else it gets cluttered
nodes = [container, alembic, unpack, normal_node, null]
for nr, node in enumerate(nodes):
node.setPosition([0, (0 - nr)])
self[:] = nodes
nodes = [container, alembic]
return pipeline.containerise(
node_name,

View file

@ -40,12 +40,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "ar_picture")
render_products = []
# Store whether we are splitting the render job (export + render)
split_render = bool(rop.parm("ar_ass_export_enable").eval())
instance.data["splitRender"] = split_render
export_prefix = None
export_products = []
if split_render:
if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "ar_ass_file", pad_character="0"
)
@ -68,7 +65,12 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
"": self.generate_expected_files(instance, beauty_product)
}
# Assume it's a multipartExr Render.
multipartExr = True
num_aovs = rop.evalParm("ar_aovs")
# TODO: Check the following logic,
# as it always assumes that AOVs are not merged.
for index in range(1, num_aovs + 1):
# Skip disabled AOVs
if not rop.evalParm("ar_enable_aov{}".format(index)):
@ -85,6 +87,14 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
files_by_aov[label] = self.generate_expected_files(instance,
aov_product)
# Set to False as soon as we have a separated aov.
multipartExr = False
# Review logic expects this key to exist and be True
# if the render is a multipart EXR.
# As long as no separate AOV is found, multipartExr stays True.
instance.data["multipartExr"] = multipartExr
for product in render_products:
self.log.debug("Found render product: {}".format(product))

View file

@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api import lib
class CollectDataforCache(pyblish.api.InstancePlugin):
"""Collect data for caching to Deadline."""
order = pyblish.api.CollectorOrder + 0.04
# Run after Collect Frames
order = pyblish.api.CollectorOrder + 0.11
families = ["ass", "pointcache",
"mantraifd", "redshiftproxy",
"vdbcache"]

View file

@ -0,0 +1,35 @@
import pyblish.api
class CollectFarmInstances(pyblish.api.InstancePlugin):
"""Collect instances for farm render."""
order = pyblish.api.CollectorOrder
families = ["mantra_rop",
"karma_rop",
"redshift_rop",
"arnold_rop",
"vray_rop"]
hosts = ["houdini"]
targets = ["local", "remote"]
label = "Collect farm instances"
def process(self, instance):
creator_attribute = instance.data["creator_attributes"]
# Collect Render Target
if creator_attribute.get("render_target") not in {
"farm_split", "farm"
}:
instance.data["farm"] = False
instance.data["splitRender"] = False
self.log.debug("Render on farm is disabled. "
"Skipping farm collecting.")
return
instance.data["farm"] = True
instance.data["splitRender"] = (
creator_attribute.get("render_target") == "farm_split"
)

View file

@ -17,7 +17,7 @@ class CollectFrames(pyblish.api.InstancePlugin):
label = "Collect Frames"
families = ["vdbcache", "imagesequence", "ass",
"mantraifd", "redshiftproxy", "review",
"bgeo"]
"pointcache"]
def process(self, instance):

View file

@ -55,6 +55,12 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
beauty_product)
}
# Review logic expects this key to exist and be True
# if the render is a multipart EXR.
# A Karma render is a multipart EXR by default,
# so the key is always set to True here.
instance.data["multipartExr"] = True
filenames = list(render_products)
instance.data["files"] = filenames
instance.data["renderProducts"] = colorspace.ARenderProduct()

View file

@ -0,0 +1,137 @@
import os
import pyblish.api
from ayon_core.pipeline.create import get_product_name
from ayon_core.pipeline.farm.patterning import match_aov_pattern
from ayon_core.pipeline.publish import (
get_plugin_settings,
apply_plugin_settings_automatically
)
class CollectLocalRenderInstances(pyblish.api.InstancePlugin):
"""Collect instances for local render.
Agnostic Local Render Collector.
"""
# this plugin runs after Collect Render Products
order = pyblish.api.CollectorOrder + 0.12
families = ["mantra_rop",
"karma_rop",
"redshift_rop",
"arnold_rop",
"vray_rop"]
hosts = ["houdini"]
label = "Collect local render instances"
use_deadline_aov_filter = False
aov_filter = {"host_name": "houdini",
"value": [".*([Bb]eauty).*"]}
@classmethod
def apply_settings(cls, project_settings):
# Preserve automatic settings applying logic
settings = get_plugin_settings(plugin=cls,
project_settings=project_settings,
log=cls.log,
category="houdini")
apply_plugin_settings_automatically(cls, settings, logger=cls.log)
if not cls.use_deadline_aov_filter:
# get aov_filter from collector settings
# and restructure it as match_aov_pattern requires.
cls.aov_filter = {
cls.aov_filter["host_name"]: cls.aov_filter["value"]
}
else:
# get aov_filter from deadline settings
cls.aov_filter = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"]["aov_filter"]
cls.aov_filter = {
item["name"]: item["value"]
for item in cls.aov_filter
}
def process(self, instance):
if instance.data["farm"]:
self.log.debug("Render on farm is enabled. "
"Skipping local render collecting.")
return
# Create Instance for each AOV.
context = instance.context
expectedFiles = next(iter(instance.data["expectedFiles"]), {})
product_type = "render" # is always render
product_group = get_product_name(
context.data["projectName"],
context.data["taskEntity"]["name"],
context.data["taskEntity"]["taskType"],
context.data["hostName"],
product_type,
instance.data["productName"]
)
for aov_name, aov_filepaths in expectedFiles.items():
product_name = product_group
if aov_name:
product_name = "{}_{}".format(product_name, aov_name)
# Create instance for each AOV
aov_instance = context.create_instance(product_name)
# Prepare Representation for each AOV
aov_filenames = [os.path.basename(path) for path in aov_filepaths]
staging_dir = os.path.dirname(aov_filepaths[0])
ext = aov_filepaths[0].split(".")[-1]
# Decide if instance is reviewable
preview = False
if instance.data.get("multipartExr", False):
# Add preview tag because it's a multipartExr.
preview = True
else:
# Add Preview tag if the AOV matches the filter.
preview = match_aov_pattern(
"houdini", self.aov_filter, aov_filenames[0]
)
preview = preview and instance.data.get("review", False)
# Support Single frame.
# The integrator wants single files to be a single
# filename instead of a list.
# More info: https://github.com/ynput/ayon-core/issues/238
if len(aov_filenames) == 1:
aov_filenames = aov_filenames[0]
aov_instance.data.update({
# 'label': label,
"task": instance.data["task"],
"folderPath": instance.data["folderPath"],
"frameStart": instance.data["frameStartHandle"],
"frameEnd": instance.data["frameEndHandle"],
"productType": product_type,
"family": product_type,
"productName": product_name,
"productGroup": product_group,
"families": ["render.local.hou", "review"],
"instance_node": instance.data["instance_node"],
"representations": [
{
"stagingDir": staging_dir,
"ext": ext,
"name": ext,
"tags": ["review"] if preview else [],
"files": aov_filenames,
"frameStart": instance.data["frameStartHandle"],
"frameEnd": instance.data["frameEndHandle"]
}
]
})
# Skip integrating original render instance.
# We are not removing it because it's used to trigger the render.
instance.data["integrate"] = False

View file

@ -44,12 +44,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "vm_picture")
render_products = []
# Store whether we are splitting the render job (export + render)
split_render = bool(rop.parm("soho_outputmode").eval())
instance.data["splitRender"] = split_render
export_prefix = None
export_products = []
if split_render:
if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "soho_diskfile", pad_character="0"
)
@ -74,6 +71,11 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
beauty_product)
}
# Assume it's a multipartExr Render.
multipartExr = True
# TODO: This logic doesn't take into considerations
# cryptomatte defined in 'Images > Cryptomatte'
aov_numbers = rop.evalParm("vm_numaux")
if aov_numbers > 0:
# get the filenames of the AOVs
@ -93,6 +95,14 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa
# Set to False as soon as we have a separated aov.
multipartExr = False
# Review logic expects this key to exist and be True
# if the render is a multipart EXR.
# As long as no separate AOV is found, multipartExr stays True.
instance.data["multipartExr"] = multipartExr
for product in render_products:
self.log.debug("Found render product: %s" % product)

View file

@ -42,11 +42,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix")
beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix")
# Store whether we are splitting the render job (export + render)
split_render = bool(rop.parm("RS_archive_enable").eval())
instance.data["splitRender"] = split_render
export_products = []
if split_render:
if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "RS_archive_file", pad_character="0"
)
@ -63,9 +61,12 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
full_exr_mode = (rop.evalParm("RS_outputMultilayerMode") == "2")
if full_exr_mode:
# Ignore beauty suffix if full mode is enabled
# As this is what the rop does.
beauty_suffix = ""
# Assume it's a multipartExr Render.
multipartExr = True
# Default beauty/main layer AOV
beauty_product = self.get_render_product_name(
prefix=default_prefix, suffix=beauty_suffix
@ -75,7 +76,7 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
beauty_suffix: self.generate_expected_files(instance,
beauty_product)
}
aovs_rop = rop.parm("RS_aovGetFromNode").evalAsNode()
if aovs_rop:
rop = aovs_rop
@ -98,13 +99,21 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
if rop.parm(f"RS_aovID_{i}").evalAsString() == "CRYPTOMATTE" or \
not full_exr_mode:
aov_product = self.get_render_product_name(aov_prefix, aov_suffix)
render_products.append(aov_product)
files_by_aov[aov_suffix] = self.generate_expected_files(instance,
aov_product) # noqa
# Set to False as soon as we have a separated aov.
multipartExr = False
# Review logic expects this key to exist and be True
# if the render is a multipart EXR.
# multipartExr stays True only while no AOV is written separately.
instance.data["multipartExr"] = multipartExr
for product in render_products:
self.log.debug("Found render product: %s" % product)

View file

@ -8,7 +8,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
label = "Collect Review Data"
# This specific order value is used so that
# this plugin runs after CollectRopFrameRange
order = pyblish.api.CollectorOrder + 0.1
# Also after CollectLocalRenderInstances
order = pyblish.api.CollectorOrder + 0.13
hosts = ["houdini"]
families = ["review"]
@ -28,7 +29,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
ropnode_path = instance.data["instance_node"]
ropnode = hou.node(ropnode_path)
camera_path = ropnode.parm("camera").eval()
# Get camera based on the instance_node type.
camera_path = self._get_camera_path(ropnode)
camera_node = hou.node(camera_path)
if not camera_node:
self.log.warning("No valid camera node found on review node: "
@ -55,3 +57,29 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
# Store focal length in `burninDataMembers`
burnin_members = instance.data.setdefault("burninDataMembers", {})
burnin_members["focalLength"] = focal_length
def _get_camera_path(self, ropnode):
"""Get the camera path associated with the given rop node.
This function evaluates the camera parameter according to the
type of the given rop node.
Returns:
Union[str, None]: Camera path or None.
Note that an empty string may be returned if the camera
parameter exists but is empty, i.e. no camera is set.
"""
if ropnode.type().name() in {
"opengl", "karma", "ifd", "arnold"
}:
return ropnode.parm("camera").eval()
elif ropnode.type().name() == "Redshift_ROP":
return ropnode.parm("RS_renderCamera").eval()
elif ropnode.type().name() == "vray_renderer":
return ropnode.parm("render_camera").eval()
return None

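As an aside, the per-type branching in _get_camera_path could also be written as a lookup table; a hypothetical alternative sketch, not the committed code:

CAMERA_PARM_BY_ROP_TYPE = {
    "opengl": "camera",
    "karma": "camera",
    "ifd": "camera",  # Mantra
    "arnold": "camera",
    "Redshift_ROP": "RS_renderCamera",
    "vray_renderer": "render_camera",
}

def get_camera_path(ropnode):
    """Return the camera path for the given ROP node, or None if unknown."""
    parm_name = CAMERA_PARM_BY_ROP_TYPE.get(ropnode.type().name())
    if parm_name is None:
        return None
    return ropnode.parm(parm_name).eval()
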
View file

@ -0,0 +1,22 @@
import pyblish.api
class CollectReviewableInstances(pyblish.api.InstancePlugin):
"""Collect Reviewable Instances.
Basically, all instances of the specified families
with creator_attribure["review"]
"""
order = pyblish.api.CollectorOrder
label = "Collect Reviewable Instances"
families = ["mantra_rop",
"karma_rop",
"redshift_rop",
"arnold_rop",
"vray_rop"]
def process(self, instance):
creator_attribute = instance.data["creator_attributes"]
instance.data["review"] = creator_attribute.get("review", False)

View file

@ -45,12 +45,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
render_products = []
# TODO: add render elements if render element
# Store whether we are splitting the render job in an export + render
split_render = rop.parm("render_export_mode").eval() == "2"
instance.data["splitRender"] = split_render
export_prefix = None
export_products = []
if split_render:
if instance.data["splitRender"]:
export_prefix = evalParmNoFrame(
rop, "render_export_filepath", pad_character="0"
)
@ -70,6 +67,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
"": self.generate_expected_files(instance,
beauty_product)}
# Assume it's a multipartExr Render.
multipartExr = True
if instance.data.get("RenderElement", True):
render_element = self.get_render_element_name(rop, default_prefix)
if render_element:
@ -77,7 +77,13 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
render_products.append(renderpass)
files_by_aov[aov] = self.generate_expected_files(
instance, renderpass)
# Set to False as soon as we have a separated aov.
multipartExr = False
# Review logic expects this key to exist and be True
# if the render is a multipart EXR.
# multipartExr stays True only while no AOV is written separately.
instance.data["multipartExr"] = multipartExr
for product in render_products:
self.log.debug("Found render product: %s" % product)

View file

@ -28,10 +28,15 @@ class ExtractAlembic(publish.Extractor):
staging_dir = os.path.dirname(output)
instance.data["stagingDir"] = staging_dir
file_name = os.path.basename(output)
if instance.data.get("frames"):
# list of files
files = instance.data["frames"]
else:
# single file
files = os.path.basename(output)
# We run the render
self.log.info("Writing alembic '%s' to '%s'" % (file_name,
self.log.info("Writing alembic '%s' to '%s'" % (files,
staging_dir))
render_rop(ropnode)
@ -42,7 +47,7 @@ class ExtractAlembic(publish.Extractor):
representation = {
'name': 'abc',
'ext': 'abc',
'files': file_name,
'files': files,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)

View file

@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop, splitext
import hou
class ExtractComposite(publish.Extractor):
class ExtractComposite(publish.Extractor,
publish.ColormanagedPyblishPluginMixin):
order = pyblish.api.ExtractorOrder
label = "Extract Composite (Image Sequence)"
@ -45,8 +46,14 @@ class ExtractComposite(publish.Extractor):
"frameEnd": instance.data["frameEndHandle"],
}
from pprint import pformat
self.log.info(pformat(representation))
if ext.lower() == "exr":
# Inject colorspace with 'scene_linear' as that's the
# default Houdini working colorspace and all extracted
# OpenEXR images should be in that colorspace.
# https://www.sidefx.com/docs/houdini/render/linear.html#image-formats
self.set_representation_colorspace(
representation, instance.context,
colorspace="scene_linear"
)
instance.data["representations"].append(representation)

View file

@ -8,7 +8,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop
import hou
class ExtractOpenGL(publish.Extractor):
class ExtractOpenGL(publish.Extractor,
publish.ColormanagedPyblishPluginMixin):
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract OpenGL"
@ -18,6 +19,16 @@ class ExtractOpenGL(publish.Extractor):
def process(self, instance):
ropnode = hou.node(instance.data.get("instance_node"))
# This plugin is triggered when marking render as reviewable.
# Therefore, it may run on the wrong instances.
# TODO: Don't run this plugin on wrong instances.
# This plugin should run only on the review product type
# with an instance node of opengl type.
if ropnode.type().name() != "opengl":
self.log.debug("Skipping OpenGL extraction. Rop node {} "
"is not an OpenGL node.".format(ropnode.path()))
return
output = ropnode.evalParm("picture")
staging_dir = os.path.normpath(os.path.dirname(output))
instance.data["stagingDir"] = staging_dir
@ -46,6 +57,14 @@ class ExtractOpenGL(publish.Extractor):
"camera_name": instance.data.get("review_camera")
}
if ropnode.evalParm("colorcorrect") == 2: # OpenColorIO enabled
colorspace = ropnode.evalParm("ociocolorspace")
# inject colorspace data
self.set_representation_colorspace(
representation, instance.context,
colorspace=colorspace
)
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(representation)

View file

@ -0,0 +1,74 @@
import pyblish.api
from ayon_core.pipeline import publish
from ayon_core.hosts.houdini.api.lib import render_rop
import hou
import os
class ExtractRender(publish.Extractor):
order = pyblish.api.ExtractorOrder
label = "Extract Render"
hosts = ["houdini"]
families = ["mantra_rop",
"karma_rop",
"redshift_rop",
"arnold_rop",
"vray_rop"]
def process(self, instance):
creator_attribute = instance.data["creator_attributes"]
product_type = instance.data["productType"]
rop_node = hou.node(instance.data.get("instance_node"))
# Align split parameter value on rop node to the render target.
if instance.data["splitRender"]:
if product_type == "arnold_rop":
rop_node.setParms({"ar_ass_export_enable": 1})
elif product_type == "mantra_rop":
rop_node.setParms({"soho_outputmode": 1})
elif product_type == "redshift_rop":
rop_node.setParms({"RS_archive_enable": 1})
elif product_type == "vray_rop":
rop_node.setParms({"render_export_mode": "2"})
else:
if product_type == "arnold_rop":
rop_node.setParms({"ar_ass_export_enable": 0})
elif product_type == "mantra_rop":
rop_node.setParms({"soho_outputmode": 0})
elif product_type == "redshift_rop":
rop_node.setParms({"RS_archive_enable": 0})
elif product_type == "vray_rop":
rop_node.setParms({"render_export_mode": "1"})
if instance.data.get("farm"):
self.log.debug("Render should be processed on farm, skipping local render.")
return
if creator_attribute.get("render_target") == "local":
ropnode = hou.node(instance.data.get("instance_node"))
render_rop(ropnode)
# `ExpectedFiles` is a list that includes one dict.
expected_files = instance.data["expectedFiles"][0]
# Each key in that dict is a list of files.
# Combine lists of files into one big list.
all_frames = []
for value in expected_files.values():
if isinstance(value, str):
all_frames.append(value)
elif isinstance(value, list):
all_frames.extend(value)
# Check missing frames.
# Frames won't exist if user cancels the render.
missing_frames = [
frame
for frame in all_frames
if not os.path.exists(frame)
]
if missing_frames:
# TODO: Use user friendly error reporting.
raise RuntimeError("Failed to complete render extraction. "
"Missing output files: {}".format(
missing_frames))

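For reference, instance.data["expectedFiles"] as consumed above is a one-element list holding a dict that maps each AOV to its file path(s); a sketch of the flattening and missing-frame check with illustrative paths:

import os

expected_files = [{
    "beauty": ["/tmp/render/beauty.1001.exr", "/tmp/render/beauty.1002.exr"],
    "N": "/tmp/render/N.1001.exr",  # a single frame may be a plain string
}]
all_frames = []
for value in expected_files[0].values():
    if isinstance(value, str):
        all_frames.append(value)
    elif isinstance(value, list):
        all_frames.extend(value)
missing_frames = [f for f in all_frames if not os.path.exists(f)]
print(missing_frames)  # any frame the render did not produce
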
View file

@ -17,11 +17,13 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["houdini"]
families = ["workfile",
"redshift_rop",
"arnold_rop",
"usdrender",
"mantra_rop",
"karma_rop",
"usdrender",
"redshift_rop",
"arnold_rop",
"vray_rop",
"render.local.hou",
"publish.hou"]
optional = True

View file

@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import sys
import hou
import pyblish.api
import six
from ayon_core.pipeline import PublishValidationError
@ -26,28 +25,21 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
invalid = self.get_invalid(instance)
if invalid:
raise PublishValidationError(
("Output node(s) `{}` are incorrect. "
"See plug-in log for details.").format(invalid),
title=self.label
"Output node '{}' is incorrect. "
"See plug-in log for details.".format(invalid),
title=self.label,
description=(
"### Invalid COP output node\n\n"
"The output node path for the instance must be set to a "
"valid COP node path.\n\nSee the log for more details."
)
)
@classmethod
def get_invalid(cls, instance):
output_node = instance.data.get("output_node")
import hou
try:
output_node = instance.data["output_node"]
except KeyError:
six.reraise(
PublishValidationError,
PublishValidationError(
"Can't determine COP output node.",
title=cls.__name__),
sys.exc_info()[2]
)
if output_node is None:
if not output_node:
node = hou.node(instance.data.get("instance_node"))
cls.log.error(
"COP Output node in '%s' does not exist. "
@ -61,8 +53,8 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
cls.log.error(
"Output node %s is not a COP node. "
"COP Path must point to a COP node, "
"instead found category type: %s"
% (output_node.path(), output_node.type().category().name())
"instead found category type: %s",
output_node.path(), output_node.type().category().name()
)
return [output_node.path()]
@ -70,9 +62,7 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
# is Cop2 to avoid potential edge case scenarios even though
# the isinstance check above should be stricter than this category
if output_node.type().category().name() != "Cop2":
raise PublishValidationError(
(
"Output node {} is not of category Cop2."
" This is a bug..."
).format(output_node.path()),
title=cls.label)
cls.log.error(
"Output node %s is not of category Cop2.", output_node.path()
)
return [output_node.path()]

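The validator's category check leans on Houdini's node type API; a quick sketch of what it inspects (the node path is illustrative):

import hou

node = hou.node("/img/comp1/OUT")
if node is not None:
    # COP nodes report the "Cop2" category; SOPs report "Sop", etc.
    print(node.type().category().name())
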
View file

@ -4,15 +4,19 @@ from ayon_core.pipeline import (
PublishValidationError,
OptionalPyblishPluginMixin
)
from ayon_core.pipeline.publish import RepairAction
from ayon_core.pipeline.publish import (
RepairAction,
get_plugin_settings,
apply_plugin_settings_automatically
)
from ayon_core.hosts.houdini.api.action import SelectROPAction
import os
import hou
class SetDefaultViewSpaceAction(RepairAction):
label = "Set default view colorspace"
class ResetViewSpaceAction(RepairAction):
label = "Reset OCIO colorspace parm"
icon = "mdi.monitor"
@ -27,12 +31,43 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
families = ["review"]
hosts = ["houdini"]
label = "Validate Review Colorspace"
actions = [SetDefaultViewSpaceAction, SelectROPAction]
actions = [ResetViewSpaceAction, SelectROPAction]
optional = True
review_color_space = ""
@classmethod
def apply_settings(cls, project_settings):
# Preserve automatic settings applying logic
settings = get_plugin_settings(plugin=cls,
project_settings=project_settings,
log=cls.log,
category="houdini")
apply_plugin_settings_automatically(cls, settings, logger=cls.log)
# workfile settings added in '0.2.13'
color_settings = project_settings["houdini"]["imageio"].get(
"workfile", {}
)
# Add review color settings
if color_settings.get("enabled"):
cls.review_color_space = color_settings.get("review_color_space")
def process(self, instance):
rop_node = hou.node(instance.data["instance_node"])
# This plugin is triggered when marking render as reviewable.
# Therefore, it may run on the wrong instances.
# TODO: Don't run this plugin on wrong instances.
# This plugin should run only on the review product type
# with an instance node of opengl type.
if rop_node.type().name() != "opengl":
self.log.debug("Skipping validation. Rop node {} "
"is not an OpenGL node.".format(rop_node.path()))
return
if not self.is_active(instance.data):
return
@ -43,7 +78,6 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
)
return
rop_node = hou.node(instance.data["instance_node"])
if rop_node.evalParm("colorcorrect") != 2:
# any colorspace settings other than default requires
# 'Color Correct' parm to be set to 'OpenColorIO'
@ -52,39 +86,54 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
" 'OpenColorIO'".format(rop_node.path())
)
if rop_node.evalParm("ociocolorspace") not in \
hou.Color.ocio_spaces():
current_color_space = rop_node.evalParm("ociocolorspace")
if current_color_space not in hou.Color.ocio_spaces():
raise PublishValidationError(
"Invalid value: Colorspace name doesn't exist.\n"
"Check 'OCIO Colorspace' parameter on '{}' ROP"
.format(rop_node.path())
)
@classmethod
def repair(cls, instance):
"""Set Default View Space Action.
# if houdini/imageio/workfile is enabled and
# Review colorspace setting is empty then this check should
# actually check if the current_color_space setting equals
# the default colorspace value.
# However, it will make the black cmd screen show up more often
# which is very annoying.
if self.review_color_space and \
self.review_color_space != current_color_space:
It is a helper action more than a repair action,
used to set colorspace on opengl node to the default view.
"""
from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
rop_node = hou.node(instance.data["instance_node"])
if rop_node.evalParm("colorcorrect") != 2:
rop_node.setParms({"colorcorrect": 2})
cls.log.debug(
"'Color Correction' parm on '{}' has been set to"
" 'OpenColorIO'".format(rop_node.path())
raise PublishValidationError(
"Invalid value: Colorspace name doesn't match"
"the Colorspace specified in settings."
)
# Get default view colorspace name
default_view_space = get_default_display_view_colorspace()
@classmethod
def repair(cls, instance):
"""Reset view colorspace.
rop_node.setParms({"ociocolorspace": default_view_space})
cls.log.info(
"'OCIO Colorspace' parm on '{}' has been set to "
"the default view color space '{}'"
.format(rop_node, default_view_space)
)
It is used to set colorspace on opengl node.
It uses the colorspace value specified in the Houdini addon settings.
If the value in the Houdini addon settings is empty,
it will fall back to the default colorspace.
Note:
This repair action assumes that OCIO is enabled.
As if OCIO is disabled the whole validation is skipped
and this repair action won't show up.
"""
from ayon_core.hosts.houdini.api.lib import set_review_color_space
# Fall back to the default value if cls.review_color_space is empty.
if not cls.review_color_space:
# cls.review_color_space is an empty string
# when the imageio/workfile setting is disabled or
# when the Review colorspace setting is empty.
from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa
cls.review_color_space = get_default_display_view_colorspace()
rop_node = hou.node(instance.data["instance_node"])
set_review_color_space(rop_node,
cls.review_color_space,
cls.log)

View file

@ -20,6 +20,16 @@ class ValidateSceneReview(pyblish.api.InstancePlugin):
report = []
instance_node = hou.node(instance.data.get("instance_node"))
# This plugin is triggered when marking render as reviewable.
# Therefore, it may run on the wrong instances.
# TODO: Don't run this plugin on wrong instances.
# This plugin should run only on the review product type
# with an instance node of opengl type.
if instance_node.type().name() != "opengl":
self.log.debug("Skipping validation. Rop node {} "
"is not an OpenGL node.".format(instance_node.path()))
return
invalid = self.get_invalid_scene_path(instance_node)
if invalid:
report.append(invalid)

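The same opengl-type guard now appears in ExtractOpenGL, ValidateReviewColorspace and ValidateSceneReview; if it were factored out, a shared helper could look like this (hypothetical, not part of this commit):

import hou

def is_opengl_rop(node_path):
    """Return True only if the given path points to an OpenGL ROP node."""
    node = hou.node(node_path)
    return node is not None and node.type().name() == "opengl"
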
View file

@ -0,0 +1,29 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- OPMenu Stencil.
It's used to extend the OPMenu.
-->
<menuDocument>
<menu>
<!-- Operator type and asset options. -->
<subMenu id="opmenu.vhda_options_create">
<insertBefore>opmenu.unsynchronize</insertBefore>
<scriptItem id="opmenu.vhda_create_ayon">
<insertAfter>opmenu.vhda_create</insertAfter>
<label>Create New (AYON)...</label>
<context>
</context>
<scriptCode>
<![CDATA[
from ayon_core.hosts.houdini.api.creator_node_shelves import create_interactive
node = kwargs["node"]
if node not in hou.selectedNodes():
node.setSelected(True)
create_interactive("io.openpype.creators.houdini.hda", **kwargs)
]]>
</scriptCode>
</scriptItem>
</subMenu>
</menu>
</menuDocument>

View file

@ -6,12 +6,14 @@ import json
from typing import Any, Dict, Union
import six
import ayon_api
from ayon_core.pipeline import get_current_project_name, colorspace
from ayon_core.pipeline import (
get_current_project_name,
colorspace
)
from ayon_core.settings import get_project_settings
from ayon_core.pipeline.context_tools import (
get_current_folder_entity,
get_current_task_entity
)
from ayon_core.style import load_stylesheet
from pymxs import runtime as rt
@ -221,41 +223,30 @@ def reset_scene_resolution():
scene resolution can be overwritten by a folder if the folder.attrib
contains any information regarding scene resolution.
"""
folder_entity = get_current_folder_entity(
fields={"attrib.resolutionWidth", "attrib.resolutionHeight"}
)
folder_attributes = folder_entity["attrib"]
width = int(folder_attributes["resolutionWidth"])
height = int(folder_attributes["resolutionHeight"])
task_attributes = get_current_task_entity(fields={"attrib"})["attrib"]
width = int(task_attributes["resolutionWidth"])
height = int(task_attributes["resolutionHeight"])
set_scene_resolution(width, height)
def get_frame_range(folder_entiy=None) -> Union[Dict[str, Any], None]:
"""Get the current folder frame range and handles.
def get_frame_range(task_entity=None) -> Union[Dict[str, Any], None]:
"""Get the current task frame range and handles
Args:
folder_entiy (dict): Folder eneity.
task_entity (dict): Task Entity.
Returns:
dict: with frame start, frame end, handle start, handle end.
"""
# Set frame start/end
if folder_entiy is None:
folder_entiy = get_current_folder_entity()
folder_attributes = folder_entiy["attrib"]
frame_start = folder_attributes.get("frameStart")
frame_end = folder_attributes.get("frameEnd")
if frame_start is None or frame_end is None:
return {}
frame_start = int(frame_start)
frame_end = int(frame_end)
handle_start = int(folder_attributes.get("handleStart", 0))
handle_end = int(folder_attributes.get("handleEnd", 0))
if task_entity is None:
task_entity = get_current_task_entity(fields={"attrib"})
task_attributes = task_entity["attrib"]
frame_start = int(task_attributes["frameStart"])
frame_end = int(task_attributes["frameEnd"])
handle_start = int(task_attributes["handleStart"])
handle_end = int(task_attributes["handleEnd"])
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_end + handle_end
@ -281,9 +272,9 @@ def reset_frame_range(fps: bool = True):
scene frame rate in frames-per-second.
"""
if fps:
project_name = get_current_project_name()
project_entity = ayon_api.get_project(project_name)
fps_number = float(project_entity["attrib"].get("fps"))
task_entity = get_current_task_entity()
task_attributes = task_entity["attrib"]
fps_number = float(task_attributes["fps"])
rt.frameRate = fps_number
frame_range = get_frame_range()
@ -378,12 +369,8 @@ def reset_colorspace():
"""
if int(get_max_version()) < 2024:
return
project_name = get_current_project_name()
colorspace_mgr = rt.ColorPipelineMgr
project_settings = get_project_settings(project_name)
max_config_data = colorspace.get_imageio_config(
project_name, "max", project_settings)
max_config_data = colorspace.get_current_context_imageio_config_preset()
if max_config_data:
ocio_config_path = max_config_data["path"]
colorspace_mgr = rt.ColorPipelineMgr
@ -398,10 +385,7 @@ def check_colorspace():
"because Max main window can't be found.")
if int(get_max_version()) >= 2024:
color_mgr = rt.ColorPipelineMgr
project_name = get_current_project_name()
project_settings = get_project_settings(project_name)
max_config_data = colorspace.get_imageio_config(
project_name, "max", project_settings)
max_config_data = colorspace.get_current_context_imageio_config_preset()
if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"):
if not is_headless():
from ayon_core.tools.utils import SimplePopup
@ -502,9 +486,9 @@ def object_transform_set(container_children):
"""
transform_set = {}
for node in container_children:
name = f"{node.name}.transform"
name = f"{node}.transform"
transform_set[name] = node.pos
name = f"{node.name}.scale"
name = f"{node}.scale"
transform_set[name] = node.scale
return transform_set
@ -525,6 +509,36 @@ def get_plugins() -> list:
return plugin_info_list
def update_modifier_node_names(event, node):
"""Update the name of the nodes after renaming
Args:
event (pymxs.MXSWrapperBase): Event Name (
Mandatory argument for rt.NodeEventCallback)
node (list): Event Number (
Mandatory argument for rt.NodeEventCallback)
"""
containers = [
obj
for obj in rt.Objects
if (
rt.ClassOf(obj) == rt.Container
and rt.getUserProp(obj, "id") == "pyblish.avalon.instance"
and rt.getUserProp(obj, "productType") not in {
"workfile", "tyflow"
}
)
]
if not containers:
return
for container in containers:
ayon_data = container.modifiers[0].openPypeData
updated_node_names = [str(node.node) for node
in ayon_data.all_handles]
rt.setProperty(ayon_data, "sel_list", updated_node_names)
@contextlib.contextmanager
def render_resolution(width, height):
"""Set render resolution option during context

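A worked example of the handle math in get_frame_range above (attribute values are illustrative):

task_attributes = {
    "frameStart": 1001,
    "frameEnd": 1050,
    "handleStart": 5,
    "handleEnd": 5,
}
frame_start_handle = task_attributes["frameStart"] - task_attributes["handleStart"]
frame_end_handle = task_attributes["frameEnd"] + task_attributes["handleEnd"]
print(frame_start_handle, frame_end_handle)  # 996 1055
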
View file

@ -52,17 +52,15 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
self._has_been_setup = True
def context_setting():
return lib.set_context_setting()
rt.callbacks.addScript(rt.Name('systemPostNew'),
context_setting)
rt.callbacks.addScript(rt.Name('systemPostNew'), on_new)
rt.callbacks.addScript(rt.Name('filePostOpen'),
lib.check_colorspace)
rt.callbacks.addScript(rt.Name('postWorkspaceChange'),
self._deferred_menu_creation)
rt.NodeEventCallback(
nameChanged=lib.update_modifier_node_names)
def workfile_has_unsaved_changes(self):
return rt.getSaveRequired()
@ -161,6 +159,14 @@ def ls() -> list:
yield lib.read(container)
def on_new():
lib.set_context_setting()
if rt.checkForSave():
rt.resetMaxFile(rt.Name("noPrompt"))
rt.clearUndoBuffer()
rt.redrawViews()
def containerise(name: str, nodes: list, context,
namespace=None, loader=None, suffix="_CON"):
data = {

View file

@ -117,7 +117,7 @@ class MaxSceneLoader(load.LoaderPlugin):
)
for max_obj, obj_name in zip(max_objects, max_object_names):
max_obj.name = f"{namespace}:{obj_name}"
max_container.append(rt.getNodeByName(max_obj.name))
max_container.append(max_obj)
return containerise(
name, max_container, context,
namespace, loader=self.__class__.__name__)
@ -158,11 +158,11 @@ class MaxSceneLoader(load.LoaderPlugin):
current_max_object_names):
max_obj.name = f"{namespace}:{obj_name}"
max_objects.append(max_obj)
max_transform = f"{max_obj.name}.transform"
max_transform = f"{max_obj}.transform"
if max_transform in transform_data.keys():
max_obj.pos = transform_data[max_transform] or 0
max_obj.scale = transform_data[
f"{max_obj.name}.scale"] or 0
f"{max_obj}.scale"] or 0
update_custom_attribute_data(node, max_objects)
lib.imprint(container["instance_node"], {

View file

@ -76,11 +76,11 @@ class FbxModelLoader(load.LoaderPlugin):
for fbx_object in current_fbx_objects:
fbx_object.name = f"{namespace}:{fbx_object.name}"
fbx_objects.append(fbx_object)
fbx_transform = f"{fbx_object.name}.transform"
fbx_transform = f"{fbx_object}.transform"
if fbx_transform in transform_data.keys():
fbx_object.pos = transform_data[fbx_transform] or 0
fbx_object.scale = transform_data[
f"{fbx_object.name}.scale"] or 0
f"{fbx_object}.scale"] or 0
with maintained_selection():
rt.Select(node)

View file

@ -67,11 +67,11 @@ class ObjLoader(load.LoaderPlugin):
selections = rt.GetCurrentSelection()
for selection in selections:
selection.name = f"{namespace}:{selection.name}"
selection_transform = f"{selection.name}.transform"
selection_transform = f"{selection}.transform"
if selection_transform in transform_data.keys():
selection.pos = transform_data[selection_transform] or 0
selection.scale = transform_data[
f"{selection.name}.scale"] or 0
f"{selection}.scale"] or 0
update_custom_attribute_data(node, selections)
with maintained_selection():
rt.Select(node)

View file

@ -95,11 +95,11 @@ class ModelUSDLoader(load.LoaderPlugin):
for children in asset.Children:
children.name = f"{namespace}:{children.name}"
usd_objects.append(children)
children_transform = f"{children.name}.transform"
children_transform = f"{children}.transform"
if children_transform in transform_data.keys():
children.pos = transform_data[children_transform] or 0
children.scale = transform_data[
f"{children.name}.scale"] or 0
f"{children}.scale"] or 0
asset.name = f"{namespace}:{asset.name}"
usd_objects.append(asset)

View file

@ -92,10 +92,10 @@ class OxAbcLoader(load.LoaderPlugin):
abc.Parent = container
abc.name = f"{namespace}:{abc.name}"
ox_abc_objects.append(abc)
ox_transform = f"{abc.name}.transform"
ox_transform = f"{abc}.transform"
if ox_transform in transform_data.keys():
abc.pos = transform_data[ox_transform] or 0
abc.scale = transform_data[f"{abc.name}.scale"] or 0
abc.scale = transform_data[f"{abc}.scale"] or 0
update_custom_attribute_data(node, ox_abc_objects)
lib.imprint(
container["instance_node"],

View file

@ -53,6 +53,7 @@ class ExtractAlembic(publish.Extractor,
hosts = ["max"]
families = ["pointcache"]
optional = True
active = True
def process(self, instance):
if not self.is_active(instance.data):
@ -102,24 +103,27 @@ class ExtractAlembic(publish.Extractor,
@classmethod
def get_attribute_defs(cls):
return [
defs = super(ExtractAlembic, cls).get_attribute_defs()
defs.extend([
BoolDef("custom_attrs",
label="Custom Attributes",
default=False),
]
])
return defs
class ExtractCameraAlembic(ExtractAlembic):
"""Extract Camera with AlembicExport."""
label = "Extract Alembic Camera"
families = ["camera"]
optional = True
class ExtractModel(ExtractAlembic):
class ExtractModelAlembic(ExtractAlembic):
"""Extract Geometry in Alembic Format"""
label = "Extract Geometry (Alembic)"
families = ["model"]
optional = True
def _set_abc_attributes(self, instance):
attr_values = self.get_attr_values_from_data(instance.data)

View file

@ -42,7 +42,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin,
return
frame_range = get_frame_range(
instance.data["folderEntity"])
instance.data["taskEntity"])
inst_frame_start = instance.data.get("frameStartHandle")
inst_frame_end = instance.data.get("frameEndHandle")

View file

@ -38,7 +38,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin,
context_label = "{} > {}".format(*context)
instance_label = "{} > {}".format(folderPath, task)
message = (
"Instance '{}' publishes to different folder or task "
"Instance '{}' publishes to different context(folder or task) "
"than current context: {}. Current context: {}".format(
instance.name, instance_label, context_label
)
@ -46,7 +46,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin,
raise PublishValidationError(
message=message,
description=(
"## Publishing to a different context folder or task\n"
"## Publishing to a different context data(folder or task)\n"
"There are publish instances present which are publishing "
"into a different folder path or task than your current context.\n\n"
"Usually this is not what you want but there can be cases "

View file

@ -7,7 +7,10 @@ from ayon_core.pipeline.publish import (
RepairAction,
PublishValidationError
)
from ayon_core.hosts.max.api.lib import reset_scene_resolution
from ayon_core.hosts.max.api.lib import (
reset_scene_resolution,
imprint
)
class ValidateResolutionSetting(pyblish.api.InstancePlugin,
@ -25,8 +28,10 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
if not self.is_active(instance.data):
return
width, height = self.get_folder_resolution(instance)
current_width = rt.renderWidth
current_height = rt.renderHeight
current_width, current_height = (
self.get_current_resolution(instance)
)
if current_width != width and current_height != height:
raise PublishValidationError("Resolution Setting "
"not matching resolution "
@ -41,12 +46,16 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
"not matching resolution set "
"on asset or shot.")
def get_folder_resolution(self, instance):
folder_entity = instance.data["folderEntity"]
if folder_entity:
folder_attributes = folder_entity["attrib"]
width = folder_attributes["resolutionWidth"]
height = folder_attributes["resolutionHeight"]
def get_current_resolution(self, instance):
return rt.renderWidth, rt.renderHeight
@classmethod
def get_folder_resolution(cls, instance):
task_entity = instance.data.get("taskEntity")
if task_entity:
task_attributes = task_entity["attrib"]
width = task_attributes["resolutionWidth"]
height = task_attributes["resolutionHeight"]
return int(width), int(height)
# Defaults if not found in folder entity
@ -55,3 +64,29 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
@classmethod
def repair(cls, instance):
reset_scene_resolution()
class ValidateReviewResolutionSetting(ValidateResolutionSetting):
families = ["review"]
optional = True
actions = [RepairAction]
def get_current_resolution(self, instance):
current_width = instance.data["review_width"]
current_height = instance.data["review_height"]
return current_width, current_height
@classmethod
def repair(cls, instance):
context_width, context_height = (
cls.get_folder_resolution(instance)
)
creator_attrs = instance.data["creator_attributes"]
creator_attrs["review_width"] = context_width
creator_attrs["review_height"] = context_height
creator_attrs_data = {
"creator_attributes": creator_attrs
}
# update the width and height of review
# data in creator_attributes
imprint(instance.data["instance_node"], creator_attrs_data)

View file

@ -12,4 +12,4 @@
max create mode
python.ExecuteFile startup
)
)

View file

@ -0,0 +1,350 @@
import json
import logging
import os
from maya import cmds # noqa
from ayon_core.hosts.maya.api.lib import evaluation
log = logging.getLogger(__name__)
# The maya alembic export types
ALEMBIC_ARGS = {
"attr": (list, tuple),
"attrPrefix": (list, tuple),
"autoSubd": bool,
"dataFormat": str,
"endFrame": float,
"eulerFilter": bool,
"frameRange": str, # "start end"; overrides startFrame & endFrame
"frameRelativeSample": float,
"melPerFrameCallback": str,
"melPostJobCallback": str,
"noNormals": bool,
"preRoll": bool,
"pythonPerFrameCallback": str,
"pythonPostJobCallback": str,
"renderableOnly": bool,
"root": (list, tuple),
"selection": bool,
"startFrame": float,
"step": float,
"stripNamespaces": bool,
"userAttr": (list, tuple),
"userAttrPrefix": (list, tuple),
"uvWrite": bool,
"uvsOnly": bool,
"verbose": bool,
"wholeFrameGeo": bool,
"worldSpace": bool,
"writeColorSets": bool,
"writeCreases": bool, # Maya 2015 Ext1+
"writeFaceSets": bool,
"writeUVSets": bool, # Maya 2017+
"writeVisibility": bool,
}
def extract_alembic(
file,
attr=None,
attrPrefix=None,
dataFormat="ogawa",
endFrame=None,
eulerFilter=True,
frameRange="",
melPerFrameCallback=None,
melPostJobCallback=None,
noNormals=False,
preRoll=False,
preRollStartFrame=0,
pythonPerFrameCallback=None,
pythonPostJobCallback=None,
renderableOnly=False,
root=None,
selection=True,
startFrame=None,
step=1.0,
stripNamespaces=True,
userAttr=None,
userAttrPrefix=None,
uvsOnly=False,
uvWrite=True,
verbose=False,
wholeFrameGeo=False,
worldSpace=False,
writeColorSets=False,
writeCreases=False,
writeFaceSets=False,
writeUVSets=False,
writeVisibility=False
):
"""Extract a single Alembic Cache.
This extracts an Alembic cache using the `-selection` flag to minimize
the extracted content to solely what was Collected into the instance.
Arguments:
file (str): The filepath to write the alembic file to.
attr (list of str, optional): A specific geometric attribute to write
out. Defaults to [].
attrPrefix (list of str, optional): Prefix filter for determining which
geometric attributes to write out. Defaults to ["ABC_"].
dataFormat (str): The data format to use for the cache,
defaults to "ogawa"
endFrame (float): End frame of output. Ignored if `frameRange`
provided.
eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with
an Euler filter. Euler filtering helps resolve irregularities in
rotations especially if X, Y, and Z rotations exceed 360 degrees.
Defaults to True.
frameRange (tuple or str): Two-tuple with start and end frame or a
string formatted as: "startFrame endFrame". This argument
overrides `startFrame` and `endFrame` arguments.
melPerFrameCallback (Optional[str]): MEL callback run per frame.
melPostJobCallback (Optional[str]): MEL callback after last frame is
written.
noNormals (bool): When on, normal data from the original polygon
objects is not included in the exported Alembic cache file.
preRoll (bool): This frame range will not be sampled.
Defaults to False.
preRollStartFrame (float): The frame to start scene
evaluation at. This is used to set the starting frame for time
dependent translations and can be used to evaluate run-up that
isn't actually translated. Defaults to 0.
pythonPerFrameCallback (Optional[str]): Python callback run per frame.
pythonPostJobCallback (Optional[str]): Python callback after last frame
is written.
renderableOnly (bool): When on, any non-renderable nodes or hierarchy,
such as hidden objects, are not included in the Alembic file.
Defaults to False.
root (list of str): Maya dag path which will be parented to
the root of the Alembic file. Defaults to [], which means the
entire scene will be written out.
selection (bool): Write out all selected nodes from the
active selection list that are descendants of the roots specified
with -root. Defaults to True.
startFrame (float): Start frame of output. Ignored if `frameRange`
provided.
step (float): The time interval (expressed in frames) at
which the frame range is sampled. Additional samples around each
frame can be specified with -frs. Defaults to 1.0.
stripNamespaces (bool): When on, any namespaces associated with the
exported objects are removed from the Alembic file. For example, an
object with the namespace taco:foo:bar appears as bar in the
Alembic file.
userAttr (list of str, optional): A specific user defined attribute to
write out. Defaults to [].
userAttrPrefix (list of str, optional): Prefix filter for determining
which user defined attributes to write out. Defaults to [].
uvsOnly (bool): When on, only uv data for PolyMesh and SubD shapes
will be written to the Alembic file.
uvWrite (bool): When on, UV data from polygon meshes and subdivision
objects are written to the Alembic file. Only the current UV map is
included.
verbose (bool): When on, outputs frame number information to the
Script Editor or output window during extraction.
wholeFrameGeo (bool): Data for geometry will only be written
out on whole frames. Defaults to False.
worldSpace (bool): When on, the top node in the node hierarchy is
stored as world space. By default, these nodes are stored as local
space. Defaults to False.
writeColorSets (bool): Write all color sets on MFnMeshes as
color 3 or color 4 indexed geometry parameters with face varying
scope. Defaults to False.
writeCreases (bool): If the mesh has crease edges or crease
vertices, the mesh (OPolyMesh) would now be written out as an OSubD
and crease info will be stored in the Alembic file. Otherwise,
creases info won't be preserved in Alembic file unless a custom
Boolean attribute SubDivisionMesh has been added to mesh node and
its value is true. Defaults to False.
writeFaceSets (bool): Write all Face sets on MFnMeshes.
Defaults to False.
writeUVSets (bool): Write all uv sets on MFnMeshes as vector
2 indexed geometry parameters with face varying scope. Defaults to
False.
writeVisibility (bool): Visibility state will be stored in
the Alembic file. Otherwise everything written out is treated as
visible. Defaults to False.
"""
# Ensure alembic exporter is loaded
cmds.loadPlugin('AbcExport', quiet=True)
# Alembic Exporter requires forward slashes
file = file.replace('\\', '/')
# Ensure list arguments are valid.
attr = attr or []
attrPrefix = attrPrefix or []
userAttr = userAttr or []
userAttrPrefix = userAttrPrefix or []
root = root or []
# Pass the start and end frame on as `frameRange` so that it
# never conflicts with that argument
if not frameRange:
# Fallback to maya timeline if no start or end frame provided.
if startFrame is None:
startFrame = cmds.playbackOptions(query=True,
animationStartTime=True)
if endFrame is None:
endFrame = cmds.playbackOptions(query=True,
animationEndTime=True)
# Ensure valid types are converted to frame range
assert isinstance(startFrame, ALEMBIC_ARGS["startFrame"])
assert isinstance(endFrame, ALEMBIC_ARGS["endFrame"])
frameRange = "{0} {1}".format(startFrame, endFrame)
else:
# Allow conversion from tuple for `frameRange`
if isinstance(frameRange, (list, tuple)):
assert len(frameRange) == 2
frameRange = "{0} {1}".format(frameRange[0], frameRange[1])
# Assemble options
options = {
"selection": selection,
"frameRange": frameRange,
"eulerFilter": eulerFilter,
"noNormals": noNormals,
"preRoll": preRoll,
"root": root,
"renderableOnly": renderableOnly,
"uvWrite": uvWrite,
"uvsOnly": uvsOnly,
"writeColorSets": writeColorSets,
"writeFaceSets": writeFaceSets,
"wholeFrameGeo": wholeFrameGeo,
"worldSpace": worldSpace,
"writeVisibility": writeVisibility,
"writeUVSets": writeUVSets,
"writeCreases": writeCreases,
"dataFormat": dataFormat,
"step": step,
"attr": attr,
"attrPrefix": attrPrefix,
"userAttr": userAttr,
"userAttrPrefix": userAttrPrefix,
"stripNamespaces": stripNamespaces,
"verbose": verbose
}
# Validate options
for key, value in options.copy().items():
# Discard unknown options
if key not in ALEMBIC_ARGS:
log.warning("extract_alembic() does not support option '%s'. "
"Flag will be ignored..", key)
options.pop(key)
continue
# Validate value type
valid_types = ALEMBIC_ARGS[key]
if not isinstance(value, valid_types):
raise TypeError("Alembic option unsupported type: "
"{0} (expected {1})".format(value, valid_types))
# Ignore empty values, like an empty string, since they mess up how
# job arguments are built
if isinstance(value, (list, tuple)):
value = [x for x in value if x.strip()]
# Ignore option completely if no values remaining
if not value:
options.pop(key)
continue
options[key] = value
# The `writeCreases` argument was changed to `autoSubd` in Maya 2018+
maya_version = int(cmds.about(version=True))
if maya_version >= 2018:
options['autoSubd'] = options.pop('writeCreases', False)
# Only add callbacks if they are set so that we're not passing `None`
callbacks = {
"melPerFrameCallback": melPerFrameCallback,
"melPostJobCallback": melPostJobCallback,
"pythonPerFrameCallback": pythonPerFrameCallback,
"pythonPostJobCallback": pythonPostJobCallback,
}
for key, callback in callbacks.items():
if callback:
options[key] = str(callback)
# Format the job string from options
job_args = list()
for key, value in options.items():
if isinstance(value, (list, tuple)):
for entry in value:
job_args.append("-{} {}".format(key, entry))
elif isinstance(value, bool):
# Add only when state is set to True
if value:
job_args.append("-{0}".format(key))
else:
job_args.append("-{0} {1}".format(key, value))
job_str = " ".join(job_args)
job_str += ' -file "%s"' % file
# Ensure output directory exists
parent_dir = os.path.dirname(file)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
if verbose:
log.debug("Preparing Alembic export with options: %s",
json.dumps(options, indent=4))
log.debug("Extracting Alembic with job arguments: %s", job_str)
# Perform extraction
print("Alembic Job Arguments : {}".format(job_str))
# Disable the parallel evaluation temporarily to ensure no buggy
# exports are made. (PLN-31)
# TODO: Make sure this actually fixes the issues
with evaluation("off"):
cmds.AbcExport(
j=job_str,
verbose=verbose,
preRollStartFrame=preRollStartFrame
)
if verbose:
log.debug("Extracted Alembic to: %s", file)
return file

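A minimal usage sketch for the new extract_alembic wrapper, assuming the module lands at ayon_core.hosts.maya.api.alembic (paths and node names are illustrative):

from ayon_core.hosts.maya.api.alembic import extract_alembic  # assumed location

extract_alembic(
    file="/tmp/cache/pointcache.abc",
    root=["|model_GRP"],
    selection=False,  # export the whole root hierarchy, not a selection
    startFrame=1001.0,
    endFrame=1050.0,
    worldSpace=True,
)
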
View file

@ -47,7 +47,7 @@ class FBXExtractor:
"smoothMesh": bool,
"instances": bool,
# "referencedContainersContent": bool, # deprecated in Maya 2016+
"bakeComplexAnimation": int,
"bakeComplexAnimation": bool,
"bakeComplexStart": int,
"bakeComplexEnd": int,
"bakeComplexStep": int,
@ -59,6 +59,7 @@ class FBXExtractor:
"constraints": bool,
"lights": bool,
"embeddedTextures": bool,
"includeChildren": bool,
"inputConnections": bool,
"upAxis": str, # x, y or z,
"triangulate": bool,
@ -102,6 +103,7 @@ class FBXExtractor:
"constraints": False,
"lights": True,
"embeddedTextures": False,
"includeChildren": True,
"inputConnections": True,
"upAxis": "y",
"triangulate": False,

View file

@ -70,37 +70,6 @@ DEFAULT_MATRIX = [1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0]
# The maya alembic export types
_alembic_options = {
"startFrame": float,
"endFrame": float,
"frameRange": str, # "start end"; overrides startFrame & endFrame
"eulerFilter": bool,
"frameRelativeSample": float,
"noNormals": bool,
"renderableOnly": bool,
"step": float,
"stripNamespaces": bool,
"uvWrite": bool,
"wholeFrameGeo": bool,
"worldSpace": bool,
"writeVisibility": bool,
"writeColorSets": bool,
"writeFaceSets": bool,
"writeCreases": bool, # Maya 2015 Ext1+
"writeUVSets": bool, # Maya 2017+
"dataFormat": str,
"root": (list, tuple),
"attr": (list, tuple),
"attrPrefix": (list, tuple),
"userAttr": (list, tuple),
"melPerFrameCallback": str,
"melPostJobCallback": str,
"pythonPerFrameCallback": str,
"pythonPostJobCallback": str,
"selection": bool
}
INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000}
FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94}
@ -1330,7 +1299,7 @@ def is_visible(node,
override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node))
override_visibility = cmds.getAttr('{}.overrideVisibility'.format(
node))
if override_enabled and override_visibility:
if override_enabled and not override_visibility:
return False
if parentHidden:
@ -1346,178 +1315,6 @@ def is_visible(node,
return True
def extract_alembic(file,
startFrame=None,
endFrame=None,
selection=True,
uvWrite=True,
eulerFilter=True,
dataFormat="ogawa",
verbose=False,
**kwargs):
"""Extract a single Alembic Cache.
This extracts an Alembic cache using the `-selection` flag to minimize
the extracted content to solely what was Collected into the instance.
Arguments:
startFrame (float): Start frame of output. Ignored if `frameRange`
provided.
endFrame (float): End frame of output. Ignored if `frameRange`
provided.
frameRange (tuple or str): Two-tuple with start and end frame or a
string formatted as: "startFrame endFrame". This argument
overrides `startFrame` and `endFrame` arguments.
dataFormat (str): The data format to use for the cache,
defaults to "ogawa"
verbose (bool): When on, outputs frame number information to the
Script Editor or output window during extraction.
noNormals (bool): When on, normal data from the original polygon
objects is not included in the exported Alembic cache file.
renderableOnly (bool): When on, any non-renderable nodes or hierarchy,
such as hidden objects, are not included in the Alembic file.
Defaults to False.
stripNamespaces (bool): When on, any namespaces associated with the
exported objects are removed from the Alembic file. For example, an
object with the namespace taco:foo:bar appears as bar in the
Alembic file.
uvWrite (bool): When on, UV data from polygon meshes and subdivision
objects are written to the Alembic file. Only the current UV map is
included.
worldSpace (bool): When on, the top node in the node hierarchy is
stored as world space. By default, these nodes are stored as local
space. Defaults to False.
eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with
an Euler filter. Euler filtering helps resolve irregularities in
rotations especially if X, Y, and Z rotations exceed 360 degrees.
Defaults to True.
"""
# Ensure alembic exporter is loaded
cmds.loadPlugin('AbcExport', quiet=True)
# Alembic Exporter requires forward slashes
file = file.replace('\\', '/')
# Pass the start and end frame on as `frameRange` so that it
# never conflicts with that argument
if "frameRange" not in kwargs:
# Fallback to maya timeline if no start or end frame provided.
if startFrame is None:
startFrame = cmds.playbackOptions(query=True,
animationStartTime=True)
if endFrame is None:
endFrame = cmds.playbackOptions(query=True,
animationEndTime=True)
# Ensure valid types are converted to frame range
assert isinstance(startFrame, _alembic_options["startFrame"])
assert isinstance(endFrame, _alembic_options["endFrame"])
kwargs["frameRange"] = "{0} {1}".format(startFrame, endFrame)
else:
# Allow conversion from tuple for `frameRange`
frame_range = kwargs["frameRange"]
if isinstance(frame_range, (list, tuple)):
assert len(frame_range) == 2
kwargs["frameRange"] = "{0} {1}".format(frame_range[0],
frame_range[1])
# Assemble options
options = {
"selection": selection,
"uvWrite": uvWrite,
"eulerFilter": eulerFilter,
"dataFormat": dataFormat
}
options.update(kwargs)
# Validate options
for key, value in options.copy().items():
# Discard unknown options
if key not in _alembic_options:
log.warning("extract_alembic() does not support option '%s'. "
"Flag will be ignored..", key)
options.pop(key)
continue
# Validate value type
valid_types = _alembic_options[key]
if not isinstance(value, valid_types):
raise TypeError("Alembic option unsupported type: "
"{0} (expected {1})".format(value, valid_types))
# Ignore empty values, like an empty string, since they mess up how
# job arguments are built
if isinstance(value, (list, tuple)):
value = [x for x in value if x.strip()]
# Ignore option completely if no values remaining
if not value:
options.pop(key)
continue
options[key] = value
# The `writeCreases` argument was changed to `autoSubd` in Maya 2018+
maya_version = int(cmds.about(version=True))
if maya_version >= 2018:
options['autoSubd'] = options.pop('writeCreases', False)
# Format the job string from options
job_args = list()
for key, value in options.items():
if isinstance(value, (list, tuple)):
for entry in value:
job_args.append("-{} {}".format(key, entry))
elif isinstance(value, bool):
# Add only when state is set to True
if value:
job_args.append("-{0}".format(key))
else:
job_args.append("-{0} {1}".format(key, value))
job_str = " ".join(job_args)
job_str += ' -file "%s"' % file
# Ensure output directory exists
parent_dir = os.path.dirname(file)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
if verbose:
log.debug("Preparing Alembic export with options: %s",
json.dumps(options, indent=4))
log.debug("Extracting Alembic with job arguments: %s", job_str)
# Perform extraction
print("Alembic Job Arguments : {}".format(job_str))
# Disable the parallel evaluation temporarily to ensure no buggy
# exports are made. (PLN-31)
# TODO: Make sure this actually fixes the issues
with evaluation("off"):
cmds.AbcExport(j=job_str, verbose=verbose)
if verbose:
log.debug("Extracted Alembic to: %s", file)
return file
# region ID
def get_id_required_nodes(referenced_nodes=False,
nodes=None,
@ -2520,7 +2317,16 @@ def set_scene_fps(fps, update=True):
"""
fps_mapping = {
'2': '2fps',
'3': '3fps',
'4': '4fps',
'5': '5fps',
'6': '6fps',
'8': '8fps',
'10': '10fps',
'12': '12fps',
'15': 'game',
'16': '16fps',
'24': 'film',
'25': 'pal',
'30': 'ntsc',
@ -2612,21 +2418,24 @@ def get_fps_for_current_context():
Returns:
Union[int, float]: FPS value.
"""
project_name = get_current_project_name()
folder_path = get_current_folder_path()
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path, fields={"attrib.fps"}
) or {}
fps = folder_entity.get("attrib", {}).get("fps")
task_entity = get_current_task_entity(fields={"attrib"})
fps = task_entity.get("attrib", {}).get("fps")
if not fps:
project_entity = ayon_api.get_project(
project_name, fields=["attrib.fps"]
project_name = get_current_project_name()
folder_path = get_current_folder_path()
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path, fields={"attrib.fps"}
) or {}
fps = project_entity.get("attrib", {}).get("fps")
fps = folder_entity.get("attrib", {}).get("fps")
if not fps:
fps = 25
project_entity = ayon_api.get_project(
project_name, fields=["attrib.fps"]
) or {}
fps = project_entity.get("attrib", {}).get("fps")
if not fps:
fps = 25
return convert_to_maya_fps(fps)
@ -4403,3 +4212,23 @@ def create_rig_animation_instance(
variant=namespace,
pre_create_data={"use_selection": True}
)
def get_node_index_under_parent(node: str) -> int:
"""Return the index of a DAG node under its parent.
Arguments:
node (str): A DAG Node path.
Returns:
int: The DAG node's index under its parent, or among world assemblies.
"""
node = cmds.ls(node, long=True)[0] # enforce long names
parent = node.rsplit("|", 1)[0]
if not parent:
return cmds.ls(assemblies=True, long=True).index(node)
else:
return cmds.listRelatives(parent,
children=True,
fullPath=True).index(node)

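A small usage sketch for get_node_index_under_parent, assuming it lives in ayon_core.hosts.maya.api.lib (node names are illustrative):

from maya import cmds
from ayon_core.hosts.maya.api.lib import get_node_index_under_parent

grp = cmds.group(empty=True, name="grp")
child_a = cmds.spaceLocator(name="child_a")[0]
child_b = cmds.spaceLocator(name="child_b")[0]
cmds.parent([child_a, child_b], grp)

print(get_node_index_under_parent(child_b))  # 1: second child under "grp"
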
View file

@ -720,7 +720,8 @@ class RenderProductsArnold(ARenderProducts):
# AOVs > Legacy > Maya Render View > Mode
aovs_enabled = bool(
self._get_attr("defaultArnoldRenderOptions.aovMode")
self._get_attr(
"defaultArnoldRenderOptions.aovMode", as_string=False)
)
if not aovs_enabled:
return beauty_products

View file

@ -30,9 +30,11 @@ from ayon_core.pipeline import (
register_loader_plugin_path,
register_inventory_action_path,
register_creator_plugin_path,
register_workfile_build_plugin_path,
deregister_loader_plugin_path,
deregister_inventory_action_path,
deregister_creator_plugin_path,
deregister_workfile_build_plugin_path,
AYON_CONTAINER_ID,
AVALON_CONTAINER_ID,
)
@ -47,7 +49,6 @@ from ayon_core.hosts.maya import MAYA_ROOT_DIR
from ayon_core.hosts.maya.lib import create_workspace_mel
from . import menu, lib
from .workfile_template_builder import MayaPlaceholderLoadPlugin
from .workio import (
open_file,
save_file,
@ -64,6 +65,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
@ -93,7 +95,7 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
self.log.info(PUBLISH_PATH)
register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
self.log.info("Installing callbacks ... ")
register_event_callback("init", on_init)
@ -148,11 +150,6 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
def get_containers(self):
return ls()
def get_workfile_build_placeholder_plugins(self):
return [
MayaPlaceholderLoadPlugin
]
@contextlib.contextmanager
def maintained_selection(self):
with lib.maintained_selection():
@ -338,6 +335,7 @@ def uninstall():
deregister_loader_plugin_path(LOAD_PATH)
deregister_creator_plugin_path(CREATE_PATH)
deregister_inventory_action_path(INVENTORY_PATH)
deregister_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
menu.uninstall()

View file

@ -19,7 +19,7 @@ from .lib import pairwise
@contextlib.contextmanager
def _allow_export_from_render_setup_layer():
def allow_export_from_render_setup_layer():
"""Context manager to override Maya settings to allow RS layer export"""
try:
@ -102,7 +102,7 @@ def export_in_rs_layer(path, nodes, export=None):
cmds.disconnectAttr(src, dest)
# Export Selected
with _allow_export_from_render_setup_layer():
with allow_export_from_render_setup_layer():
cmds.select(nodes, noExpand=True)
if export:
export()

View file

@ -12,14 +12,13 @@ from ayon_core.pipeline.workfile.workfile_template_builder import (
TemplateAlreadyImported,
AbstractTemplateBuilder,
PlaceholderPlugin,
LoadPlaceholderItem,
PlaceholderLoadMixin,
PlaceholderItem,
)
from ayon_core.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from .lib import read, imprint, get_reference_node, get_main_window
from .lib import read, imprint, get_main_window
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
@ -91,170 +90,102 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
return True
class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
identifier = "maya.load"
label = "Maya load"
class MayaPlaceholderPlugin(PlaceholderPlugin):
"""Base Placeholder Plugin for Maya with one unified cache.
def _collect_scene_placeholders(self):
# Cache placeholder data to shared data
placeholder_nodes = self.builder.get_shared_populate_data(
"placeholder_nodes"
)
if placeholder_nodes is None:
attributes = cmds.ls("*.plugin_identifier", long=True)
placeholder_nodes = {}
for attribute in attributes:
node_name = attribute.rpartition(".")[0]
placeholder_nodes[node_name] = (
self._parse_placeholder_node_data(node_name)
)
Creates a locator as the placeholder node, which during populate
provides all of its attributes defined on the locator's transform in
`placeholder.data`, and where `placeholder.scene_identifier` is the
full path to the node.
self.builder.set_shared_populate_data(
"placeholder_nodes", placeholder_nodes
)
return placeholder_nodes
Inherited classes must still implement `populate_placeholder`
def _parse_placeholder_node_data(self, node_name):
placeholder_data = read(node_name)
parent_name = (
cmds.getAttr(node_name + ".parent", asString=True)
or node_name.rpartition("|")[0]
or ""
)
if parent_name:
siblings = cmds.listRelatives(parent_name, children=True)
else:
siblings = cmds.ls(assemblies=True)
node_shortname = node_name.rpartition("|")[2]
current_index = cmds.getAttr(node_name + ".index", asString=True)
if current_index < 0:
current_index = siblings.index(node_shortname)
"""
placeholder_data.update({
"parent": parent_name,
"index": current_index
})
return placeholder_data
use_selection_as_parent = True
item_class = PlaceholderItem
def _create_placeholder_name(self, placeholder_data):
placeholder_name_parts = placeholder_data["builder_type"].split("_")
return self.identifier.replace(".", "_")
pos = 1
placeholder_product_type = placeholder_data.get("product_type")
if placeholder_product_type is None:
placeholder_product_type = placeholder_data.get("family")
if placeholder_product_type:
placeholder_name_parts.insert(pos, placeholder_product_type)
pos += 1
# add loader arguments if any
loader_args = placeholder_data["loader_args"]
if loader_args:
loader_args = json.loads(loader_args.replace('\'', '\"'))
values = [v for v in loader_args.values()]
for value in values:
placeholder_name_parts.insert(pos, value)
pos += 1
placeholder_name = "_".join(placeholder_name_parts)
return placeholder_name.capitalize()
def _get_loaded_repre_ids(self):
loaded_representation_ids = self.builder.get_shared_populate_data(
"loaded_representation_ids"
def _collect_scene_placeholders(self):
nodes_by_identifier = self.builder.get_shared_populate_data(
"placeholder_nodes"
)
if loaded_representation_ids is None:
try:
containers = cmds.sets("AVALON_CONTAINERS", q=True)
except ValueError:
containers = []
if nodes_by_identifier is None:
# Cache placeholder data to shared data
nodes = cmds.ls("*.plugin_identifier", long=True, objectsOnly=True)
loaded_representation_ids = {
cmds.getAttr(container + ".representation")
for container in containers
}
nodes_by_identifier = {}
for node in nodes:
identifier = cmds.getAttr("{}.plugin_identifier".format(node))
nodes_by_identifier.setdefault(identifier, []).append(node)
# Set the cache
self.builder.set_shared_populate_data(
"loaded_representation_ids", loaded_representation_ids
"placeholder_nodes", nodes_by_identifier
)
return loaded_representation_ids
return nodes_by_identifier
def create_placeholder(self, placeholder_data):
selection = cmds.ls(selection=True)
if len(selection) > 1:
raise ValueError("More then one item are selected")
parent = selection[0] if selection else None
parent = None
if self.use_selection_as_parent:
selection = cmds.ls(selection=True)
if len(selection) > 1:
raise ValueError(
"More than one node is selected. "
"Please select only one to define the parent."
)
parent = selection[0] if selection else None
placeholder_data["plugin_identifier"] = self.identifier
placeholder_name = self._create_placeholder_name(placeholder_data)
placeholder = cmds.spaceLocator(name=placeholder_name)[0]
if parent:
placeholder = cmds.parent(placeholder, selection[0])[0]
imprint(placeholder, placeholder_data)
# Add helper attributes to keep placeholder info
cmds.addAttr(
placeholder,
longName="parent",
hidden=True,
dataType="string"
)
cmds.addAttr(
placeholder,
longName="index",
hidden=True,
attributeType="short",
defaultValue=-1
)
cmds.setAttr(placeholder + ".parent", "", type="string")
self.imprint(placeholder, placeholder_data)
def update_placeholder(self, placeholder_item, placeholder_data):
node_name = placeholder_item.scene_identifier
new_values = {}
changed_values = {}
for key, value in placeholder_data.items():
placeholder_value = placeholder_item.data.get(key)
if value != placeholder_value:
new_values[key] = value
placeholder_item.data[key] = value
if value != placeholder_item.data.get(key):
changed_values[key] = value
for key in new_values.keys():
cmds.deleteAttr(node_name + "." + key)
# Delete attributes to ensure we imprint new data with correct type
for key in changed_values.keys():
placeholder_item.data[key] = value
if cmds.attributeQuery(key, node=node_name, exists=True):
attribute = "{}.{}".format(node_name, key)
cmds.deleteAttr(attribute)
imprint(node_name, new_values)
self.imprint(node_name, changed_values)
def collect_placeholders(self):
output = []
scene_placeholders = self._collect_scene_placeholders()
for node_name, placeholder_data in scene_placeholders.items():
if placeholder_data.get("plugin_identifier") != self.identifier:
continue
placeholders = []
nodes_by_identifier = self._collect_scene_placeholders()
for node in nodes_by_identifier.get(self.identifier, []):
# TODO do data validations and maybe upgrades if they are invalid
output.append(
LoadPlaceholderItem(node_name, placeholder_data, self)
placeholder_data = self.read(node)
placeholders.append(
self.item_class(scene_identifier=node,
data=placeholder_data,
plugin=self)
)
return output
def populate_placeholder(self, placeholder):
self.populate_load_placeholder(placeholder)
def repopulate_placeholder(self, placeholder):
repre_ids = self._get_loaded_repre_ids()
self.populate_load_placeholder(placeholder, repre_ids)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
return placeholders
def post_placeholder_process(self, placeholder, failed):
"""Cleanup placeholder after load of its corresponding representations.
Hide placeholder, add them to placeholder set.
Used only by PlaceholderCreateMixin and PlaceholderLoadMixin
Args:
placeholder (PlaceholderItem): Item which was just used to load
representation.
@ -263,81 +194,56 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
# Hide placeholder and add them to placeholder set
node = placeholder.scene_identifier
# If we just populate the placeholders from current scene, the
# placeholder set will not be created so account for that.
if not cmds.objExists(PLACEHOLDER_SET):
cmds.sets(name=PLACEHOLDER_SET, empty=True)
cmds.sets(node, addElement=PLACEHOLDER_SET)
cmds.hide(node)
cmds.setAttr(node + ".hiddenInOutliner", True)
cmds.setAttr("{}.hiddenInOutliner".format(node), True)
def delete_placeholder(self, placeholder):
"""Remove placeholder if building was successful"""
cmds.delete(placeholder.scene_identifier)
"""Remove placeholder if building was successful
def load_succeed(self, placeholder, container):
self._parent_in_hierarchy(placeholder, container)
def _parent_in_hierarchy(self, placeholder, container):
"""Parent loaded container to placeholder's parent.
ie : Set loaded content as placeholder's sibling
Args:
container (str): Placeholder loaded containers
Used only by PlaceholderCreateMixin and PlaceholderLoadMixin.
"""
node = placeholder.scene_identifier
if not container:
return
# To avoid that deleting a placeholder node will have Maya delete
# any objectSets the node was a member of we will first remove it
# from any sets it was a member of. This way the `PLACEHOLDERS_SET`
# will survive long enough
sets = cmds.listSets(o=node) or []
for object_set in sets:
cmds.sets(node, remove=object_set)
roots = cmds.sets(container, q=True) or []
ref_node = None
try:
ref_node = get_reference_node(roots)
except AssertionError as e:
self.log.info(e.args[0])
cmds.delete(node)
nodes_to_parent = []
for root in roots:
if ref_node:
ref_root = cmds.referenceQuery(root, nodes=True)[0]
ref_root = (
cmds.listRelatives(ref_root, parent=True, path=True) or
[ref_root]
)
nodes_to_parent.extend(ref_root)
continue
if root.endswith("_RN"):
# Backwards compatibility for hardcoded reference names.
refRoot = cmds.referenceQuery(root, n=True)[0]
refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot]
nodes_to_parent.extend(refRoot)
elif root not in cmds.listSets(allSets=True):
nodes_to_parent.append(root)
def imprint(self, node, data):
"""Imprint call for placeholder node"""
elif not cmds.sets(root, q=True):
return
# Complex data that can't be represented as flat maya attributes is
# written as a JSON string, e.g. a multiselection EnumDef
for key, value in data.items():
if isinstance(value, (list, tuple, dict)):
data[key] = "JSON::{}".format(json.dumps(value))
# Move loaded nodes to correct index in outliner hierarchy
placeholder_form = cmds.xform(
placeholder.scene_identifier,
q=True,
matrix=True,
worldSpace=True
)
scene_parent = cmds.listRelatives(
placeholder.scene_identifier, parent=True, fullPath=True
)
for node in set(nodes_to_parent):
cmds.reorder(node, front=True)
cmds.reorder(node, relative=placeholder.data["index"])
cmds.xform(node, matrix=placeholder_form, ws=True)
if scene_parent:
cmds.parent(node, scene_parent)
else:
cmds.parent(node, world=True)
imprint(node, data)
holding_sets = cmds.listSets(object=placeholder.scene_identifier)
if not holding_sets:
return
for holding_set in holding_sets:
cmds.sets(roots, forceElement=holding_set)
def read(self, node):
"""Read call for placeholder node"""
data = read(node)
# Complex data that can't be represented as flat maya attributes is
# read back from JSON strings, e.g. a multiselection EnumDef
for key, value in data.items():
if isinstance(value, str) and value.startswith("JSON::"):
value = value[len("JSON::"):]  # strip off the JSON:: prefix
data[key] = json.loads(value)
return data
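A minimal round-trip sketch of the `imprint`/`read` pair above, assuming `plugin` is an instance of this class and a locator named `myPlaceholder` exists in the scene (both names hypothetical):
    # Lists, tuples and dicts are flattened to "JSON::..." string
    # attributes on imprint and decoded back to Python objects on read.
    data = {"builder_type": "context_folder", "families": ["rig", "model"]}
    plugin.imprint("myPlaceholder", data)
    restored = plugin.read("myPlaceholder")
    assert restored["families"] == ["rig", "model"]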
def build_workfile_template(*args):

View file

@ -0,0 +1,101 @@
from typing import List
from maya import cmds
def get_yeti_user_variables(yeti_shape_node: str) -> List[str]:
"""Get user defined yeti user variables for a `pgYetiMaya` shape node.
Arguments:
yeti_shape_node (str): The `pgYetiMaya` shape node.
Returns:
list: Attribute names (for a vector attribute it only lists the top
parent attribute, not the attribute per axis)
"""
attrs = cmds.listAttr(yeti_shape_node,
userDefined=True,
string=("yetiVariableV_*",
"yetiVariableF_*")) or []
valid_attrs = []
for attr in attrs:
attr_type = cmds.attributeQuery(attr, node=yeti_shape_node,
attributeType=True)
if attr.startswith("yetiVariableV_") and attr_type == "double3":
# vector
valid_attrs.append(attr)
elif attr.startswith("yetiVariableF_") and attr_type == "double":
valid_attrs.append(attr)
return valid_attrs
def create_yeti_variable(yeti_shape_node: str,
attr_name: str,
value=None,
force_value: bool = False) -> bool:
"""Get user defined yeti user variables for a `pgYetiMaya` shape node.
Arguments:
yeti_shape_node (str): The `pgYetiMaya` shape node.
attr_name (str): The fully qualified yeti variable name, e.g.
"yetiVariableF_myfloat" or "yetiVariableV_myvector"
value (object): The value to set (must match the attribute's type).
When value is None it is ignored and not set.
force_value (bool): Whether to still set the value if the attribute
already exists.
Returns:
bool: Whether the attribute value was set or not.
"""
exists = cmds.attributeQuery(attr_name, node=yeti_shape_node, exists=True)
if not exists:
if attr_name.startswith("yetiVariableV_"):
_create_vector_yeti_user_variable(yeti_shape_node, attr_name)
if attr_name.startswith("yetiVariableF_"):
_create_float_yeti_user_variable(yeti_shape_node, attr_name)
if value is not None and (not exists or force_value):
plug = "{}.{}".format(yeti_shape_node, attr_name)
if (
isinstance(value, (list, tuple))
and attr_name.startswith("yetiVariableV_")
):
cmds.setAttr(plug, *value, type="double3")
else:
cmds.setAttr(plug, value)
return True
return False
def _create_vector_yeti_user_variable(yeti_shape_node: str, attr_name: str):
if not attr_name.startswith("yetiVariableV_"):
raise ValueError("Must start with yetiVariableV_")
cmds.addAttr(yeti_shape_node,
longName=attr_name,
attributeType="double3",
cachedInternally=True,
keyable=True)
for axis in "XYZ":
cmds.addAttr(yeti_shape_node,
longName="{}{}".format(attr_name, axis),
attributeType="double",
parent=attr_name,
cachedInternally=True,
keyable=True)
def _create_float_yeti_user_variable(yeti_node: str, attr_name: str):
if not attr_name.startswith("yetiVariableF_"):
raise ValueError("Must start with yetiVariableF_")
cmds.addAttr(yeti_node,
longName=attr_name,
attributeType="double",
cachedInternally=True,
softMinValue=0,
softMaxValue=100,
keyable=True)
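A usage sketch for the helpers above, assuming a scene containing a `pgYetiMaya` shape named `yetiShape` (hypothetical):
    # First call creates the float attribute and sets it; returns True.
    create_yeti_variable("yetiShape", "yetiVariableF_density", value=0.5)
    # Second call leaves the existing (possibly locally overridden) value
    # untouched because force_value is False; returns False.
    create_yeti_variable("yetiShape", "yetiVariableF_density", value=1.0)
    # Vector variables take a 3-tuple and are listed by their parent name.
    create_yeti_variable("yetiShape", "yetiVariableV_tint",
                         value=(1.0, 0.5, 0.0))
    print(get_yeti_user_variables("yetiShape"))
    # e.g. ['yetiVariableF_density', 'yetiVariableV_tint']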

View file

@ -1,89 +0,0 @@
from ayon_core.hosts.maya.api import (
lib,
plugin
)
from ayon_core.lib import (
BoolDef,
TextDef
)
class CreateAnimation(plugin.MayaHiddenCreator):
"""Animation output for character rigs
We hide the animation creator from the UI since the creation of it is
automated upon loading a rig. There's an inventory action to recreate it
for loaded rigs if by chance someone deleted the animation instance.
"""
identifier = "io.openpype.creators.maya.animation"
name = "animationDefault"
label = "Animation"
product_type = "animation"
icon = "male"
write_color_sets = False
write_face_sets = False
include_parent_hierarchy = False
include_user_defined_attributes = False
def get_instance_attr_defs(self):
defs = lib.collect_animation_defs()
defs.extend([
BoolDef("writeColorSets",
label="Write vertex colors",
tooltip="Write vertex colors with the geometry",
default=self.write_color_sets),
BoolDef("writeFaceSets",
label="Write face sets",
tooltip="Write face sets with the geometry",
default=self.write_face_sets),
BoolDef("writeNormals",
label="Write normals",
tooltip="Write normals with the deforming geometry",
default=True),
BoolDef("renderableOnly",
label="Renderable Only",
tooltip="Only export renderable visible shapes",
default=False),
BoolDef("visibleOnly",
label="Visible Only",
tooltip="Only export dag objects visible during "
"frame range",
default=False),
BoolDef("includeParentHierarchy",
label="Include Parent Hierarchy",
tooltip="Whether to include parent hierarchy of nodes in "
"the publish instance",
default=self.include_parent_hierarchy),
BoolDef("worldSpace",
label="World-Space Export",
default=True),
BoolDef("includeUserDefinedAttributes",
label="Include User Defined Attributes",
default=self.include_user_defined_attributes),
TextDef("attr",
label="Custom Attributes",
default="",
placeholder="attr1, attr2"),
TextDef("attrPrefix",
label="Custom Attributes Prefix",
placeholder="prefix1, prefix2")
])
# TODO: Implement these on a Deadline plug-in instead?
"""
# Default to not send to farm.
self.data["farm"] = False
self.data["priority"] = 50
"""
return defs
def apply_settings(self, project_settings):
super(CreateAnimation, self).apply_settings(project_settings)
# Hardcoding creator to be enabled due to existing settings would
# disable the creator causing the creator plugin to not be
# discoverable.
self.enabled = True

View file

@ -0,0 +1,138 @@
from maya import cmds
from ayon_core.hosts.maya.api import lib, plugin
from ayon_core.lib import (
BoolDef,
NumberDef,
)
def _get_animation_attr_defs(cls):
"""Get Animation generic definitions."""
defs = lib.collect_animation_defs()
defs.extend(
[
BoolDef("farm", label="Submit to Farm"),
NumberDef("priority", label="Farm job Priority", default=50),
BoolDef("refresh", label="Refresh viewport during export"),
BoolDef(
"includeParentHierarchy",
label="Include Parent Hierarchy",
tooltip=(
"Whether to include parent hierarchy of nodes in the "
"publish instance."
)
),
BoolDef(
"includeUserDefinedAttributes",
label="Include User Defined Attributes",
tooltip=(
"Whether to include all custom maya attributes found "
"on nodes as attributes in the Alembic data."
)
),
]
)
return defs
def convert_legacy_alembic_creator_attributes(node_data, class_name):
"""This is a legacy transfer of creator attributes to publish attributes
for ExtractAlembic/ExtractAnimation plugin.
"""
publish_attributes = node_data["publish_attributes"]
if class_name in publish_attributes:
return node_data
attributes = [
"attr",
"attrPrefix",
"visibleOnly",
"writeColorSets",
"writeFaceSets",
"writeNormals",
"renderableOnly",
"visibleOnly",
"worldSpace",
"renderableOnly"
]
plugin_attributes = {}
for attr in attributes:
if attr not in node_data["creator_attributes"]:
continue
value = node_data["creator_attributes"].pop(attr)
plugin_attributes[attr] = value
publish_attributes[class_name] = plugin_attributes
return node_data
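A hedged example of the conversion above with hypothetical instance data; only attributes from the legacy list move, anything else stays with the creator:
    node_data = {
        "creator_attributes": {"worldSpace": True, "step": 1.0},
        "publish_attributes": {},
    }
    node_data = convert_legacy_alembic_creator_attributes(
        node_data, "ExtractAlembic"
    )
    # "worldSpace" moved to the plugin's publish attributes; "step" is
    # not a legacy Alembic attribute and stays with the creator.
    assert node_data["publish_attributes"]["ExtractAlembic"] == {
        "worldSpace": True
    }
    assert node_data["creator_attributes"] == {"step": 1.0}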
class CreateAnimation(plugin.MayaHiddenCreator):
"""Animation output for character rigs
We hide the animation creator from the UI since the creation of it is
automated upon loading a rig. There's an inventory action to recreate it
for loaded rigs if by chance someone deleted the animation instance.
"""
identifier = "io.openpype.creators.maya.animation"
name = "animationDefault"
label = "Animation"
product_type = "animation"
icon = "male"
write_color_sets = False
write_face_sets = False
include_parent_hierarchy = False
include_user_defined_attributes = False
def read_instance_node(self, node):
node_data = super(CreateAnimation, self).read_instance_node(node)
node_data = convert_legacy_alembic_creator_attributes(
node_data, "ExtractAnimation"
)
return node_data
def get_instance_attr_defs(self):
defs = super(CreateAnimation, self).get_instance_attr_defs()
defs += _get_animation_attr_defs(self)
return defs
class CreatePointCache(plugin.MayaCreator):
"""Alembic pointcache for animated data"""
identifier = "io.openpype.creators.maya.pointcache"
label = "Pointcache"
product_type = "pointcache"
icon = "gears"
write_color_sets = False
write_face_sets = False
include_user_defined_attributes = False
def read_instance_node(self, node):
node_data = super(CreatePointCache, self).read_instance_node(node)
node_data = convert_legacy_alembic_creator_attributes(
node_data, "ExtractAlembic"
)
return node_data
def get_instance_attr_defs(self):
defs = super(CreatePointCache, self).get_instance_attr_defs()
defs += _get_animation_attr_defs(self)
return defs
def create(self, product_name, instance_data, pre_create_data):
instance = super(CreatePointCache, self).create(
product_name, instance_data, pre_create_data
)
instance_node = instance.get("instance_node")
# For Arnold standin proxy
proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
cmds.sets(proxy_set, forceElement=instance_node)

View file

@ -1,3 +1,5 @@
from maya import cmds
from ayon_core.hosts.maya.api import (
lib,
plugin
@ -87,16 +89,24 @@ class CreateArnoldSceneSource(plugin.MayaCreator):
return defs
class CreateArnoldSceneSourceProxy(CreateArnoldSceneSource):
"""Arnold Scene Source Proxy
This product type facilitates working with proxy geometry in the viewport.
"""
identifier = "io.openpype.creators.maya.assproxy"
label = "Arnold Scene Source Proxy"
product_type = "assProxy"
icon = "cube"
def create(self, product_name, instance_data, pre_create_data):
from maya import cmds
instance = super(CreateArnoldSceneSource, self).create(
product_name, instance_data, pre_create_data
)
instance_node = instance.get("instance_node")
content = cmds.sets(name=instance_node + "_content_SET", empty=True)
proxy = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
cmds.sets([content, proxy], forceElement=instance_node)
cmds.sets([proxy], forceElement=instance_node)

View file

@ -1,88 +0,0 @@
from maya import cmds
from ayon_core.hosts.maya.api import (
lib,
plugin
)
from ayon_core.lib import (
BoolDef,
TextDef
)
class CreatePointCache(plugin.MayaCreator):
"""Alembic pointcache for animated data"""
identifier = "io.openpype.creators.maya.pointcache"
label = "Pointcache"
product_type = "pointcache"
icon = "gears"
write_color_sets = False
write_face_sets = False
include_user_defined_attributes = False
def get_instance_attr_defs(self):
defs = lib.collect_animation_defs()
defs.extend([
BoolDef("writeColorSets",
label="Write vertex colors",
tooltip="Write vertex colors with the geometry",
default=False),
BoolDef("writeFaceSets",
label="Write face sets",
tooltip="Write face sets with the geometry",
default=False),
BoolDef("renderableOnly",
label="Renderable Only",
tooltip="Only export renderable visible shapes",
default=False),
BoolDef("visibleOnly",
label="Visible Only",
tooltip="Only export dag objects visible during "
"frame range",
default=False),
BoolDef("includeParentHierarchy",
label="Include Parent Hierarchy",
tooltip="Whether to include parent hierarchy of nodes in "
"the publish instance",
default=False),
BoolDef("worldSpace",
label="World-Space Export",
default=True),
BoolDef("refresh",
label="Refresh viewport during export",
default=False),
BoolDef("includeUserDefinedAttributes",
label="Include User Defined Attributes",
default=self.include_user_defined_attributes),
TextDef("attr",
label="Custom Attributes",
default="",
placeholder="attr1, attr2"),
TextDef("attrPrefix",
label="Custom Attributes Prefix",
default="",
placeholder="prefix1, prefix2")
])
# TODO: Implement these on a Deadline plug-in instead?
"""
# Default to not send to farm.
self.data["farm"] = False
self.data["priority"] = 50
"""
return defs
def create(self, product_name, instance_data, pre_create_data):
instance = super(CreatePointCache, self).create(
product_name, instance_data, pre_create_data
)
instance_node = instance.get("instance_node")
# For Arnold standin proxy
proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
cmds.sets(proxy_set, forceElement=instance_node)

View file

@ -37,7 +37,7 @@ class ConnectGeometry(InventoryAction):
repre_id = container["representation"]
repre_context = repre_contexts_by_id[repre_id]
product_type = repre_context["prouct"]["productType"]
product_type = repre_context["product"]["productType"]
containers_by_product_type.setdefault(product_type, [])
containers_by_product_type[product_type].append(container)

View file

@ -36,7 +36,7 @@ class ConnectXgen(InventoryAction):
repre_id = container["representation"]
repre_context = repre_contexts_by_id[repre_id]
product_type = repre_context["prouct"]["productType"]
product_type = repre_context["product"]["productType"]
containers_by_product_type.setdefault(product_type, [])
containers_by_product_type[product_type].append(container)

View file

@ -39,7 +39,7 @@ class ConnectYetiRig(InventoryAction):
repre_id = container["representation"]
repre_context = repre_contexts_by_id[repre_id]
product_type = repre_context["prouct"]["productType"]
product_type = repre_context["product"]["productType"]
containers_by_product_type.setdefault(product_type, [])
containers_by_product_type[product_type].append(container)

View file

@ -12,6 +12,7 @@ from ayon_core.hosts.maya.api.lib import (
unique_namespace,
get_attribute_input,
maintained_selection,
get_fps_for_current_context
)
from ayon_core.hosts.maya.api.pipeline import containerise
from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type
@ -29,7 +30,13 @@ class ArnoldStandinLoader(load.LoaderPlugin):
"""Load as Arnold standin"""
product_types = {
"ass", "animation", "model", "proxyAbc", "pointcache", "usd"
"ass",
"assProxy",
"animation",
"model",
"proxyAbc",
"pointcache",
"usd"
}
representations = {"ass", "abc", "usda", "usdc", "usd"}
@ -95,8 +102,10 @@ class ArnoldStandinLoader(load.LoaderPlugin):
sequence = is_sequence(os.listdir(os.path.dirname(repre_path)))
cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
fps = float(version_attributes.get("fps")) or 25
cmds.setAttr(standin_shape + ".abcFPS", fps)
fps = (
version_attributes.get("fps") or get_fps_for_current_context()
)
cmds.setAttr(standin_shape + ".abcFPS", float(fps))
nodes = [root, standin, standin_shape]
if operator is not None:
@ -128,6 +137,18 @@ class ArnoldStandinLoader(load.LoaderPlugin):
proxy_path = "/".join([os.path.dirname(path), proxy_basename])
return proxy_basename, proxy_path
def _update_operators(self, string_replace_operator, proxy_basename, path):
cmds.setAttr(
string_replace_operator + ".match",
proxy_basename.split(".")[0],
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path).split(".")[0],
type="string"
)
def _setup_proxy(self, shape, path, namespace):
proxy_basename, proxy_path = self._get_proxy_path(path)
@ -150,16 +171,7 @@ class ArnoldStandinLoader(load.LoaderPlugin):
"*.(@node=='{}')".format(node_type),
type="string"
)
cmds.setAttr(
string_replace_operator + ".match",
proxy_basename,
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path),
type="string"
)
self._update_operators(string_replace_operator, proxy_basename, path)
cmds.connectAttr(
string_replace_operator + ".out",
@ -194,18 +206,9 @@ class ArnoldStandinLoader(load.LoaderPlugin):
path = get_representation_path(repre_entity)
proxy_basename, proxy_path = self._get_proxy_path(path)
# Whether there is proxy or so, we still update the string operator.
# Whether there is proxy or not, we still update the string operator.
# If no proxy exists, the string operator won't replace anything.
cmds.setAttr(
string_replace_operator + ".match",
proxy_basename,
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path),
type="string"
)
self._update_operators(string_replace_operator, proxy_basename, path)
dso_path = path
if os.path.exists(proxy_path):

View file

@ -0,0 +1,39 @@
from ayon_core.lib import (
BoolDef
)
from ayon_core.pipeline import (
load,
registered_host
)
from ayon_core.hosts.maya.api.workfile_template_builder import (
MayaTemplateBuilder
)
class LoadAsTemplate(load.LoaderPlugin):
"""Load workfile as a template """
product_types = {"workfile", "mayaScene"}
label = "Load as template"
representations = ["ma", "mb"]
icon = "wrench"
color = "#775555"
order = 10
options = [
BoolDef("keep_placeholders",
label="Keep Placeholders",
default=False),
BoolDef("create_first_version",
label="Create First Version",
default=False),
]
def load(self, context, name, namespace, data):
keep_placeholders = data.get("keep_placeholders", False)
create_first_version = data.get("create_first_version", False)
path = self.filepath_from_context(context)
builder = MayaTemplateBuilder(registered_host())
builder.build_template(template_path=path,
keep_placeholders=keep_placeholders,
create_first_version=create_first_version)

View file

@ -8,7 +8,7 @@ from ayon_core.pipeline import (
from ayon_core.pipeline.load.utils import get_representation_path_from_context
from ayon_core.pipeline.colorspace import (
get_imageio_file_rules_colorspace_from_filepath,
get_imageio_config,
get_current_context_imageio_config_preset,
get_imageio_file_rules
)
from ayon_core.settings import get_project_settings
@ -270,8 +270,7 @@ class FileNodeLoader(load.LoaderPlugin):
host_name = get_current_host_name()
project_settings = get_project_settings(project_name)
config_data = get_imageio_config(
project_name, host_name,
config_data = get_current_context_imageio_config_preset(
project_settings=project_settings
)

View file

@ -12,6 +12,7 @@ from ayon_core.pipeline import (
get_representation_path
)
from ayon_core.hosts.maya.api import lib
from ayon_core.hosts.maya.api.yeti import create_yeti_variable
from ayon_core.hosts.maya.api.pipeline import containerise
from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type
@ -23,8 +24,19 @@ SKIP_UPDATE_ATTRS = {
"viewportDensity",
"viewportWidth",
"viewportLength",
"renderDensity",
"renderWidth",
"renderLength",
"increaseRenderBounds"
}
SKIP_ATTR_MESSAGE = (
"Skipping updating %s.%s to %s because it "
"is considered a local overridable attribute. "
"Either set manually or the load the cache "
"anew."
)
def set_attribute(node, attr, value):
"""Wrapper of set attribute which ignores None values"""
@ -209,9 +221,31 @@ class YetiCacheLoader(load.LoaderPlugin):
for attr, value in node_settings["attrs"].items():
if attr in SKIP_UPDATE_ATTRS:
self.log.info(
SKIP_ATTR_MESSAGE, yeti_node, attr, value
)
continue
set_attribute(attr, value, yeti_node)
# Set up user defined attributes
user_variables = node_settings.get("user_variables", {})
for attr, value in user_variables.items():
was_value_set = create_yeti_variable(
yeti_shape_node=yeti_node,
attr_name=attr,
value=value,
# We do not want to update the
# value if it already exists so
# that any local overrides that
# may have been applied still
# persist
force_value=False
)
if not was_value_set:
self.log.info(
SKIP_ATTR_MESSAGE, yeti_node, attr, value
)
cmds.setAttr("{}.representation".format(container_node),
repre_entity["id"],
typ="string")
@ -332,6 +366,13 @@ class YetiCacheLoader(load.LoaderPlugin):
for attr, value in attributes.items():
set_attribute(attr, value, yeti_node)
# Set up user defined attributes
user_variables = node_settings.get("user_variables", {})
for attr, value in user_variables.items():
create_yeti_variable(yeti_shape_node=yeti_node,
attr_name=attr,
value=value)
# Connect to the time node
cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node)

View file

@ -1,8 +1,13 @@
from typing import List
import maya.cmds as cmds
from ayon_core.hosts.maya.api import plugin
from ayon_core.hosts.maya.api import lib
from ayon_core.pipeline import registered_host
from ayon_core.pipeline.create import CreateContext
class YetiRigLoader(plugin.ReferenceLoader):
"""This loader will load Yeti rig."""
@ -15,6 +20,9 @@ class YetiRigLoader(plugin.ReferenceLoader):
icon = "code-fork"
color = "orange"
# From settings
create_cache_instance_on_load = True
def process_reference(
self, context, name=None, namespace=None, options=None
):
@ -49,4 +57,41 @@ class YetiRigLoader(plugin.ReferenceLoader):
)
self[:] = nodes
if self.create_cache_instance_on_load:
# Automatically create an instance to allow publishing the loaded
# yeti rig into a yeti cache
self._create_yeti_cache_instance(nodes, variant=namespace)
return nodes
def _create_yeti_cache_instance(self, nodes: List[str], variant: str):
"""Create a yeticache product type instance to publish the output.
This is similar to how loading an animation rig automatically creates
an animation instance for publishing the loaded character rig, but
for yeti rigs.
Args:
nodes (List[str]): Nodes generated on load.
variant (str): Variant for the yeti cache instance to create.
"""
# Find the roots amongst the loaded nodes
yeti_nodes = cmds.ls(nodes, type="pgYetiMaya", long=True)
assert yeti_nodes, "No pgYetiMaya nodes in rig, this is a bug."
self.log.info("Creating variant: {}".format(variant))
creator_identifier = "io.openpype.creators.maya.yeticache"
host = registered_host()
create_context = CreateContext(host)
with lib.maintained_selection():
cmds.select(yeti_nodes, noExpand=True)
create_context.create(
creator_identifier=creator_identifier,
variant=variant,
pre_create_data={"use_selection": True}
)

View file

@ -58,4 +58,3 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):
if instance.data.get("farm"):
instance.data["families"].append("publish.farm")

View file

@ -10,21 +10,23 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
# Offset to be after renderable camera collection.
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Arnold Scene Source"
families = ["ass"]
families = ["ass", "assProxy"]
def process(self, instance):
objsets = instance.data["setMembers"]
instance.data["members"] = []
for set_member in instance.data["setMembers"]:
if cmds.nodeType(set_member) != "objectSet":
instance.data["members"].extend(self.get_hierarchy(set_member))
continue
for objset in objsets:
objset = str(objset)
members = cmds.sets(objset, query=True)
members = cmds.sets(set_member, query=True)
members = cmds.ls(members, long=True)
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
self.log.warning(
"Skipped empty instance: \"%s\" " % set_member
)
continue
if objset.endswith("content_SET"):
instance.data["contentMembers"] = self.get_hierarchy(members)
if objset.endswith("proxy_SET"):
if set_member.endswith("proxy_SET"):
instance.data["proxy"] = self.get_hierarchy(members)
# Use camera in object set if present else default to render globals
@ -33,7 +35,7 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)]
if renderable:
camera = renderable[0]
for node in instance.data["contentMembers"]:
for node in instance.data["members"]:
camera_shapes = cmds.listRelatives(
node, shapes=True, type="camera"
)
@ -46,18 +48,11 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
self.log.debug("data: {}".format(instance.data))
def get_hierarchy(self, nodes):
"""Return nodes with all their children.
Arguments:
nodes (List[str]): List of nodes to collect children hierarchy for
Returns:
list: Input nodes with their children hierarchy
"""
"""Return nodes with all their children"""
nodes = cmds.ls(nodes, long=True)
if not nodes:
return []
children = get_all_children(nodes, ignore_intermediate_objects=True)
return list(children.union(nodes))
children = get_all_children(nodes)
# Make sure nodes merged with children only
# contains unique entries
return list(set(nodes + list(children)))

View file

@ -12,7 +12,7 @@ class CollectFileDependencies(pyblish.api.ContextPlugin):
families = ["renderlayer"]
@classmethod
def apply_settings(cls, project_settings, system_settings):
def apply_settings(cls, project_settings):
# Disable plug-in if not used for deadline submission anyway
settings = project_settings["deadline"]["publish"]["MayaSubmitDeadline"] # noqa
cls.enabled = settings.get("asset_dependencies", True)

View file

@ -14,7 +14,9 @@ class CollectUserDefinedAttributes(pyblish.api.InstancePlugin):
def process(self, instance):
# Collect user defined attributes.
if not instance.data.get("includeUserDefinedAttributes", False):
if not instance.data["creator_attributes"].get(
"includeUserDefinedAttributes"
):
return
if "out_hierarchy" in instance.data:

View file

@ -3,6 +3,7 @@ from maya import cmds
import pyblish.api
from ayon_core.hosts.maya.api import lib
from ayon_core.hosts.maya.api.yeti import get_yeti_user_variables
SETTINGS = {
@ -34,7 +35,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
- "increaseRenderBounds"
- "imageSearchPath"
Other information is the name of the transform and it's Colorbleed ID
Other information is the name of the transform and its `cbId`
"""
order = pyblish.api.CollectorOrder + 0.45
@ -54,6 +55,16 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
# Get specific node attributes
attr_data = {}
for attr in SETTINGS:
# Ignore non-existing attributes with a warning, e.g. cbId
# if they have not been generated yet
if not cmds.attributeQuery(attr, node=shape, exists=True):
self.log.warning(
"Attribute '{}' not found on Yeti node: {}".format(
attr, shape
)
)
continue
current = cmds.getAttr("%s.%s" % (shape, attr))
# change None to empty string as Maya doesn't support
# NoneType in attributes
@ -61,6 +72,12 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
current = ""
attr_data[attr] = current
# Get user variable attributes
user_variable_attrs = {
attr: lib.get_attribute("{}.{}".format(shape, attr))
for attr in get_yeti_user_variables(shape)
}
# Get transform data
parent = cmds.listRelatives(shape, parent=True)[0]
transform_data = {"name": parent, "cbId": lib.get_id(parent)}
@ -70,6 +87,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
"name": shape,
"cbId": lib.get_id(shape),
"attrs": attr_data,
"user_variables": user_variable_attrs
}
settings["nodes"].append(shape_data)

View file

@ -17,8 +17,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
families = ["ass"]
asciiAss = False
def process(self, instance):
staging_dir = self.staging_dir(instance)
def _pre_process(self, instance, staging_dir):
file_path = os.path.join(staging_dir, "{}.ass".format(instance.name))
# Mask
@ -70,24 +69,38 @@ class ExtractArnoldSceneSource(publish.Extractor):
"mask": mask
}
filenames, nodes_by_id = self._extract(
instance.data["contentMembers"], attribute_data, kwargs
)
if "representations" not in instance.data:
instance.data["representations"] = []
return attribute_data, kwargs
def process(self, instance):
staging_dir = self.staging_dir(instance)
attribute_data, kwargs = self._pre_process(instance, staging_dir)
filenames = self._extract(
instance.data["members"], attribute_data, kwargs
)
self._post_process(
instance, filenames, staging_dir, kwargs["startFrame"]
)
def _post_process(self, instance, filenames, staging_dir, frame_start):
nodes_by_id = self._nodes_by_id(instance[:])
representation = {
"name": "ass",
"ext": "ass",
"files": filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
"frameStart": kwargs["startFrame"]
"frameStart": frame_start
}
instance.data["representations"].append(representation)
json_path = os.path.join(staging_dir, "{}.json".format(instance.name))
json_path = os.path.join(
staging_dir, "{}.json".format(instance.name)
)
with open(json_path, "w") as f:
json.dump(nodes_by_id, f)
@ -104,13 +117,68 @@ class ExtractArnoldSceneSource(publish.Extractor):
"Extracted instance {} to: {}".format(instance.name, staging_dir)
)
# Extract proxy.
if not instance.data.get("proxy", []):
return
def _nodes_by_id(self, nodes):
nodes_by_id = defaultdict(list)
kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
for node in nodes:
id = lib.get_id(node)
filenames, _ = self._extract(
if id is None:
continue
# Converting Maya hierarchy separator "|" to Arnold separator "/".
nodes_by_id[id].append(node.replace("|", "/"))
return nodes_by_id
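A small sketch of the mapping `_nodes_by_id` builds; node paths and ids are hypothetical. Nodes without a `cbId` are skipped and Maya's "|" separators become Arnold's "/":
    # Assuming lib.get_id() returns "abc123" for geo_A and geo_B and
    # None for noId_C:
    # self._nodes_by_id(["|root|geo_A", "|root|geo_B", "|root|noId_C"])
    # -> {"abc123": ["/root/geo_A", "/root/geo_B"]}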
def _extract(self, nodes, attribute_data, kwargs):
filenames = []
with lib.attribute_values(attribute_data):
with lib.maintained_selection():
self.log.debug(
"Writing: {}".format(nodes)
)
cmds.select(nodes, noExpand=True)
self.log.debug(
"Extracting ass sequence with: {}".format(kwargs)
)
exported_files = cmds.arnoldExportAss(**kwargs)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.debug("Exported: {}".format(filenames))
return filenames
class ExtractArnoldSceneSourceProxy(ExtractArnoldSceneSource):
"""Extract the content of the instance to an Arnold Scene Source file."""
label = "Extract Arnold Scene Source Proxy"
hosts = ["maya"]
families = ["assProxy"]
asciiAss = True
def process(self, instance):
staging_dir = self.staging_dir(instance)
attribute_data, kwargs = self._pre_process(instance, staging_dir)
filenames, _ = self._duplicate_extract(
instance.data["members"], attribute_data, kwargs
)
self._post_process(
instance, filenames, staging_dir, kwargs["startFrame"]
)
kwargs["filename"] = os.path.join(
staging_dir, "{}_proxy.ass".format(instance.name)
)
filenames, _ = self._duplicate_extract(
instance.data["proxy"], attribute_data, kwargs
)
@ -125,12 +193,11 @@ class ExtractArnoldSceneSource(publish.Extractor):
instance.data["representations"].append(representation)
def _extract(self, nodes, attribute_data, kwargs):
def _duplicate_extract(self, nodes, attribute_data, kwargs):
self.log.debug(
"Writing {} with:\n{}".format(kwargs["filename"], kwargs)
)
filenames = []
nodes_by_id = defaultdict(list)
# Duplicating nodes so they are direct children of the world. This
# makes the hierarchy of any exported ass file the same.
with lib.delete_after() as delete_bin:
@ -147,7 +214,9 @@ class ExtractArnoldSceneSource(publish.Extractor):
if not shapes:
continue
duplicate_transform = cmds.duplicate(node)[0]
basename = cmds.duplicate(node)[0]
parents = cmds.ls(node, long=True)[0].split("|")[:-1]
duplicate_transform = "|".join(parents + [basename])
if cmds.listRelatives(duplicate_transform, parent=True):
duplicate_transform = cmds.parent(
@ -172,28 +241,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
duplicate_nodes.extend(shapes)
delete_bin.append(duplicate_transform)
# Copy cbId to mtoa_constant.
for node in duplicate_nodes:
# Converting Maya hierarchy separator "|" to Arnold
# separator "/".
nodes_by_id[lib.get_id(node)].append(node.replace("|", "/"))
with lib.attribute_values(attribute_data):
with lib.maintained_selection():
self.log.debug(
"Writing: {}".format(duplicate_nodes)
)
cmds.select(duplicate_nodes, noExpand=True)
self.log.debug(
"Extracting ass sequence with: {}".format(kwargs)
)
exported_files = cmds.arnoldExportAss(**kwargs)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.debug("Exported: {}".format(filenames))
nodes_by_id = self._nodes_by_id(duplicate_nodes)
filenames = self._extract(duplicate_nodes, attribute_data, kwargs)
return filenames, nodes_by_id

View file

@ -2,7 +2,7 @@ import os
import json
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.lib import extract_alembic
from ayon_core.hosts.maya.api.alembic import extract_alembic
from maya import cmds

View file

@ -35,7 +35,8 @@ class ExtractFBXAnimation(publish.Extractor):
fbx_exporter = fbx.FBXExtractor(log=self.log)
out_members = instance.data.get("animated_skeleton", [])
# Export
instance.data["constraints"] = True
# TODO: set up options for users to choose which flags
# they intend to export with
instance.data["skeletonDefinitions"] = True
instance.data["referencedAssetsContent"] = True
fbx_exporter.set_options_from_instance(instance)

View file

@ -1,17 +1,29 @@
import os
from collections import OrderedDict
from maya import cmds
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
extract_alembic,
get_all_children,
suspended_refresh,
maintained_selection,
iter_visible_nodes_in_range
)
from ayon_core.lib import (
BoolDef,
TextDef,
NumberDef,
EnumDef,
UISeparatorDef,
UILabelDef,
)
from ayon_core.pipeline.publish import AYONPyblishPluginMixin
from ayon_core.pipeline import KnownPublishError
class ExtractAlembic(publish.Extractor):
class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
"""Produce an alembic of just point positions and normals.
Positions and normals, uvs, creases are preserved, but nothing more,
@ -27,8 +39,35 @@ class ExtractAlembic(publish.Extractor):
targets = ["local", "remote"]
# From settings
attr = []
attrPrefix = []
bake_attributes = []
bake_attribute_prefixes = []
dataFormat = "ogawa"
eulerFilter = False
melPerFrameCallback = ""
melPostJobCallback = ""
overrides = []
preRoll = False
preRollStartFrame = 0
pythonPerFrameCallback = ""
pythonPostJobCallback = ""
renderableOnly = False
stripNamespaces = True
uvsOnly = False
uvWrite = False
userAttr = ""
userAttrPrefix = ""
verbose = False
visibleOnly = False
wholeFrameGeo = False
worldSpace = True
writeColorSets = False
writeCreases = False
writeFaceSets = False
writeNormals = True
writeUVSets = False
writeVisibility = False
def process(self, instance):
if instance.data.get("farm"):
@ -41,16 +80,38 @@ class ExtractAlembic(publish.Extractor):
start = float(instance.data.get("frameStartHandle", 1))
end = float(instance.data.get("frameEndHandle", 1))
attrs = instance.data.get("attr", "").split(";")
attrs = [value for value in attrs if value.strip()]
attribute_values = self.get_attr_values_from_data(
instance.data
)
attrs = [
attr.strip()
for attr in attribute_values.get("attr", "").split(";")
if attr.strip()
]
attrs += instance.data.get("userDefinedAttributes", [])
attrs += self.bake_attributes
attrs += ["cbId"]
attr_prefixes = instance.data.get("attrPrefix", "").split(";")
attr_prefixes = [value for value in attr_prefixes if value.strip()]
attr_prefixes = [
attr.strip()
for attr in attribute_values.get("attrPrefix", "").split(";")
if attr.strip()
]
attr_prefixes += self.bake_attribute_prefixes
user_attrs = [
attr.strip()
for attr in attribute_values.get("userAttr", "").split(";")
if attr.strip()
]
user_attr_prefixes = [
attr.strip()
for attr in attribute_values.get("userAttrPrefix", "").split(";")
if attr.strip()
]
self.log.debug("Extracting pointcache..")
dirname = self.staging_dir(instance)
@ -58,28 +119,82 @@ class ExtractAlembic(publish.Extractor):
filename = "{name}.abc".format(**instance.data)
path = os.path.join(parent_dir, filename)
options = {
"step": instance.data.get("step", 1.0),
"attr": attrs,
"attrPrefix": attr_prefixes,
"writeVisibility": True,
"writeCreases": True,
"writeColorSets": instance.data.get("writeColorSets", False),
"writeFaceSets": instance.data.get("writeFaceSets", False),
"uvWrite": True,
"selection": True,
"worldSpace": instance.data.get("worldSpace", True)
}
root = None
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
options["root"] = roots
root = roots
if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
kwargs = {
"file": path,
"attr": attrs,
"attrPrefix": attr_prefixes,
"userAttr": user_attrs,
"userAttrPrefix": user_attr_prefixes,
"dataFormat": attribute_values.get("dataFormat", self.dataFormat),
"endFrame": end,
"eulerFilter": attribute_values.get(
"eulerFilter", self.eulerFilter
),
"preRoll": attribute_values.get("preRoll", self.preRoll),
"preRollStartFrame": attribute_values.get(
"preRollStartFrame", self.preRollStartFrame
),
"renderableOnly": attribute_values.get(
"renderableOnly", self.renderableOnly
),
"root": root,
"selection": True,
"startFrame": start,
"step": instance.data.get(
"creator_attributes", {}
).get("step", 1.0),
"stripNamespaces": attribute_values.get(
"stripNamespaces", self.stripNamespaces
),
"uvWrite": attribute_values.get("uvWrite", self.uvWrite),
"verbose": attribute_values.get("verbose", self.verbose),
"wholeFrameGeo": attribute_values.get(
"wholeFrameGeo", self.wholeFrameGeo
),
"worldSpace": attribute_values.get("worldSpace", self.worldSpace),
"writeColorSets": attribute_values.get(
"writeColorSets", self.writeColorSets
),
"writeCreases": attribute_values.get(
"writeCreases", self.writeCreases
),
"writeFaceSets": attribute_values.get(
"writeFaceSets", self.writeFaceSets
),
"writeUVSets": attribute_values.get(
"writeUVSets", self.writeUVSets
),
"writeVisibility": attribute_values.get(
"writeVisibility", self.writeVisibility
),
"uvsOnly": attribute_values.get(
"uvsOnly", self.uvsOnly
),
"melPerFrameCallback": attribute_values.get(
"melPerFrameCallback", self.melPerFrameCallback
),
"melPostJobCallback": attribute_values.get(
"melPostJobCallback", self.melPostJobCallback
),
"pythonPerFrameCallback": attribute_values.get(
"pythonPerFrameCallback", self.pythonPostJobCallback
),
"pythonPostJobCallback": attribute_values.get(
"pythonPostJobCallback", self.pythonPostJobCallback
),
# Note that this converts `writeNormals` to `noNormals` for the
# `AbcExport` equivalent in `extract_alembic`
"noNormals": not attribute_values.get(
"writeNormals", self.writeNormals
),
}
if instance.data.get("visibleOnly", False):
# If we only want to include nodes that are visible in the frame
@ -87,20 +202,19 @@ class ExtractAlembic(publish.Extractor):
# flag does not filter out those that are only hidden on some
# frames as it counts "animated" or "connected" visibilities as
# if it's always visible.
nodes = list(iter_visible_nodes_in_range(nodes,
start=start,
end=end))
nodes = list(
iter_visible_nodes_in_range(nodes, start=start, end=end)
)
suspend = not instance.data.get("refresh", False)
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(
file=path,
startFrame=start,
endFrame=end,
**options
self.log.debug(
"Running `extract_alembic` with the keyword arguments: "
"{}".format(kwargs)
)
extract_alembic(**kwargs)
if "representations" not in instance.data:
instance.data["representations"] = []
@ -124,22 +238,17 @@ class ExtractAlembic(publish.Extractor):
return
path = path.replace(".abc", "_proxy.abc")
kwargs["file"] = path
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
options["root"] = instance.data["proxyRoots"]
kwargs["root"] = instance.data["proxyRoots"]
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(instance.data["proxy"])
extract_alembic(
file=path,
startFrame=start,
endFrame=end,
**options
)
extract_alembic(**kwargs)
representation = {
"name": "proxy",
"ext": "abc",
@ -152,24 +261,265 @@ class ExtractAlembic(publish.Extractor):
def get_members_and_roots(self, instance):
return instance[:], instance.data.get("setMembers")
@classmethod
def get_attribute_defs(cls):
if not cls.overrides:
return []
override_defs = OrderedDict({
"eulerFilter": BoolDef(
"eulerFilter",
label="Euler Filter",
default=cls.eulerFilter,
tooltip="Apply Euler filter while sampling rotations."
),
"renderableOnly": BoolDef(
"renderableOnly",
label="Renderable Only",
default=cls.renderableOnly,
tooltip="Only export renderable visible shapes."
),
"stripNamespaces": BoolDef(
"stripNamespaces",
label="Strip Namespaces",
default=cls.stripNamespaces,
tooltip=(
"Namespaces will be stripped off of the node before being "
"written to Alembic."
)
),
"uvsOnly": BoolDef(
"uvsOnly",
label="UVs Only",
default=cls.uvsOnly,
tooltip=(
"If this flag is present, only uv data for PolyMesh and "
"SubD shapes will be written to the Alembic file."
)
),
"uvWrite": BoolDef(
"uvWrite",
label="UV Write",
default=cls.uvWrite,
tooltip=(
"Uv data for PolyMesh and SubD shapes will be written to "
"the Alembic file."
)
),
"verbose": BoolDef(
"verbose",
label="Verbose",
default=cls.verbose,
tooltip="Prints the current frame that is being evaluated."
),
"visibleOnly": BoolDef(
"visibleOnly",
label="Visible Only",
default=cls.visibleOnly,
tooltip="Only export dag objects visible during frame range."
),
"wholeFrameGeo": BoolDef(
"wholeFrameGeo",
label="Whole Frame Geo",
default=cls.wholeFrameGeo,
tooltip=(
"Data for geometry will only be written out on whole "
"frames."
)
),
"worldSpace": BoolDef(
"worldSpace",
label="World Space",
default=cls.worldSpace,
tooltip="Any root nodes will be stored in world space."
),
"writeColorSets": BoolDef(
"writeColorSets",
label="Write Color Sets",
default=cls.writeColorSets,
tooltip="Write vertex colors with the geometry."
),
"writeCreases": BoolDef(
"writeCreases",
label="Write Creases",
default=cls.writeCreases,
tooltip="Write the geometry's edge and vertex crease "
"information."
),
"writeFaceSets": BoolDef(
"writeFaceSets",
label="Write Face Sets",
default=cls.writeFaceSets,
tooltip="Write face sets with the geometry."
),
"writeNormals": BoolDef(
"writeNormals",
label="Write Normals",
default=cls.writeNormals,
tooltip="Write normals with the deforming geometry."
),
"writeUVSets": BoolDef(
"writeUVSets",
label="Write UV Sets",
default=cls.writeUVSets,
tooltip=(
"Write all uv sets on MFnMeshes as vector 2 indexed "
"geometry parameters with face varying scope."
)
),
"writeVisibility": BoolDef(
"writeVisibility",
label="Write Visibility",
default=cls.writeVisibility,
tooltip=(
"Visibility state will be stored in the Alembic file. "
"Otherwise everything written out is treated as visible."
)
),
"preRoll": BoolDef(
"preRoll",
label="Pre Roll",
default=cls.preRoll,
tooltip="This frame range will not be sampled."
),
"preRollStartFrame": NumberDef(
"preRollStartFrame",
label="Pre Roll Start Frame",
tooltip=(
"The frame to start scene evaluation at. This is used"
" to set the starting frame for time dependent "
"translations and can be used to evaluate run-up that"
" isn't actually translated."
),
default=cls.preRollStartFrame
),
"dataFormat": EnumDef(
"dataFormat",
label="Data Format",
items=["ogawa", "HDF"],
default=cls.dataFormat,
tooltip="The data format to use to write the file."
),
"attr": TextDef(
"attr",
label="Custom Attributes",
placeholder="attr1; attr2; ...",
default=cls.attr,
tooltip=(
"Attributes matching by name will be included in the "
"Alembic export. Attributes should be separated by "
"semi-colon `;`"
)
),
"attrPrefix": TextDef(
"attrPrefix",
label="Custom Attributes Prefix",
placeholder="prefix1; prefix2; ...",
default=cls.attrPrefix,
tooltip=(
"Attributes starting with these prefixes will be included "
"in the Alembic export. Attributes should be separated by "
"semi-colon `;`"
)
),
"userAttr": TextDef(
"userAttr",
label="User Attr",
placeholder="attr1; attr2; ...",
default=cls.userAttr,
tooltip=(
"Attributes matching by name will be included in the "
"Alembic export. Attributes should be separated by "
"semi-colon `;`"
)
),
"userAttrPrefix": TextDef(
"userAttrPrefix",
label="User Attr Prefix",
placeholder="prefix1; prefix2; ...",
default=cls.userAttrPrefix,
tooltip=(
"Attributes starting with these prefixes will be included "
"in the Alembic export. Attributes should be separated by "
"semi-colon `;`"
)
),
"melPerFrameCallback": TextDef(
"melPerFrameCallback",
label="Mel Per Frame Callback",
default=cls.melPerFrameCallback,
tooltip=(
"When each frame (and the static frame) is evaluated the "
"string specified is evaluated as a Mel command."
)
),
"melPostJobCallback": TextDef(
"melPostJobCallback",
label="Mel Post Job Callback",
default=cls.melPostJobCallback,
tooltip=(
"When the translation has finished the string specified "
"is evaluated as a Mel command."
)
),
"pythonPerFrameCallback": TextDef(
"pythonPerFrameCallback",
label="Python Per Frame Callback",
default=cls.pythonPerFrameCallback,
tooltip=(
"When each frame (and the static frame) is evaluated the "
"string specified is evaluated as a python command."
)
),
"pythonPostJobCallback": TextDef(
"pythonPostJobCallback",
label="Python Post Frame Callback",
default=cls.pythonPostJobCallback,
tooltip=(
"When the translation has finished the string specified "
"is evaluated as a python command."
)
)
})
defs = super(ExtractAlembic, cls).get_attribute_defs()
defs.extend([
UISeparatorDef("sep_alembic_options"),
UILabelDef("Alembic Options"),
])
# The Arguments that can be modified by the Publisher
overrides = set(cls.overrides)
for key, value in override_defs.items():
if key not in overrides:
continue
defs.append(value)
defs.append(
UISeparatorDef("sep_alembic_options_end")
)
return defs
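A hedged illustration of how the `overrides` setting gates the definitions above; the settings values are hypothetical:
    # With project settings exposing only two options, e.g.
    #   ExtractAlembic.overrides = ["worldSpace", "writeNormals"]
    # get_attribute_defs() returns the opening separator, the
    # "Alembic Options" label, the two matching BoolDefs and the
    # closing separator; every other option keeps its class-level
    # default at export time.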
class ExtractAnimation(ExtractAlembic):
label = "Extract Animation"
label = "Extract Animation (Alembic)"
families = ["animation"]
def get_members_and_roots(self, instance):
# Collect the out set nodes
out_sets = [node for node in instance if node.endswith("out_SET")]
if len(out_sets) != 1:
raise RuntimeError("Couldn't find exactly one out_SET: "
"{0}".format(out_sets))
raise KnownPublishError(
"Couldn't find exactly one out_SET: {0}".format(out_sets)
)
out_set = out_sets[0]
roots = cmds.sets(out_set, query=True)
roots = cmds.sets(out_set, query=True) or []
# Include all descendants
nodes = roots + cmds.listRelatives(roots,
allDescendents=True,
fullPath=True) or []
nodes = roots.copy()
nodes.extend(get_all_children(roots, ignore_intermediate_objects=True))
return nodes, roots

View file

@ -3,8 +3,8 @@ import os
from maya import cmds
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
extract_alembic,
suspended_refresh,
maintained_selection,
iter_visible_nodes_in_range

View file

@ -5,7 +5,13 @@ import os
from maya import cmds
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.lib import maintained_selection
from ayon_core.hosts.maya.api.lib import (
maintained_selection,
renderlayer
)
from ayon_core.hosts.maya.api.render_setup_tools import (
allow_export_from_render_setup_layer
)
class ExtractRedshiftProxy(publish.Extractor):
@ -18,6 +24,9 @@ class ExtractRedshiftProxy(publish.Extractor):
def process(self, instance):
"""Extractor entry point."""
# Make sure Redshift is loaded
cmds.loadPlugin("redshift4maya", quiet=True)
staging_dir = self.staging_dir(instance)
file_name = "{}.rs".format(instance.name)
file_path = os.path.join(staging_dir, file_name)
@ -60,14 +69,22 @@ class ExtractRedshiftProxy(publish.Extractor):
# Write out rs file
self.log.debug("Writing: '%s'" % file_path)
# Allow overriding what renderlayer to export from. By default force
# it to the default render layer. (Note that the renderlayer isn't
# currently exposed as an attribute to artists)
layer = instance.data.get("renderLayer", "defaultRenderLayer")
with maintained_selection():
cmds.select(instance.data["setMembers"], noExpand=True)
cmds.file(file_path,
pr=False,
force=True,
type="Redshift Proxy",
exportSelected=True,
options=rs_options)
with renderlayer(layer):
with allow_export_from_render_setup_layer():
cmds.select(instance.data["setMembers"], noExpand=True)
cmds.file(file_path,
preserveReferences=False,
force=True,
type="Redshift Proxy",
exportSelected=True,
options=rs_options)
if "representations" not in instance.data:
instance.data["representations"] = []

View file

@ -5,8 +5,8 @@ import os
from maya import cmds # noqa
from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
extract_alembic,
suspended_refresh,
maintained_selection
)

View file

@ -5,7 +5,7 @@ import copy
from maya import cmds
import pyblish.api
from ayon_core.hosts.maya.api.lib import extract_alembic
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.pipeline import publish

View file

@ -0,0 +1,130 @@
import inspect
import pyblish.api
from ayon_core.pipeline import OptionalPyblishPluginMixin
from ayon_core.pipeline.publish import RepairAction, PublishValidationError
class ValidateAlembicDefaultsPointcache(
pyblish.api.InstancePlugin, OptionalPyblishPluginMixin
):
"""Validate the attributes on the instance are defaults.
The defaults are defined in the project settings.
"""
order = pyblish.api.ValidatorOrder
families = ["pointcache"]
hosts = ["maya"]
label = "Validate Alembic Options Defaults"
actions = [RepairAction]
optional = True
plugin_name = "ExtractAlembic"
@classmethod
def _get_settings(cls, context):
maya_settings = context.data["project_settings"]["maya"]
settings = maya_settings["publish"]["ExtractAlembic"]
return settings
@classmethod
def _get_publish_attributes(cls, instance):
return instance.data["publish_attributes"][cls.plugin_name]
def process(self, instance):
if not self.is_active(instance.data):
return
settings = self._get_settings(instance.context)
attributes = self._get_publish_attributes(instance)
invalid = {}
for key, value in attributes.items():
if key not in settings:
# This may occur if attributes have changed over time and an
# existing instance has older legacy attributes that do not
# match the current settings definition.
self.log.warning(
"Publish attribute %s not found in Alembic Export "
"default settings. Ignoring validation for attribute.",
key
)
continue
default_value = settings[key]
# Lists are best compared sorted since we can't rely on the order
# of the items.
if isinstance(value, list):
value = sorted(value)
default_value = sorted(default_value)
if value != default_value:
invalid[key] = value, default_value
if invalid:
non_defaults = "\n".join(
f"- {key}: {value} \t(default: {default_value})"
for key, (value, default_value) in invalid.items()
)
raise PublishValidationError(
"Alembic extract options differ from default values:\n"
f"{non_defaults}",
description=self.get_description()
)
@staticmethod
def get_description():
return inspect.cleandoc(
"""### Alembic Extract settings differ from defaults
The alembic export options differ from the project default values.
If this is intentional, you can disable this validation by
disabling **Validate Alembic Options Defaults**.
If not, you may use the "Repair" action to revert all the options
to their default values.
"""
)
@classmethod
def repair(cls, instance):
# Find create instance twin.
create_context = instance.context.data["create_context"]
create_instance = create_context.get_instance_by_id(
instance.data["instance_id"]
)
# Set the settings values on the create context then save to workfile.
settings = cls._get_settings(instance.context)
attributes = cls._get_publish_attributes(create_instance)
for key in attributes:
if key not in settings:
# This may occur if attributes have changed over time and an
# existing instance has older legacy attributes that do not
# match the current settings definition.
cls.log.warning(
"Publish attribute %s not found in Alembic Export "
"default settings. Ignoring repair for attribute.",
key
)
continue
attributes[key] = settings[key]
create_context.save_changes()
class ValidateAlembicDefaultsAnimation(
ValidateAlembicDefaultsPointcache
):
"""Validate the attributes on the instance are defaults.
The defaults are defined in the project settings.
"""
label = "Validate Alembic Options Defaults"
families = ["animation"]
plugin_name = "ExtractAnimation"

View file

@ -1,71 +0,0 @@
import pyblish.api
import ayon_core.hosts.maya.api.action
from ayon_core.pipeline.publish import (
PublishValidationError,
ValidateContentsOrder,
OptionalPyblishPluginMixin
)
from maya import cmds
class ValidateAnimatedReferenceRig(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Validate all nodes in skeletonAnim_SET are referenced"""
order = ValidateContentsOrder
hosts = ["maya"]
families = ["animation.fbx"]
label = "Animated Reference Rig"
accepted_controllers = ["transform", "locator"]
actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction]
optional = False
def process(self, instance):
if not self.is_active(instance.data):
return
animated_sets = instance.data.get("animated_skeleton", [])
if not animated_sets:
self.log.debug(
"No nodes found in skeletonAnim_SET. "
"Skipping validation of animated reference rig..."
)
return
for animated_reference in animated_sets:
is_referenced = cmds.referenceQuery(
animated_reference, isNodeReferenced=True)
if not bool(is_referenced):
raise PublishValidationError(
"All the content in skeletonAnim_SET"
" should be referenced nodes"
)
invalid_controls = self.validate_controls(animated_sets)
if invalid_controls:
raise PublishValidationError(
"All the content in skeletonAnim_SET"
" should be transforms"
)
@classmethod
def validate_controls(self, set_members):
"""Check if the controller set contains only accepted node types.
Checks if all its set members are within the hierarchy of the root
Checks if the node types of the set members are valid
Args:
set_members: list of nodes of the skeleton_anim_set
hierarchy: list of nodes which reside under the root node
Returns:
errors (list)
"""
# Validate control types
invalid = []
set_members = cmds.ls(set_members, long=True)
for node in set_members:
if cmds.nodeType(node) not in self.accepted_controllers:
invalid.append(node)
return invalid
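For reference, since this plugin is removed by the commit: its core check can be reproduced standalone. The node name below is hypothetical; cmds.referenceQuery with isNodeReferenced=True returns True only for nodes that come from a referenced file.

from maya import cmds

node = "rig:joint1"  # hypothetical member of skeletonAnim_SET
if not cmds.referenceQuery(node, isNodeReferenced=True):
    print("{} is not referenced".format(node))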

View file

@ -1,30 +1,56 @@
from maya import cmds
import pyblish.api
from ayon_core.pipeline.publish import (
ValidateContentsOrder, PublishValidationError
)
from ayon_core.hosts.maya.api.lib import is_visible
class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
"""Validate Arnold Scene Source.
We require at least 1 root node/parent for the meshes. This is to ensure we
can duplicate the nodes and preserve the names.
Ensure no nodes are hidden.
"""
If using proxies we need the nodes to share the same names and not be
order = ValidateContentsOrder
hosts = ["maya"]
families = ["ass", "assProxy"]
label = "Validate Arnold Scene Source"
def process(self, instance):
# Validate against having nodes hidden, which will result in the
# extraction to ignore the node.
nodes = instance.data["members"] + instance.data.get("proxy", [])
nodes = [x for x in nodes if cmds.objectType(x, isAType='dagNode')]
hidden_nodes = [
x for x in nodes if not is_visible(x, intermediateObject=False)
]
if hidden_nodes:
raise PublishValidationError(
"Found hidden nodes:\n\n{}\n\nPlease unhide for"
" publishing.".format("\n".join(hidden_nodes))
)
class ValidateArnoldSceneSourceProxy(pyblish.api.InstancePlugin):
"""Validate Arnold Scene Source Proxy.
When using proxies we need the nodes to share the same names and not be
parent to the world. This ends up needing at least two groups with content
nodes and proxy nodes in another.
"""
order = ValidateContentsOrder
hosts = ["maya"]
families = ["ass"]
label = "Validate Arnold Scene Source"
families = ["assProxy"]
label = "Validate Arnold Scene Source Proxy"
def _get_nodes_by_name(self, nodes):
ungrouped_nodes = []
nodes_by_name = {}
parents = []
same_named_nodes = {}
for node in nodes:
node_split = node.split("|")
if len(node_split) == 2:
@ -35,33 +61,16 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
parents.append(parent)
node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
# Check for same named nodes, which can happen in different
# hierarchies.
if node_name in nodes_by_name:
try:
same_named_nodes[node_name].append(node)
except KeyError:
same_named_nodes[node_name] = [
nodes_by_name[node_name], node
]
nodes_by_name[node_name] = node
if same_named_nodes:
message = "Found nodes with the same name:"
for name, nodes in same_named_nodes.items():
message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes))
raise PublishValidationError(message)
return ungrouped_nodes, nodes_by_name, parents
def process(self, instance):
# Validate against nodes directly parented to world.
ungrouped_nodes = []
nodes, content_nodes_by_name, content_parents = (
self._get_nodes_by_name(instance.data["contentMembers"])
self._get_nodes_by_name(instance.data["members"])
)
ungrouped_nodes.extend(nodes)
@ -70,24 +79,21 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
)
ungrouped_nodes.extend(nodes)
# Validate against nodes directly parented to world.
if ungrouped_nodes:
raise PublishValidationError(
"Found nodes parented to the world: {}\n"
"All nodes need to be grouped.".format(ungrouped_nodes)
)
# Proxy validation.
if not instance.data.get("proxy", []):
return
# Validate for content and proxy nodes amount being the same.
if len(instance.data["contentMembers"]) != len(instance.data["proxy"]):
if len(instance.data["members"]) != len(instance.data["proxy"]):
raise PublishValidationError(
"Amount of content nodes ({}) and proxy nodes ({}) needs to "
"be the same.".format(
len(instance.data["contentMembers"]),
len(instance.data["proxy"])
"be the same.\nContent nodes: {}\nProxy nodes:{}".format(
len(instance.data["members"]),
len(instance.data["proxy"]),
instance.data["members"],
instance.data["proxy"]
)
)
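A quick illustration of the short-name matching that _get_nodes_by_name relies on: stripping the DAG path and then the namespace leaves the bare name that each content node must share with its proxy counterpart. The path below is hypothetical.

node = "|content_GRP|char:pCube1"
node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
assert node_name == "pCube1"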

Some files were not shown because too many files have changed in this diff