get up to date with colorbleed. add 'studio.' to families temporarily
This commit is contained in:
parent d341fbbc03
commit 9a99d640fe
176 changed files with 5217 additions and 2535 deletions
@@ -1,7 +1,4 @@
import os
import sys

import imp

from pyblish import api as pyblish
from avalon import api as avalon
116 pype/action.py
@@ -1,5 +1,6 @@
# absolute_import is needed to counter the `module has no cmds error` in Maya
from __future__ import absolute_import

import pyblish.api

@@ -41,7 +42,7 @@ def get_errored_plugins_from_data(context):
class RepairAction(pyblish.api.Action):
    """Repairs the action

    To process the repairing this requires a static `repair(instance)` method
    is available on the plugin.

    """
@@ -67,7 +68,7 @@ class RepairAction(pyblish.api.Action):
class RepairContextAction(pyblish.api.Action):
    """Repairs the action

    To process the repairing this requires a static `repair(instance)` method
    is available on the plugin.

    """
@@ -89,114 +90,3 @@ class RepairContextAction(pyblish.api.Action):
        plugin.repair()
-
-
-class SelectInvalidAction(pyblish.api.Action):
-    """Select invalid nodes in Maya when plug-in failed.
-
-    To retrieve the invalid nodes this assumes a static `get_invalid()`
-    method is available on the plugin.
-
-    """
-    label = "Select invalid"
-    on = "failed"  # This action is only available on a failed plug-in
-    icon = "search"  # Icon from Awesome Icon
-
-    def process(self, context, plugin):
-
-        try:
-            from maya import cmds
-        except ImportError:
-            raise ImportError("Current host is not Maya")
-
-        errored_instances = get_errored_instances_from_context(context)
-
-        # Apply pyblish.logic to get the instances for the plug-in
-        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
-
-        # Get the invalid nodes for the plug-ins
-        self.log.info("Finding invalid nodes..")
-        invalid = list()
-        for instance in instances:
-            invalid_nodes = plugin.get_invalid(instance)
-            if invalid_nodes:
-                if isinstance(invalid_nodes, (list, tuple)):
-                    invalid.extend(invalid_nodes)
-                else:
-                    self.log.warning("Plug-in returned to be invalid, "
-                                     "but has no selectable nodes.")
-
-        # Ensure unique (process each node only once)
-        invalid = list(set(invalid))
-
-        if invalid:
-            self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid))
-            cmds.select(invalid, replace=True, noExpand=True)
-        else:
-            self.log.info("No invalid nodes found.")
-            cmds.select(deselect=True)
-
-
-class GenerateUUIDsOnInvalidAction(pyblish.api.Action):
-    """Generate UUIDs on the invalid nodes in the instance.
-
-    Invalid nodes are those returned by the plugin's `get_invalid` method.
-    As such it is the plug-in's responsibility to ensure the nodes that
-    receive new UUIDs are actually invalid.
-
-    Requires:
-        - instance.data["asset"]
-
-    """
-
-    label = "Regenerate UUIDs"
-    on = "failed"  # This action is only available on a failed plug-in
-    icon = "wrench"  # Icon from Awesome Icon
-
-    def process(self, context, plugin):
-
-        self.log.info("Finding bad nodes..")
-
-        # Get the errored instances
-        errored_instances = []
-        for result in context.data["results"]:
-            if result["error"] is not None and result["instance"] is not None:
-                if result["error"]:
-                    instance = result["instance"]
-                    errored_instances.append(instance)
-
-        # Apply pyblish logic to get the instances for the plug-in
-        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
-
-        # Get the nodes from the all instances that ran through this plug-in
-        all_invalid = []
-        for instance in instances:
-            invalid = plugin.get_invalid(instance)
-            if invalid:
-
-                self.log.info("Fixing instance {}".format(instance.name))
-                self._update_id_attribute(instance, invalid)
-
-                all_invalid.extend(invalid)
-
-        if not all_invalid:
-            self.log.info("No invalid nodes found.")
-            return
-
-        all_invalid = list(set(all_invalid))
-        self.log.info("Generated ids on nodes: {0}".format(all_invalid))
-
-    def _update_id_attribute(self, instance, nodes):
-        """Delete the id attribute
-
-        Args:
-            instance: The instance we're fixing for
-            nodes (list): all nodes to regenerate ids on
-        """
-
-        import pype.maya.lib as lib
-        import avalon.io as io
-
-        asset = instance.data['asset']
-        asset_id = io.find_one({"name": asset, "type": "asset"},
-                               projection={"_id": True})['_id']
-        for node, _id in lib.generate_ids(nodes, asset_id=asset_id):
-            lib.set_id(node, _id, overwrite=True)
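The Repair* actions kept in this module expect the failing plug-in to expose a static repair method, as the docstrings say. A minimal sketch of a validator wired to RepairAction (the plug-in name and the validation rule are made up for illustration):

import pyblish.api
from pype.action import RepairAction


class ValidateNodeIds(pyblish.api.InstancePlugin):
    """Hypothetical validator: every member of the instance needs an id."""

    order = pyblish.api.ValidatorOrder
    label = "Validate Node Ids"
    actions = [RepairAction]

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Members missing ids: %s" % invalid)

    @staticmethod
    def get_invalid(instance):
        # Illustrative rule only: dict members without an "id" key are invalid
        return [node for node in instance if not node.get("id")]

    @staticmethod
    def repair(instance):
        # Called by RepairAction for each errored instance
        for node in ValidateNodeIds.get_invalid(instance):
            node["id"] = "generated-id"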
@@ -1,5 +1,3 @@
from collections import OrderedDict

from .plugin import (
    Extractor,
@@ -12,15 +10,12 @@ from .plugin import (
# temporary fix, might
from .action import (
    get_errored_instances_from_context,
    SelectInvalidAction,
    GenerateUUIDsOnInvalidAction,
    RepairAction,
    RepairContextAction
)

-all = [
+__all__ = [
    # plugin classes
    "Extractor",
    # ordering
@@ -30,7 +25,5 @@ all = [
    "ValidateMeshOrder",
    # action
    "get_errored_instances_from_context",
    "SelectInvalidAction",
    "GenerateUUIDsOnInvalidAction",
    "RepairAction"
]
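For context on the `all` → `__all__` fix above: only the dunder name affects imports; a plain `all` is just a shadowing variable. A quick illustration (module name is hypothetical):

# hypothetical module pype_demo.py
Extractor = object()
helper = object()

__all__ = ["Extractor"]   # a plain `all = [...]` would have no effect here

# elsewhere:
# from pype_demo import *   -> binds Extractor, but not helper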
@@ -24,9 +24,9 @@ def install():
    pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)

    # Disable all families except for the ones we explicitly want to see
-    family_states = ["imagesequence",
-                     "camera",
-                     "pointcache"]
+    family_states = ["studio.imagesequence",
+                     "studio.camera",
+                     "studio.pointcache"]

    avalon.data["familiesStateDefault"] = False
    avalon.data["familiesStateToggled"] = family_states
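The two `avalon.data` keys above drive the loader's family filter: `familiesStateDefault` sets the default visibility and `familiesStateToggled` lists the exceptions. A minimal sketch of that logic (the function name is mine, not avalon's):

def family_visible(family,
                   default=False,
                   toggled=("studio.imagesequence",
                            "studio.camera",
                            "studio.pointcache")):
    """Return whether a subset family shows up in the loader."""
    return (not default) if family in toggled else default


assert family_visible("studio.camera") is True
assert family_visible("studio.look") is False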
91 pype/houdini/__init__.py Normal file
@@ -0,0 +1,91 @@
import os
import logging

import hou

from pyblish import api as pyblish

from avalon import api as avalon
from avalon.houdini import pipeline as houdini

from pype.houdini import lib

from pype.lib import (
    any_outdated,
    update_task_from_path
)


PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")

PUBLISH_PATH = os.path.join(PLUGINS_DIR, "houdini", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "houdini", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "houdini", "create")

log = logging.getLogger("pype.houdini")


def install():

    pyblish.register_plugin_path(PUBLISH_PATH)
    avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
    avalon.register_plugin_path(avalon.Creator, CREATE_PATH)

    log.info("Installing callbacks ... ")
    avalon.on("init", on_init)
    avalon.on("save", on_save)
    avalon.on("open", on_open)

    log.info("Overriding existing event 'taskChanged'")

    log.info("Setting default family states for loader..")
    avalon.data["familiesStateToggled"] = ["studio.imagesequence"]


def on_init(*args):
    houdini.on_houdini_initialize()


def on_save(*args):

    avalon.logger.info("Running callback on save..")

    update_task_from_path(hou.hipFile.path())

    nodes = lib.get_id_required_nodes()
    for node, new_id in lib.generate_ids(nodes):
        lib.set_id(node, new_id, overwrite=False)


def on_open(*args):

    avalon.logger.info("Running callback on open..")

    update_task_from_path(hou.hipFile.path())

    if any_outdated():
        from ..widgets import popup

        log.warning("Scene has outdated content.")

        # Get main window
        parent = hou.ui.mainQtWindow()

        if parent is None:
            log.info("Skipping outdated content pop-up "
                     "because Maya window can't be found.")
        else:

            # Show outdated pop-up
            def _on_show_inventory():
                import avalon.tools.cbsceneinventory as tool
                tool.show(parent=parent)

            dialog = popup.Popup(parent=parent)
            dialog.setWindowTitle("Maya scene has outdated content")
            dialog.setMessage("There are outdated containers in "
                              "your Maya scene.")
            dialog.on_show.connect(_on_show_inventory)
            dialog.show()
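Worth flagging in this new Houdini host: the log message and pop-up text at the end of `on_open` still say "Maya", apparently carried over from the Maya host module. A hedged fix-up of just that block, using the same popup API as above:

def _show_outdated_popup(parent, log):
    """Sketch only: same pop-up as above, with Houdini wording."""
    from pype.widgets import popup

    def _on_show_inventory():
        import avalon.tools.cbsceneinventory as tool
        tool.show(parent=parent)

    if parent is None:
        log.info("Skipping outdated content pop-up "
                 "because the Houdini main window can't be found.")
        return

    dialog = popup.Popup(parent=parent)
    dialog.setWindowTitle("Houdini scene has outdated content")
    dialog.setMessage("There are outdated containers in "
                      "your Houdini scene.")
    dialog.on_show.connect(_on_show_inventory)
    dialog.show()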
190 pype/houdini/lib.py Normal file
@@ -0,0 +1,190 @@
import uuid

from contextlib import contextmanager

import hou

from avalon import api, io
from avalon.houdini import lib


def set_id(node, unique_id, overwrite=False):

    exists = node.parm("id")
    if not exists:
        lib.imprint(node, {"id": unique_id})

    if not exists and overwrite:
        node.setParm("id", unique_id)


def get_id(node):
    """
    Get the `cbId` attribute of the given node
    Args:
        node (hou.Node): the name of the node to retrieve the attribute from

    Returns:
        str

    """

    if node is None:
        return

    id = node.parm("id")
    if node is None:
        return
    return id


def generate_ids(nodes, asset_id=None):
    """Returns new unique ids for the given nodes.

    Note: This does not assign the new ids, it only generates the values.

    To assign new ids using this method:
    >>> nodes = ["a", "b", "c"]
    >>> for node, id in generate_ids(nodes):
    >>>     set_id(node, id)

    To also override any existing values (and assign regenerated ids):
    >>> nodes = ["a", "b", "c"]
    >>> for node, id in generate_ids(nodes):
    >>>     set_id(node, id, overwrite=True)

    Args:
        nodes (list): List of nodes.
        asset_id (str or bson.ObjectId): The database id for the *asset* to
            generate for. When None provided the current asset in the
            active session is used.

    Returns:
        list: A list of (node, id) tuples.

    """

    if asset_id is None:
        # Get the asset ID from the database for the asset of current context
        asset_data = io.find_one({"type": "asset",
                                  "name": api.Session["AVALON_ASSET"]},
                                 projection={"_id": True})
        assert asset_data, "No current asset found in Session"
        asset_id = asset_data['_id']

    node_ids = []
    for node in nodes:
        _, uid = str(uuid.uuid4()).rsplit("-", 1)
        unique_id = "{}:{}".format(asset_id, uid)
        node_ids.append((node, unique_id))

    return node_ids


def get_id_required_nodes():

    valid_types = ["geometry"]
    nodes = {n for n in hou.node("/out").children() if
             n.type().name() in valid_types}

    return list(nodes)


def get_additional_data(container):
    """Not implemented yet!"""
    return container


def set_parameter_callback(node, parameter, language, callback):
    """Link a callback to a parameter of a node

    Args:
        node(hou.Node): instance of the nodee
        parameter(str): name of the parameter
        language(str): name of the language, e.g.: python
        callback(str): command which needs to be triggered

    Returns:
        None

    """

    template_grp = node.parmTemplateGroup()
    template = template_grp.find(parameter)
    if not template:
        return

    script_language = (hou.scriptLanguage.Python if language == "python" else
                       hou.scriptLanguage.Hscript)

    template.setScriptCallbackLanguage(script_language)
    template.setScriptCallback(callback)

    template.setTags({"script_callback": callback,
                      "script_callback_language": language.lower()})

    # Replace the existing template with the adjusted one
    template_grp.replace(parameter, template)

    node.setParmTemplateGroup(template_grp)


def set_parameter_callbacks(node, parameter_callbacks):
    """Set callbacks for multiple parameters of a node

    Args:
        node(hou.Node): instance of a hou.Node
        parameter_callbacks(dict): collection of parameter and callback data
            example: {"active" :
                        {"language": "python",
                         "callback": "print('hello world)'"}
                     }
    Returns:
        None
    """
    for parameter, data in parameter_callbacks.items():
        language = data["language"]
        callback = data["callback"]

        set_parameter_callback(node, parameter, language, callback)


def get_output_parameter(node):
    """Return the render output parameter name of the given node

    Example:
        root = hou.node("/obj")
        my_alembic_node = root.createNode("alembic")
        get_output_parameter(my_alembic_node)
        # Result: "output"

    Args:
        node(hou.Node): node instance

    Returns:
        hou.Parm

    """

    node_type = node.type().name()
    if node_type == "geometry":
        return node.parm("sopoutput")

    elif node_type == "alembic":
        return node.parm("filename")

    else:
        raise TypeError("Node type '%s' not supported" % node_type)


@contextmanager
def attribute_values(node, data):

    previous_attrs = {key: node.parm(key).eval() for key in data.keys()}
    try:
        node.setParms(data)
        yield
    except Exception as exc:
        pass
    finally:
        node.setParms(previous_attrs)
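A few spots in this new module look buggy as committed: `set_id` can never take its overwrite branch because the guard is `not exists and overwrite`; `get_id` re-checks `node` instead of the parm it just fetched (and returns the `hou.Parm`, not its string value); and `attribute_values` silently swallows exceptions. A hedged sketch of what the intent appears to be, assuming the module's existing imports (`lib`, `contextmanager`):

def set_id(node, unique_id, overwrite=False):
    parm = node.parm("id")
    if not parm:
        lib.imprint(node, {"id": unique_id})
    elif overwrite:                      # was: `if not exists and overwrite`
        node.setParms({"id": unique_id})


def get_id(node):
    if node is None:
        return
    parm = node.parm("id")
    if parm is None:                     # was: `if node is None` (again)
        return
    return parm.eval()                   # return the value, not the hou.Parm


@contextmanager
def attribute_values(node, data):
    previous = {key: node.parm(key).eval() for key in data}
    try:
        node.setParms(data)
        yield
    finally:                             # restore and re-raise, no `except: pass`
        node.setParms(previous)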
79 pype/lib.py
@@ -2,6 +2,7 @@ import os
import re
import logging
import importlib
+import itertools

from .vendor import pather
from .vendor.pather.error import ParseError
@@ -12,6 +13,24 @@ import avalon.api
log = logging.getLogger(__name__)


+def pairwise(iterable):
+    """s -> (s0,s1), (s2,s3), (s4, s5), ..."""
+    a = iter(iterable)
+    return itertools.izip(a, a)
+
+
+def grouper(iterable, n, fillvalue=None):
+    """Collect data into fixed-length chunks or blocks
+
+    Examples:
+        grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
+
+    """
+
+    args = [iter(iterable)] * n
+    return itertools.izip_longest(fillvalue=fillvalue, *args)
+
+
def is_latest(representation):
    """Return whether the representation is from latest version
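Note that `itertools.izip` and `izip_longest` only exist on Python 2; under Python 3 these helpers would raise AttributeError. A version-agnostic sketch of the same helpers:

import itertools

try:                                     # Python 2
    zip_ = itertools.izip
    zip_longest = itertools.izip_longest
except AttributeError:                   # Python 3
    zip_ = zip
    zip_longest = itertools.zip_longest


def pairwise(iterable):
    """s -> (s0, s1), (s2, s3), (s4, s5), ..."""
    a = iter(iterable)
    return zip_(a, a)


def grouper(iterable, n, fillvalue=None):
    """grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"""
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)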
@@ -75,7 +94,7 @@ def update_task_from_path(path):

    # Find the current context from the filename
    project = io.find_one({"type": "project"},
-                          projection={"pype.template.work": True})
+                          projection={"config.template.work": True})
    template = project['config']['template']['work']
    # Force to use the registered to root to avoid using wrong paths
    template = pather.format(template, {"root": avalon.api.registered_root()})
@@ -252,21 +271,67 @@ def collect_container_metadata(container):
    return hostlib.get_additional_data(container)


-def get_project_fps():
+def get_asset_fps():
    """Returns project's FPS, if not found will return 25 by default

    Returns:
        int, float

    """

    key = "fps"

+    # FPS from asset data (if set)
+    asset_data = get_asset_data()
+    if key in asset_data:
+        return asset_data[key]
+
+    # FPS from project data (if set)
+    project_data = get_project_data()
+    if key in project_data:
+        return project_data[key]
+
+    # Fallback to 25 FPS
+    return 25.0
+
+
+def get_project_data():
+    """Get the data of the current project
+
+    The data of the project can contain things like:
+        resolution
+        fps
+        renderer
+
+    Returns:
+        dict:
+
+    """

    project_name = io.active_project()
    project = io.find_one({"name": project_name,
                           "type": "project"},
-                         projection={"config": True})
+                         projection={"data": True})

-    config = project.get("config", None)
-    assert config, "This is a bug"
+    data = project.get("data", {})

-    fps = pype.get("fps", 25.0)
+    return data

-    return fps

+
+def get_asset_data(asset=None):
+    """Get the data from the current asset
+
+    Args:
+        asset(str, Optional): name of the asset, eg:
+
+    Returns:
+        dict
+    """
+
+    asset_name = asset or avalon.api.Session["AVALON_ASSET"]
+    document = io.find_one({"name": asset_name,
+                            "type": "asset"})
+
+    data = document.get("data", {})
+
+    return data
@@ -5,6 +5,7 @@ import weakref
from maya import utils, cmds, mel

from avalon import api as avalon, pipeline, maya
+from avalon.maya.pipeline import IS_HEADLESS
from pyblish import api as pyblish

from ..lib import (

@@ -34,16 +35,24 @@ def install():

    log.info("Installing callbacks ... ")
    avalon.on("init", on_init)

+    # Callbacks below are not required for headless mode, the `init` however
+    # is important to load referenced Alembics correctly at rendertime.
+    if IS_HEADLESS:
+        log.info("Running in headless mode, skipping Colorbleed Maya "
+                 "save/open/new callback installation..")
+        return
+
    avalon.on("save", on_save)
    avalon.on("open", on_open)
+    avalon.on("new", on_new)
    avalon.before("save", on_before_save)

    log.info("Overriding existing event 'taskChanged'")
    override_event("taskChanged", on_task_changed)

    log.info("Setting default family states for loader..")
-    avalon.data["familiesStateToggled"] = ["imagesequence"]
+    avalon.data["familiesStateToggled"] = ["studio.imagesequence"]


def uninstall():

@@ -126,12 +135,13 @@ def on_open(_):
    from avalon.vendor.Qt import QtWidgets
    from ..widgets import popup

-    # Ensure scene's FPS is set to project config
-    lib.validate_fps()
-
    # Update current task for the current scene
    update_task_from_path(cmds.file(query=True, sceneName=True))

+    # Validate FPS after update_task_from_path to
+    # ensure it is using correct FPS for the asset
+    lib.validate_fps()
+
    if any_outdated():
        log.warning("Scene has outdated content.")

@@ -158,6 +168,13 @@ def on_open(_):
    dialog.show()


+def on_new(_):
+    """Set project resolution and fps when create a new file"""
+    avalon.logger.info("Running callback on new..")
+    with maya.suspended_refresh():
+        lib.set_context_settings()
+
+
def on_task_changed(*args):
    """Wrapped function of app initialize and maya's on task changed"""
128 pype/maya/action.py Normal file
@@ -0,0 +1,128 @@
# absolute_import is needed to counter the `module has no cmds error` in Maya
from __future__ import absolute_import

import pyblish.api

from ..action import get_errored_instances_from_context


class GenerateUUIDsOnInvalidAction(pyblish.api.Action):
    """Generate UUIDs on the invalid nodes in the instance.

    Invalid nodes are those returned by the plugin's `get_invalid` method.
    As such it is the plug-in's responsibility to ensure the nodes that
    receive new UUIDs are actually invalid.

    Requires:
        - instance.data["asset"]

    """

    label = "Regenerate UUIDs"
    on = "failed"  # This action is only available on a failed plug-in
    icon = "wrench"  # Icon from Awesome Icon

    def process(self, context, plugin):

        from maya import cmds

        self.log.info("Finding bad nodes..")

        errored_instances = get_errored_instances_from_context(context)

        # Apply pyblish logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)

        # Get the nodes from the all instances that ran through this plug-in
        all_invalid = []
        for instance in instances:
            invalid = plugin.get_invalid(instance)

            # Don't allow referenced nodes to get their ids regenerated to
            # avoid loaded content getting messed up with reference edits
            if invalid:
                referenced = {node for node in invalid if
                              cmds.referenceQuery(node, isNodeReferenced=True)}
                if referenced:
                    self.log.warning("Skipping UUID generation on referenced "
                                     "nodes: {}".format(list(referenced)))
                    invalid = [node for node in invalid
                               if node not in referenced]

            if invalid:

                self.log.info("Fixing instance {}".format(instance.name))
                self._update_id_attribute(instance, invalid)

                all_invalid.extend(invalid)

        if not all_invalid:
            self.log.info("No invalid nodes found.")
            return

        all_invalid = list(set(all_invalid))
        self.log.info("Generated ids on nodes: {0}".format(all_invalid))

    def _update_id_attribute(self, instance, nodes):
        """Delete the id attribute

        Args:
            instance: The instance we're fixing for
            nodes (list): all nodes to regenerate ids on
        """

        import pype.maya.lib as lib
        import avalon.io as io

        asset = instance.data['asset']
        asset_id = io.find_one({"name": asset, "type": "asset"},
                               projection={"_id": True})['_id']
        for node, _id in lib.generate_ids(nodes, asset_id=asset_id):
            lib.set_id(node, _id, overwrite=True)


class SelectInvalidAction(pyblish.api.Action):
    """Select invalid nodes in Maya when plug-in failed.

    To retrieve the invalid nodes this assumes a static `get_invalid()`
    method is available on the plugin.

    """
    label = "Select invalid"
    on = "failed"  # This action is only available on a failed plug-in
    icon = "search"  # Icon from Awesome Icon

    def process(self, context, plugin):

        try:
            from maya import cmds
        except ImportError:
            raise ImportError("Current host is not Maya")

        errored_instances = get_errored_instances_from_context(context)

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)

        # Get the invalid nodes for the plug-ins
        self.log.info("Finding invalid nodes..")
        invalid = list()
        for instance in instances:
            invalid_nodes = plugin.get_invalid(instance)
            if invalid_nodes:
                if isinstance(invalid_nodes, (list, tuple)):
                    invalid.extend(invalid_nodes)
                else:
                    self.log.warning("Plug-in returned to be invalid, "
                                     "but has no selectable nodes.")

        # Ensure unique (process each node only once)
        invalid = list(set(invalid))

        if invalid:
            self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid))
            cmds.select(invalid, replace=True, noExpand=True)
        else:
            self.log.info("No invalid nodes found.")
            cmds.select(deselect=True)
622 pype/maya/lib.py
@@ -11,12 +11,13 @@ import contextlib
from collections import OrderedDict, defaultdict

from maya import cmds, mel
+import maya.api.OpenMaya as om

from avalon import api, maya, io, pipeline
from avalon.vendor.six import string_types
import avalon.maya.lib

-from config import lib
+from pype import lib


log = logging.getLogger(__name__)
@@ -76,6 +77,7 @@ _alembic_options = {
    "writeColorSets": bool,
    "writeFaceSets": bool,
    "writeCreases": bool,  # Maya 2015 Ext1+
+    "writeUVSets": bool,   # Maya 2017+
    "dataFormat": str,
    "root": (list, tuple),
    "attr": (list, tuple),
@@ -89,7 +91,12 @@ _alembic_options = {
}

INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000}
-FLOAT_FPS = {23.976, 29.97, 29.97, 47.952, 59.94}
+FLOAT_FPS = {23.976, 29.97, 47.952, 59.94}


+def _get_mel_global(name):
+    """Return the value of a mel global variable"""
+    return mel.eval("$%s = $%s;" % (name, name))
+
+
def matrix_equals(a, b, tolerance=1e-10):
@@ -304,6 +311,33 @@ def attribute_values(attr_values):
            cmds.setAttr(attr, value)


+@contextlib.contextmanager
+def keytangent_default(in_tangent_type='auto',
+                       out_tangent_type='auto'):
+    """Set the default keyTangent for new keys during this context"""
+
+    original_itt = cmds.keyTangent(query=True, g=True, itt=True)[0]
+    original_ott = cmds.keyTangent(query=True, g=True, ott=True)[0]
+    cmds.keyTangent(g=True, itt=in_tangent_type)
+    cmds.keyTangent(g=True, ott=out_tangent_type)
+    try:
+        yield
+    finally:
+        cmds.keyTangent(g=True, itt=original_itt)
+        cmds.keyTangent(g=True, ott=original_ott)
+
+
+@contextlib.contextmanager
+def undo_chunk():
+    """Open a undo chunk during context."""
+
+    try:
+        cmds.undoInfo(openChunk=True)
+        yield
+    finally:
+        cmds.undoInfo(closeChunk=True)
+
+
@contextlib.contextmanager
def renderlayer(layer):
    """Set the renderlayer during the context"""
@@ -337,6 +371,126 @@ def evaluation(mode="off"):
    cmds.evaluationManager(mode=original)


+@contextlib.contextmanager
+def no_refresh():
+    """Temporarily disables Maya's UI updates
+
+    Note:
+        This only disabled the main pane and will sometimes still
+        trigger updates in torn off panels.
+
+    """
+
+    pane = _get_mel_global('gMainPane')
+    state = cmds.paneLayout(pane, query=True, manage=True)
+    cmds.paneLayout(pane, edit=True, manage=False)
+
+    try:
+        yield
+    finally:
+        cmds.paneLayout(pane, edit=True, manage=state)
+
+
+@contextlib.contextmanager
+def empty_sets(sets, force=False):
+    """Remove all members of the sets during the context"""
+
+    assert isinstance(sets, (list, tuple))
+
+    original = dict()
+    original_connections = []
+
+    # Store original state
+    for obj_set in sets:
+        members = cmds.sets(obj_set, query=True)
+        original[obj_set] = members
+
+    try:
+        for obj_set in sets:
+            cmds.sets(clear=obj_set)
+            if force:
+                # Break all connections if force is enabled, this way we
+                # prevent Maya from exporting any reference nodes which are
+                # connected with placeHolder[x] attributes
+                plug = "%s.dagSetMembers" % obj_set
+                connections = cmds.listConnections(plug,
+                                                   source=True,
+                                                   destination=False,
+                                                   plugs=True,
+                                                   connections=True) or []
+                original_connections.extend(connections)
+                for dest, src in lib.pairwise(connections):
+                    cmds.disconnectAttr(src, dest)
+        yield
+    finally:
+
+        for dest, src in lib.pairwise(original_connections):
+            cmds.connectAttr(src, dest)
+
+        # Restore original members
+        for origin_set, members in original.iteritems():
+            cmds.sets(members, forceElement=origin_set)
+
+
+@contextlib.contextmanager
+def renderlayer(layer):
+    """Set the renderlayer during the context
+
+    Arguments:
+        layer (str): Name of layer to switch to.
+
+    """
+
+    original = cmds.editRenderLayerGlobals(query=True,
+                                           currentRenderLayer=True)
+
+    try:
+        cmds.editRenderLayerGlobals(currentRenderLayer=layer)
+        yield
+    finally:
+        cmds.editRenderLayerGlobals(currentRenderLayer=original)
+
+
+class delete_after(object):
+    """Context Manager that will delete collected nodes after exit.
+
+    This allows to ensure the nodes added to the context are deleted
+    afterwards. This is useful if you want to ensure nodes are deleted
+    even if an error is raised.
+
+    Examples:
+        with delete_after() as delete_bin:
+            cube = maya.cmds.polyCube()
+            delete_bin.extend(cube)
+            # cube exists
+        # cube deleted
+
+    """
+
+    def __init__(self, nodes=None):
+
+        self._nodes = list()
+
+        if nodes:
+            self.extend(nodes)
+
+    def append(self, node):
+        self._nodes.append(node)
+
+    def extend(self, nodes):
+        self._nodes.extend(nodes)
+
+    def __iter__(self):
+        return iter(self._nodes)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        if self._nodes:
+            cmds.delete(self._nodes)
+
+
+def get_renderer(layer):
+    with renderlayer(layer):
+        return cmds.getAttr("defaultRenderGlobals.currentRenderer")
@@ -365,6 +519,161 @@ def no_undo(flush=False):
    cmds.undoInfo(**{keyword: original})


+def get_shader_assignments_from_shapes(shapes):
+    """Return the shape assignment per related shading engines.
+
+    Returns a dictionary where the keys are shadingGroups and the values are
+    lists of assigned shapes or shape-components.
+
+    For the 'shapes' this will return a dictionary like:
+        {
+            "shadingEngineX": ["nodeX", "nodeY"],
+            "shadingEngineY": ["nodeA", "nodeB"]
+        }
+
+    Args:
+        shapes (list): The shapes to collect the assignments for.
+
+    Returns:
+        dict: The {shadingEngine: shapes} relationships
+
+    """
+
+    shapes = cmds.ls(shapes,
+                     long=True,
+                     selection=True,
+                     shapes=True,
+                     objectsOnly=True)
+    if not shapes:
+        return {}
+
+    # Collect shading engines and their shapes
+    assignments = defaultdict(list)
+    for shape in shapes:
+
+        # Get unique shading groups for the shape
+        shading_groups = cmds.listConnections(shape,
+                                              source=False,
+                                              destination=True,
+                                              plugs=False,
+                                              connections=False,
+                                              type="shadingEngine") or []
+        shading_groups = list(set(shading_groups))
+        for shading_group in shading_groups:
+            assignments[shading_group].add(shape)
+
+    return dict(assignments)
+
+
+@contextlib.contextmanager
+def shader(nodes, shadingEngine="initialShadingGroup"):
+    """Assign a shader to nodes during the context"""
+
+    shapes = cmds.ls(nodes, dag=1, o=1, shapes=1, long=1)
+    original = get_shader_assignments_from_shapes(shapes)
+
+    try:
+        # Assign override shader
+        if shapes:
+            cmds.sets(shapes, edit=True, forceElement=shadingEngine)
+        yield
+    finally:
+
+        # Assign original shaders
+        for sg, members in original.items():
+            if members:
+                cmds.sets(shapes, edit=True, forceElement=shadingEngine)
+
+
+@contextlib.contextmanager
+def displaySmoothness(nodes,
+                      divisionsU=0,
+                      divisionsV=0,
+                      pointsWire=4,
+                      pointsShaded=1,
+                      polygonObject=1):
+    """Set the displaySmoothness during the context"""
+
+    # Ensure only non-intermediate shapes
+    nodes = cmds.ls(nodes,
+                    dag=1,
+                    shapes=1,
+                    long=1,
+                    noIntermediate=True)
+
+    def parse(node):
+        """Parse the current state of a node"""
+        state = {}
+        for key in ["divisionsU",
+                    "divisionsV",
+                    "pointsWire",
+                    "pointsShaded",
+                    "polygonObject"]:
+            value = cmds.displaySmoothness(node, query=1, **{key: True})
+            if value is not None:
+                state[key] = value[0]
+        return state
+
+    originals = dict((node, parse(node)) for node in nodes)
+
+    try:
+        # Apply current state
+        cmds.displaySmoothness(nodes,
+                               divisionsU=divisionsU,
+                               divisionsV=divisionsV,
+                               pointsWire=pointsWire,
+                               pointsShaded=pointsShaded,
+                               polygonObject=polygonObject)
+        yield
+    finally:
+        # Revert state
+        for node, state in originals.iteritems():
+            if state:
+                cmds.displaySmoothness(node, **state)
+
+
+@contextlib.contextmanager
+def no_display_layers(nodes):
+    """Ensure nodes are not in a displayLayer during context.
+
+    Arguments:
+        nodes (list): The nodes to remove from any display layer.
+
+    """
+
+    # Ensure long names
+    nodes = cmds.ls(nodes, long=True)
+
+    # Get the original state
+    lookup = set(nodes)
+    original = {}
+    for layer in cmds.ls(type='displayLayer'):
+
+        # Skip default layer
+        if layer == "defaultLayer":
+            continue
+
+        members = cmds.editDisplayLayerMembers(layer,
+                                               query=True,
+                                               fullNames=True)
+        if not members:
+            continue
+        members = set(members)
+
+        included = lookup.intersection(members)
+        if included:
+            original[layer] = list(included)
+
+    try:
+        # Add all nodes to default layer
+        cmds.editDisplayLayerMembers("defaultLayer", nodes, noRecurse=True)
+        yield
+    finally:
+        # Restore original members
+        for layer, members in original.iteritems():
+            cmds.editDisplayLayerMembers(layer, members, noRecurse=True)
+
+
@contextlib.contextmanager
def namespaced(namespace, new=True):
    """Work inside namespace during context
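Two details in the new helpers above look like bugs worth noting: `get_shader_assignments_from_shapes` builds a `defaultdict(list)` but calls `.add()` on it (a list has no `add`, and `selection=True` is passed to `cmds.ls` even though an explicit node list is given), and the `finally` block of `shader()` re-assigns the override shading engine instead of restoring the stored `original` assignments. A hedged corrected sketch of both:

from collections import defaultdict
import contextlib

from maya import cmds


def get_shader_assignments_from_shapes(shapes):
    """Corrected sketch: map shadingEngine -> assigned shapes."""
    shapes = cmds.ls(shapes, long=True, shapes=True, objectsOnly=True)
    assignments = defaultdict(list)
    for shape in shapes:
        shading_groups = set(cmds.listConnections(shape,
                                                  source=False,
                                                  destination=True,
                                                  type="shadingEngine") or [])
        for shading_group in shading_groups:
            assignments[shading_group].append(shape)  # list.append, not .add
    return dict(assignments)


@contextlib.contextmanager
def shader(nodes, shadingEngine="initialShadingGroup"):
    """Corrected sketch: restore the *original* assignments on exit."""
    shapes = cmds.ls(nodes, dag=True, objectsOnly=True, shapes=True, long=True)
    original = get_shader_assignments_from_shapes(shapes)
    try:
        if shapes:
            cmds.sets(shapes, edit=True, forceElement=shadingEngine)
        yield
    finally:
        for sg, members in original.items():
            if members:
                cmds.sets(members, edit=True, forceElement=sg)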
@@ -607,6 +916,8 @@ def extract_alembic(file,

        # Discard unknown options
        if key not in _alembic_options:
            log.warning("extract_alembic() does not support option '%s'. "
                        "Flag will be ignored..", key)
            options.pop(key)
            continue
@@ -761,10 +1072,20 @@ def get_id(node):
    if node is None:
        return

-    if not cmds.attributeQuery("cbId", node=node, exists=True):
+    sel = om.MSelectionList()
+    sel.add(node)
+
+    api_node = sel.getDependNode(0)
+    fn = om.MFnDependencyNode(api_node)
+
+    if not fn.hasAttribute("cbId"):
        return

-    return cmds.getAttr("{}.cbId".format(node))
+    try:
+        return fn.findPlug("cbId", False).asString()
+    except RuntimeError:
+        log.warning("Failed to retrieve cbId on %s", node)
+        return


def generate_ids(nodes, asset_id=None):
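The switch from `cmds.attributeQuery`/`getAttr` to the OpenMaya 2.0 API presumably avoids per-call string parsing, which matters when `get_id` runs over thousands of nodes. A hedged micro-example of the same pattern for any string attribute (function name is mine):

import maya.api.OpenMaya as om


def read_string_attr(node, attr):
    """Read a string attribute via the API; returns None when absent."""
    sel = om.MSelectionList()
    sel.add(node)
    fn = om.MFnDependencyNode(sel.getDependNode(0))
    if not fn.hasAttribute(attr):
        return None
    return fn.findPlug(attr, False).asString()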
@@ -825,7 +1146,6 @@ def set_id(node, unique_id, overwrite=False):

    """

-    attr = "{0}.cbId".format(node)
    exists = cmds.attributeQuery("cbId", node=node, exists=True)

    # Add the attribute if it does not exist yet
@@ -834,6 +1154,7 @@ def set_id(node, unique_id, overwrite=False):

    # Set the value
    if not exists or overwrite:
+        attr = "{0}.cbId".format(node)
        cmds.setAttr(attr, unique_id, type="string")
@@ -1012,11 +1333,11 @@ def assign_look(nodes, subset="lookDefault"):
    # Group all nodes per asset id
    grouped = defaultdict(list)
    for node in nodes:
-        studio_id = get_id(node)
-        if not studio_id:
+        pype_id = get_id(node)
+        if not pype_id:
            continue

-        parts = studio_id.split(":", 1)
+        parts = pype_id.split(":", 1)
        grouped[parts[0]].append(node)

    for asset_id, asset_nodes in grouped.items():
@@ -1039,7 +1360,7 @@ def assign_look(nodes, subset="lookDefault"):
    version = io.find_one({"parent": subset_data['_id'],
                           "type": "version",
                           "data.families":
-                               {"$in": ["look"]}
+                               {"$in": ["studio.look"]}
                           },
                          sort=[("name", -1)],
                          projection={"_id": True, "name": True})
@@ -1368,6 +1689,7 @@ def get_id_from_history(node):
    return _id


+# Project settings
def set_scene_fps(fps, update=True):
    """Set FPS from project configuration
@@ -1381,21 +1703,104 @@ def set_scene_fps(fps, update=True):
    """

    if fps in FLOAT_FPS:
-        unit = "{:f}fps".format(fps)
+        unit = "{}fps".format(fps)

    elif fps in INT_FPS:
-        unit = "{:d}fps".format(int(fps))
+        unit = "{}fps".format(int(fps))

    else:
        raise ValueError("Unsupported FPS value: `%s`" % fps)

-    log.info("Updating FPS to '{}'".format(unit))
+    # Get time slider current state
+    start_frame = cmds.playbackOptions(query=True, minTime=True)
+    end_frame = cmds.playbackOptions(query=True, maxTime=True)
+
+    # Get animation data
+    animation_start = cmds.playbackOptions(query=True, animationStartTime=True)
+    animation_end = cmds.playbackOptions(query=True, animationEndTime=True)
+
+    current_frame = cmds.currentTime(query=True)
+
+    log.info("Setting scene FPS to: '{}'".format(unit))
    cmds.currentUnit(time=unit, updateAnimation=update)

+    # Set time slider data back to previous state
+    cmds.playbackOptions(edit=True, minTime=start_frame)
+    cmds.playbackOptions(edit=True, maxTime=end_frame)
+
+    # Set animation data
+    cmds.playbackOptions(edit=True, animationStartTime=animation_start)
+    cmds.playbackOptions(edit=True, animationEndTime=animation_end)
+
+    cmds.currentTime(current_frame, edit=True, update=True)
+
+    # Force file stated to 'modified'
+    cmds.file(modified=True)
+
+
+def set_scene_resolution(width, height):
+    """Set the render resolution
+
+    Args:
+        width(int): value of the width
+        height(int): value of the height
+
+    Returns:
+        None
+
+    """
+
+    control_node = "defaultResolution"
+    current_renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer")
+
+    # Give VRay a helping hand as it is slightly different from the rest
+    if current_renderer == "vray":
+        vray_node = "vraySettings"
+        if cmds.objExists(vray_node):
+            control_node = vray_node
+        else:
+            log.error("Can't set VRay resolution because there is no node "
+                      "named: `%s`" % vray_node)
+
+    log.info("Setting scene resolution to: %s x %s" % (width, height))
+    cmds.setAttr("%s.width" % control_node, width)
+    cmds.setAttr("%s.height" % control_node, height)
+
+
+def set_context_settings():
+    """Apply the project settings from the project definition
+
+    Settings can be overwritten by an asset if the asset.data contains
+    any information regarding those settings.
+
+    Examples of settings:
+        fps
+        resolution
+        renderer
+
+    Returns:
+        None
+    """
+
+    # Todo (Wijnand): apply renderer and resolution of project
+
+    project_data = lib.get_project_data()
+    asset_data = lib.get_asset_data()
+
+    # Set project fps
+    fps = asset_data.get("fps", project_data.get("fps", 25))
+    set_scene_fps(fps)
+
+    # Set project resolution
+    width_key = "resolution_width"
+    height_key = "resolution_height"
+
+    width = asset_data.get(width_key, project_data.get(width_key, 1920))
+    height = asset_data.get(height_key, project_data.get(height_key, 1080))
+
+    set_scene_resolution(width, height)
+
+
+# Valid FPS
def validate_fps():
    """Validate current scene FPS and show pop-up when it is incorrect
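The `{:f}` → `{}` change in `set_scene_fps` matters because `{:f}` pads to six decimal places, producing a time-unit name Maya does not recognize. A quick illustration:

fps = 23.976

# Old formatting: padded to six decimals, not a valid Maya unit name
assert "{:f}fps".format(fps) == "23.976000fps"

# New formatting: repr-style, matches Maya's "23.976fps" unit
assert "{}fps".format(fps) == "23.976fps"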
@@ -1405,7 +1810,7 @@ def validate_fps():

    """

-    fps = lib.get_project_fps()  # can be int or float
+    fps = lib.get_asset_fps()
    current_fps = mel.eval('currentTimeUnitToFPS()')  # returns float

    if current_fps != fps:
@@ -1436,3 +1841,194 @@ def validate_fps():
        return False

    return True
+
+
+def bake(nodes,
+         frame_range=None,
+         step=1.0,
+         simulation=True,
+         preserve_outside_keys=False,
+         disable_implicit_control=True,
+         shape=True):
+    """Bake the given nodes over the time range.
+
+    This will bake all attributes of the node, including custom attributes.
+
+    Args:
+        nodes (list): Names of transform nodes, eg. camera, light.
+        frame_range (list): frame range with start and end frame.
+            or if None then takes timeSliderRange
+        simulation (bool): Whether to perform a full simulation of the
+            attributes over time.
+        preserve_outside_keys (bool): Keep keys that are outside of the baked
+            range.
+        disable_implicit_control (bool): When True will disable any
+            constraints to the object.
+        shape (bool): When True also bake attributes on the children shapes.
+        step (float): The step size to sample by.
+
+    Returns:
+        None
+
+    """
+
+    # Parse inputs
+    if not nodes:
+        return
+
+    assert isinstance(nodes, (list, tuple)), "Nodes must be a list or tuple"
+
+    # If frame range is None fall back to time slider playback time range
+    if frame_range is None:
+        frame_range = [cmds.playbackOptions(query=True, minTime=True),
+                       cmds.playbackOptions(query=True, maxTime=True)]
+
+    # If frame range is single frame bake one frame more,
+    # otherwise maya.cmds.bakeResults gets confused
+    if frame_range[1] == frame_range[0]:
+        frame_range[1] += 1
+
+    # Bake it
+    with keytangent_default(in_tangent_type='auto',
+                            out_tangent_type='auto'):
+        cmds.bakeResults(nodes,
+                         simulation=simulation,
+                         preserveOutsideKeys=preserve_outside_keys,
+                         disableImplicitControl=disable_implicit_control,
+                         shape=shape,
+                         sampleBy=step,
+                         time=(frame_range[0], frame_range[1]))
+
+
+def bake_to_world_space(nodes,
+                        frame_range=None,
+                        simulation=True,
+                        preserve_outside_keys=False,
+                        disable_implicit_control=True,
+                        shape=True,
+                        step=1.0):
+    """Bake the nodes to world space transformation (incl. other attributes)
+
+    Bakes the transforms to world space (while maintaining all its animated
+    attributes and settings) by duplicating the node. Then parents it to world
+    and constrains to the original.
+
+    Other attributes are also baked by connecting all attributes directly.
+    Baking is then done using Maya's bakeResults command.
+
+    See `bake` for the argument documentation.
+
+    Returns:
+        list: The newly created and baked node names.
+
+    """
+
+    def _get_attrs(node):
+        """Workaround for buggy shape attribute listing with listAttr"""
+        attrs = cmds.listAttr(node,
+                              write=True,
+                              scalar=True,
+                              settable=True,
+                              connectable=True,
+                              keyable=True,
+                              shortNames=True) or []
+        valid_attrs = []
+        for attr in attrs:
+            node_attr = '{0}.{1}'.format(node, attr)
+
+            # Sometimes Maya returns 'non-existent' attributes for shapes
+            # so we filter those out
+            if not cmds.attributeQuery(attr, node=node, exists=True):
+                continue
+
+            # We only need those that have a connection, just to be safe
+            # that it's actually keyable/connectable anyway.
+            if cmds.connectionInfo(node_attr,
+                                   isDestination=True):
+                valid_attrs.append(attr)
+
+        return valid_attrs
+
+    transform_attrs = set(["t", "r", "s",
+                           "tx", "ty", "tz",
+                           "rx", "ry", "rz",
+                           "sx", "sy", "sz"])
+
+    world_space_nodes = []
+    with delete_after() as delete_bin:
+
+        # Create the duplicate nodes that are in world-space connected to
+        # the originals
+        for node in nodes:
+
+            # Duplicate the node
+            short_name = node.rsplit("|", 1)[-1]
+            new_name = "{0}_baked".format(short_name)
+            new_node = cmds.duplicate(node,
+                                      name=new_name,
+                                      renameChildren=True)[0]
+
+            # Connect all attributes on the node except for transform
+            # attributes
+            attrs = _get_attrs(node)
+            attrs = set(attrs) - transform_attrs if attrs else []
+
+            for attr in attrs:
+                orig_node_attr = '{0}.{1}'.format(node, attr)
+                new_node_attr = '{0}.{1}'.format(new_node, attr)
+
+                # unlock to avoid connection errors
+                cmds.setAttr(new_node_attr, lock=False)
+
+                cmds.connectAttr(orig_node_attr,
+                                 new_node_attr,
+                                 force=True)
+
+            # If shapes are also baked then connect those keyable attributes
+            if shape:
+                children_shapes = cmds.listRelatives(new_node,
+                                                     children=True,
+                                                     fullPath=True,
+                                                     shapes=True)
+                if children_shapes:
+                    orig_children_shapes = cmds.listRelatives(node,
+                                                              children=True,
+                                                              fullPath=True,
+                                                              shapes=True)
+                    for orig_shape, new_shape in zip(orig_children_shapes,
+                                                     children_shapes):
+                        attrs = _get_attrs(orig_shape)
+                        for attr in attrs:
+                            orig_node_attr = '{0}.{1}'.format(orig_shape, attr)
+                            new_node_attr = '{0}.{1}'.format(new_shape, attr)
+
+                            # unlock to avoid connection errors
+                            cmds.setAttr(new_node_attr, lock=False)
+
+                            cmds.connectAttr(orig_node_attr,
+                                             new_node_attr,
+                                             force=True)
+
+            # Parent to world
+            if cmds.listRelatives(new_node, parent=True):
+                new_node = cmds.parent(new_node, world=True)[0]
+
+            # Unlock transform attributes so constraint can be created
+            for attr in transform_attrs:
+                cmds.setAttr('{0}.{1}'.format(new_node, attr), lock=False)
+
+            # Constraints
+            delete_bin.extend(cmds.parentConstraint(node, new_node, mo=False))
+            delete_bin.extend(cmds.scaleConstraint(node, new_node, mo=False))
+
+            world_space_nodes.append(new_node)
+
+        bake(world_space_nodes,
+             frame_range=frame_range,
+             step=step,
+             simulation=simulation,
+             preserve_outside_keys=preserve_outside_keys,
+             disable_implicit_control=disable_implicit_control,
+             shape=shape)
+
+    return world_space_nodes
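A minimal usage sketch for the new bake helpers (node names are illustrative; run inside a Maya session):

from maya import cmds
import pype.maya.lib as lib

# Bake a camera to world space over an explicit range; the constraints are
# cleaned up by delete_after, the baked duplicates are returned
camera = "|shot_cam_grp|camera1"          # hypothetical transform
baked = lib.bake_to_world_space([camera], frame_range=[1001, 1100])
print(baked)                               # e.g. ['camera1_baked']

# Or bake in place at half-frame samples
lib.bake([camera], frame_range=[1001, 1100], step=0.5)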
3726 pype/maya/menu.json
File diff suppressed because it is too large
@@ -23,10 +23,15 @@ def _get_menu():

def deferred():

-    import scriptsmenu.launchformaya as launchformaya
-    import scriptsmenu.scriptsmenu as scriptsmenu
-    log.info("Attempting to install scripts menu..")
+    log.info("Attempting to install ...")
+    try:
+        import scriptsmenu.launchformaya as launchformaya
+        import scriptsmenu.scriptsmenu as scriptsmenu
+    except ImportError:
+        log.warning("Skipping pype.menu install, because "
+                    "'scriptsmenu' module seems unavailable.")
+        return

    # load configuration of custom menu
    config_path = os.path.join(os.path.dirname(__file__), "menu.json")

@@ -44,7 +49,7 @@ def uninstall():

    menu = _get_menu()
    if menu:
-        log.info("Attempting to uninstall ..")
+        log.info("Attempting to uninstall..")

        try:
            menu.deleteLater()

@@ -56,7 +61,7 @@ def uninstall():
def install():

    if cmds.about(batch=True):
-        print("Skipping studio.menu initialization in batch mode..")
+        print("Skipping pype.menu initialization in batch mode..")
        return

    uninstall()
@@ -95,6 +95,10 @@ class ReferenceLoader(api.Loader):
        if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
            continue

+        # Ignore _UNKNOWN_REF_NODE_ (PLN-160)
+        if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
+            continue
+
        references.add(ref)

    assert references, "No reference node found in container"
@@ -9,7 +9,7 @@ class CreateTiffSaver(avalon.api.Creator):
    name = "tiffDefault"
    label = "Create Tiff Saver"
    hosts = ["fusion"]
-    family = "saver"
+    family = "studio.saver"

    def process(self):
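From here on the commit is the same mechanical rename across the Fusion and generic plug-ins: every family `x` becomes `studio.x`. Since the commit message calls this temporary, here is a hedged sketch of a helper that could centralize the prefixing (the constant and function are mine, not part of the commit):

FAMILY_PREFIX = "studio."   # hypothetical central constant


def prefixed(*families):
    """Return the given base family names with the studio prefix applied."""
    return ["{}{}".format(FAMILY_PREFIX, family) for family in families]


# e.g. in a plug-in:
# families = prefixed("saver")            -> ["studio.saver"]
# families = prefixed("camera", "look")   -> ["studio.camera", "studio.look"]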
|
@@ -8,11 +8,11 @@ from avalon import api
class FusionSetFrameRangeLoader(api.Loader):
    """Specific loader of Alembic for the avalon.animation family"""

-    families = ["animation",
-                "camera",
-                "imagesequence",
-                "yeticache",
-                "pointcache"]
+    families = ["studio.animation",
+                "studio.camera",
+                "studio.imagesequence",
+                "studio.yeticache",
+                "studio.pointcache"]
    representations = ["*"]

    label = "Set frame range"

@@ -41,11 +41,11 @@ class FusionSetFrameRangeLoader(api.Loader):
class FusionSetFrameRangeWithHandlesLoader(api.Loader):
    """Specific loader of Alembic for the avalon.animation family"""

-    families = ["animation",
-                "camera",
-                "imagesequence",
-                "yeticache",
-                "pointcache"]
+    families = ["studio.animation",
+                "studio.camera",
+                "studio.imagesequence",
+                "studio.yeticache",
+                "studio.pointcache"]
    representations = ["*"]

    label = "Set frame range (with handles)"
@@ -113,7 +113,7 @@ def loader_shift(loader, frame, relative=True):
class FusionLoadSequence(api.Loader):
    """Load image sequence into Fusion"""

-    families = ["imagesequence"]
+    families = ["studio.imagesequence"]
    representations = ["*"]

    label = "Load sequence"
|
@ -76,8 +76,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
|
|||
"outputDir": os.path.dirname(path),
|
||||
"ext": ext, # todo: should be redundant
|
||||
"label": label,
|
||||
"families": ["saver"],
|
||||
"family": "saver",
|
||||
"families": ["studio.saver"],
|
||||
"family": "studio.saver",
|
||||
"active": active,
|
||||
"publish": active # backwards compatibility
|
||||
})
|
||||
|
|
|
|||
|
|
@@ -13,7 +13,7 @@ class CollectFusionRenderMode(pyblish.api.InstancePlugin):
    available tool does not visualize which render mode is set for the
    current comp, please run the following line in the console (Py2)

-        comp.GetData("rendermode")
+        comp.GetData("studio.rendermode")

    This will return the name of the current render mode as seen above under
    Options.

@@ -23,7 +23,7 @@ class CollectFusionRenderMode(pyblish.api.InstancePlugin):
    order = pyblish.api.CollectorOrder + 0.4
    label = "Collect Render Mode"
    hosts = ["fusion"]
-    families = ["saver"]
+    families = ["studio.saver"]

    def process(self, instance):
        """Collect all image sequence tools"""

@@ -34,11 +34,11 @@ class CollectFusionRenderMode(pyblish.api.InstancePlugin):
            raise RuntimeError("No comp previously collected, unable to "
                               "retrieve Fusion version.")

-        rendermode = comp.GetData("rendermode") or "renderlocal"
+        rendermode = comp.GetData("studio.rendermode") or "renderlocal"
        assert rendermode in options, "Must be supported render mode"

        self.log.info("Render mode: {0}".format(rendermode))

        # Append family
-        family = "saver.{0}".format(rendermode)
+        family = "studio.saver.{0}".format(rendermode)
        instance.data["families"].append(family)
@@ -11,7 +11,7 @@ class FusionIncrementCurrentFile(pyblish.api.ContextPlugin):
    label = "Increment current file"
    order = pyblish.api.IntegratorOrder + 9.0
    hosts = ["fusion"]
-    families = ["saver.deadline"]
+    families = ["studio.saver.deadline"]
    optional = True

    def process(self, context):
@@ -32,7 +32,7 @@ class PublishImageSequence(pyblish.api.InstancePlugin):
    order = pyblish.api.IntegratorOrder
    label = "Publish Rendered Image Sequence(s)"
    hosts = ["fusion"]
-    families = ["saver.renderlocal"]
+    families = ["studio.saver.renderlocal"]

    def process(self, instance):

@@ -55,7 +55,7 @@ class PublishImageSequence(pyblish.api.InstancePlugin):
            "regex": regex,
            "startFrame": instance.context.data["startFrame"],
            "endFrame": instance.context.data["endFrame"],
-            "families": ["imagesequence"],
+            "families": ["studio.imagesequence"],
        }

        # Write metadata and store the path in the instance
@@ -14,7 +14,7 @@ class FusionRenderLocal(pyblish.api.InstancePlugin):
    order = pyblish.api.ExtractorOrder
    label = "Render Local"
    hosts = ["fusion"]
-    families = ["saver.renderlocal"]
+    families = ["studio.saver.renderlocal"]

    def process(self, instance):
@@ -7,7 +7,7 @@ class FusionSaveComp(pyblish.api.ContextPlugin):
    label = "Save current file"
    order = pyblish.api.ExtractorOrder - 0.49
    hosts = ["fusion"]
-    families = ["saver"]
+    families = ["studio.saver"]

    def process(self, context):
@@ -19,7 +19,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
    label = "Submit to Deadline"
    order = pyblish.api.IntegratorOrder
    hosts = ["fusion"]
-    families = ["saver.deadline"]
+    families = ["studio.saver.deadline"]

    def process(self, instance):
@@ -1,6 +1,6 @@
import pyblish.api

-from config import action
+from pype import action


class ValidateBackgroundDepth(pyblish.api.InstancePlugin):

@@ -10,7 +10,7 @@ class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
    label = "Validate Background Depth 32 bit"
    actions = [action.RepairAction]
    hosts = ["fusion"]
-    families = ["saver"]
+    families = ["studio.saver"]
    optional = True

    @classmethod
@@ -8,7 +8,7 @@ class ValidateFusionCompSaved(pyblish.api.ContextPlugin):

    order = pyblish.api.ValidatorOrder
    label = "Validate Comp Saved"
-    families = ["saver"]
+    families = ["studio.saver"]
    hosts = ["fusion"]

    def process(self, context):
@@ -1,6 +1,6 @@
import pyblish.api

-from config import action
+from pype import action


class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):

@@ -13,7 +13,7 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
    order = pyblish.api.ValidatorOrder
    actions = [action.RepairAction]
    label = "Validate Create Folder Checked"
-    families = ["saver"]
+    families = ["studio.saver"]
    hosts = ["fusion"]

    @classmethod
@@ -14,7 +14,7 @@ class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):

    order = pyblish.api.ValidatorOrder
    label = "Validate Filename Has Extension"
-    families = ["saver"]
+    families = ["studio.saver"]
    hosts = ["fusion"]

    def process(self, instance):
@ -10,7 +10,7 @@ class ValidateSaverHasInput(pyblish.api.InstancePlugin):
|
|||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
label = "Validate Saver Has Input"
|
||||
families = ["saver"]
|
||||
families = ["studio.saver"]
|
||||
hosts = ["fusion"]
|
||||
|
||||
@classmethod
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
|
|||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
label = "Validate Saver Passthrough"
|
||||
families = ["saver"]
|
||||
families = ["studio.saver"]
|
||||
hosts = ["fusion"]
|
||||
|
||||
def process(self, context):
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ class ValidateUniqueSubsets(pyblish.api.InstancePlugin):
|
|||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
label = "Validate Unique Subsets"
|
||||
families = ["saver"]
|
||||
families = ["studio.saver"]
|
||||
hosts = ["fusion"]
|
||||
|
||||
@classmethod
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ def open(filepath):
|
|||
class PlayImageSequence(api.Loader):
|
||||
"""Open Image Sequence with system default"""
|
||||
|
||||
families = ["imagesequence"]
|
||||
families = ["studio.imagesequence"]
|
||||
representations = ["*"]
|
||||
|
||||
label = "Play sequence"
|
||||
|
|
|
|||
|
|
@ -35,6 +35,12 @@ class CollectAssumedDestination(pyblish.api.InstancePlugin):
|
|||
# Add destination to the resource
|
||||
source_filename = os.path.basename(resource["source"])
|
||||
destination = os.path.join(mock_destination, source_filename)
|
||||
|
||||
# Force forward slashes to fix issue with software unable
|
||||
# to work correctly with backslashes in specific scenarios
|
||||
# (e.g. escape characters in PLN-151 V-Ray UDIM)
|
||||
destination = destination.replace("\\", "/")
|
||||
|
||||
resource['destination'] = destination
|
||||
|
||||
# Collect transfers for the individual files of the resource
|
||||
|
|
|
|||
|
|
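The normalization above matters because os.path.join uses the platform separator, so on Windows the joined destination contains backslashes, which some downstream tools then treat as escape characters. A minimal sketch of the behaviour (the paths are hypothetical; ntpath forces Windows-style joins on any platform):

import ntpath  # Windows-style path joins, regardless of the current platform

destination = ntpath.join("P:\\projects\\show", "texture.1001.exr")
print(destination)                      # P:\projects\show\texture.1001.exr
print(destination.replace("\\", "/"))   # P:/projects/show/texture.1001.exr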
@ -35,7 +35,7 @@ class CollectDeadlineUser(pyblish.api.ContextPlugin):

    order = pyblish.api.CollectorOrder + 0.499
    label = "Deadline User"
    hosts = ['maya', 'fusion']
-    families = ["renderlayer", "saver.deadline"]
+    families = ["studio.renderlayer", "studio.saver.deadline"]

    def process(self, context):
        """Inject the current working file"""

@ -148,7 +148,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
                    raise RuntimeError("Invalid sequence")

                # Get family from the data
-                families = data.get("families", ["imagesequence"])
+                families = data.get("families", ["studio.imagesequence"])
                assert isinstance(families, (list, tuple)), "Must be iterable"
                assert families, "Must have at least a single family"

@ -23,18 +23,19 @@ class IntegrateAsset(pyblish.api.InstancePlugin):

    label = "Integrate Asset"
    order = pyblish.api.IntegratorOrder
-    families = ["animation",
-                "camera",
-                "imagesequence",
-                "look",
-                "pype.mayaAscii",
-                "model",
-                "pointcache",
-                "setdress",
-                "rig",
-                "vrayproxy",
-                "yetiRig",
-                "yeticache"]
+    families = ["studio.animation",
+                "studio.camera",
+                "studio.imagesequence",
+                "studio.look",
+                "studio.mayaAscii",
+                "studio.model",
+                "studio.pointcache",
+                "studio.vdbcache",
+                "studio.setdress",
+                "studio.rig",
+                "studio.vrayproxy",
+                "studio.yetiRig",
+                "studio.yeticache"]

    def process(self, instance):

@ -82,7 +83,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
        self.log.debug("Establishing staging directory @ %s" % stagingdir)

        project = io.find_one({"type": "project"},
-                              projection={"pype.template.publish": True})
+                              projection={"config.template.publish": True})

        asset = io.find_one({"type": "asset",
                             "name": ASSET,

@ -123,7 +123,7 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):

    label = "Submit image sequence jobs to Deadline"
    order = pyblish.api.IntegratorOrder + 0.1
    hosts = ["fusion", "maya"]
-    families = ["saver.deadline", "renderlayer"]
+    families = ["studio.saver.deadline", "studio.renderlayer"]

    def process(self, instance):

@ -168,7 +168,7 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):
            "regex": regex,
            "startFrame": start,
            "endFrame": end,
-            "families": ["imagesequence"],
+            "families": ["studio.imagesequence"],

            # Optional metadata (for debugging)
            "metadata": {

@ -185,7 +185,7 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):

        if data.get("extendFrames", False):

-            family = "imagesequence"
+            family = "studio.imagesequence"
            override = data["overrideExistingFrame"]

            # override = data.get("overrideExistingFrame", False)

@ -293,6 +293,10 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):
            ) for index, key in enumerate(environment)
        })

+        # Avoid copied pools and remove secondary pool
+        payload["JobInfo"]["Pool"] = "none"
+        payload["JobInfo"].pop("SecondaryPool", None)
+
        self.log.info("Submitting..")
        self.log.info(json.dumps(payload, indent=4, sort_keys=True))
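The truncated comprehension above builds Deadline's indexed environment entries in the job info. A minimal sketch of that convention, assuming a plain environment dict (the exact key/value formatting of the original call is not fully shown in this hunk):

environment = {"AVALON_PROJECT": "demo", "PYBLISH_PATHS": "/pipeline/plugins"}

payload = {"JobInfo": {}}
payload["JobInfo"].update({
    "EnvironmentKeyValue%d" % index: "{key}={value}".format(
        key=key, value=environment[key]
    ) for index, key in enumerate(environment)
})
# e.g. {'EnvironmentKeyValue0': 'AVALON_PROJECT=demo', ...}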
29 pype/plugins/global/publish/validate_resources.py (Normal file)

@ -0,0 +1,29 @@
import pyblish.api
import pype.api

import os


class ValidateResources(pyblish.api.InstancePlugin):
    """Validates mapped resources.

    These are external files to the current application, for example
    these could be textures, image planes, cache files or other linked
    media.

    This validates:
        - The resources are existing files.
        - The resources have correctly collected the data.

    """

    order = pype.api.ValidateContentsOrder
    label = "Resources"

    def process(self, instance):

        for resource in instance.data.get('resources', []):
            # Required data
            assert "source" in resource, "No source found"
            assert "files" in resource, "No files from source"
            assert all(os.path.exists(f) for f in resource['files'])
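For reference, the shape of the resources data this validator expects can be sketched as follows; the concrete values are hypothetical, only the source/files keys are required by the checks above (the os.path.exists check is omitted here since the paths do not exist):

instance_data = {
    "resources": [
        {
            "source": "/projects/demo/textures/diffuse.1001.exr",  # hypothetical
            "files": ["/projects/demo/textures/diffuse.1001.exr"],
        },
    ]
}

for resource in instance_data.get("resources", []):
    assert "source" in resource, "No source found"
    assert "files" in resource, "No files from source"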
@ -11,7 +11,7 @@ class ValidateSequenceFrames(pyblish.api.InstancePlugin):

    order = pyblish.api.ValidatorOrder
    label = "Validate Sequence Frames"
-    families = ["imagesequence"]
+    families = ["studio.imagesequence"]
    hosts = ["shell"]

    def process(self, instance):

34 pype/plugins/houdini/create/create_alembic_camera.py (Normal file)

@ -0,0 +1,34 @@
from collections import OrderedDict

from avalon import houdini


class CreateAlembicCamera(houdini.Creator):

    name = "camera"
    label = "Camera (Abc)"
    family = "studio.camera"
    icon = "camera"

    def __init__(self, *args, **kwargs):
        super(CreateAlembicCamera, self).__init__(*args, **kwargs)

        # Remove the active, we are checking the bypass flag of the nodes
        self.data.pop("active", None)

        # Set node type to create for output
        self.data.update({"node_type": "alembic"})

    def process(self):
        instance = super(CreateAlembicCamera, self).process()

        parms = {"use_sop_path": True,
                 "build_from_path": True,
                 "path_attrib": "path",
                 "filename": "$HIP/pyblish/%s.abc" % self.name}

        if self.nodes:
            node = self.nodes[0]
            parms.update({"sop_path": node.path()})

        instance.setParms(parms)
40 pype/plugins/houdini/create/create_pointcache.py (Normal file)

@ -0,0 +1,40 @@
from avalon import houdini


class CreatePointCache(houdini.Creator):
    """Alembic pointcache for animated data"""

    name = "pointcache"
    label = "Point Cache"
    family = "studio.pointcache"
    icon = "gears"

    def __init__(self, *args, **kwargs):
        super(CreatePointCache, self).__init__(*args, **kwargs)

        # Remove the active, we are checking the bypass flag of the nodes
        self.data.pop("active", None)

        self.data.update({"node_type": "alembic"})

    def process(self):
        instance = super(CreatePointCache, self).process()

        parms = {"use_sop_path": True,     # Export single node from SOP Path
                 "build_from_path": True,  # Direct path of primitive in output
                 "path_attrib": "path",    # Pass path attribute for output
                 "prim_to_detail_pattern": "cbId",
                 "format": 2,              # Set format to Ogawa
                 "filename": "$HIP/pyblish/%s.abc" % self.name}

        if self.nodes:
            node = self.nodes[0]
            parms.update({"sop_path": node.path()})

        instance.setParms(parms)

        # Lock any parameters in this list
        to_lock = ["prim_to_detail_pattern"]
        for name in to_lock:
            parm = instance.parm(name)
            parm.lock(True)
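Locking the parameter keeps artists from changing the cbId promotion pattern after creation. A minimal sketch of the same pattern, assuming a live Houdini session where the hou module is available (node names are hypothetical):

import hou  # only available inside Houdini

rop = hou.node("/out").createNode("alembic", node_name="pointcache_demo")
parm = rop.parm("prim_to_detail_pattern")
parm.set("cbId")
parm.lock(True)         # further parm.set(...) now raises hou.PermissionError
print(parm.isLocked())  # True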
33 pype/plugins/houdini/create/create_vbd_cache.py (Normal file)

@ -0,0 +1,33 @@
from avalon import houdini


class CreateVDBCache(houdini.Creator):
    """VDB cache for animated data"""

    name = "vbdcache"
    label = "VDB Cache"
    family = "studio.vdbcache"
    icon = "cloud"

    def __init__(self, *args, **kwargs):
        super(CreateVDBCache, self).__init__(*args, **kwargs)

        # Remove the active, we are checking the bypass flag of the nodes
        self.data.pop("active", None)

        self.data.update({
            "node_type": "geometry",   # Set node type to create for output
            "executeBackground": True  # Render node in background
        })

    def process(self):
        instance = super(CreateVDBCache, self).process()

        parms = {"sopoutput": "$HIP/pyblish/%s.$F4.vdb" % self.name,
                 "initsim": True}

        if self.nodes:
            node = self.nodes[0]
            parms.update({"sop_path": node.path()})

        instance.setParms(parms)
109 pype/plugins/houdini/load/load_alembic.py (Normal file)

@ -0,0 +1,109 @@
from avalon import api

from avalon.houdini import pipeline, lib


class AbcLoader(api.Loader):
    """Specific loader of Alembic for the studio.* families"""

    families = ["studio.model",
                "studio.animation",
                "studio.pointcache"]
    label = "Load Alembic"
    representations = ["abc"]
    order = -10
    icon = "code-fork"
    color = "orange"

    def load(self, context, name=None, namespace=None, data=None):

        import os
        import hou

        # Format file name, Houdini only wants forward slashes
        file_path = os.path.normpath(self.fname)
        file_path = file_path.replace("\\", "/")

        # Get the root node
        obj = hou.node("/obj")

        # Create a unique name
        counter = 1
        namespace = namespace if namespace else context["asset"]["name"]
        formatted = "{}_{}".format(namespace, name) if namespace else name
        node_name = "{0}_{1:03d}".format(formatted, counter)

        children = lib.children_as_string(hou.node("/obj"))
        while node_name in children:
            counter += 1
            node_name = "{0}_{1:03d}".format(formatted, counter)

        # Create a new geo node
        container = obj.createNode("geo", node_name=node_name)

        # Remove the file node, it only loads static meshes
        # Houdini 17 has removed the file node from the geo node
        file_node = container.node("file1")
        if file_node:
            file_node.destroy()

        # Create an alembic node (supports animation)
        alembic = container.createNode("alembic", node_name=node_name)
        alembic.setParms({"fileName": file_path})

        # Add unpack node
        unpack_name = "unpack_{}".format(name)
        unpack = container.createNode("unpack", node_name=unpack_name)
        unpack.setInput(0, alembic)
        unpack.setParms({"transfer_attributes": "path"})

        # Add normal to points
        # Order of menu ['point', 'vertex', 'prim', 'detail']
        normal_name = "normal_{}".format(name)
        normal_node = container.createNode("normal", node_name=normal_name)
        normal_node.setParms({"type": 0})

        normal_node.setInput(0, unpack)

        null = container.createNode("null", node_name="OUT")
        null.setInput(0, normal_node)

        # Set display on last node
        null.setDisplayFlag(True)

        # Set new position for unpack node else it gets cluttered
        nodes = [container, alembic, unpack, normal_node, null]
        for nr, node in enumerate(nodes):
            node.setPosition([0, (0 - nr)])

        self[:] = nodes

        return pipeline.containerise(node_name,
                                     namespace,
                                     nodes,
                                     context,
                                     self.__class__.__name__)

    def update(self, container, representation):

        node = container["node"]
        try:
            alembic_node = next(n for n in node.children() if
                                n.type().name() == "alembic")
        except StopIteration:
            self.log.error("Could not find node of type `alembic`")
            return

        # Update the file path
        file_path = api.get_representation_path(representation)
        file_path = file_path.replace("\\", "/")

        alembic_node.setParms({"fileName": file_path})

        # Update attribute
        node.setParms({"representation": str(representation["_id"])})

    def remove(self, container):

        node = container["node"]
        node.destroy()
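The counter loop above probes /obj for an unused NAME_NNN node name. The same logic in isolation, with a plain list standing in for the scene's child names:

def unique_node_name(formatted, existing):
    counter = 1
    node_name = "{0}_{1:03d}".format(formatted, counter)
    while node_name in existing:
        counter += 1
        node_name = "{0}_{1:03d}".format(formatted, counter)
    return node_name

existing = ["hero_model_001", "hero_model_002"]
print(unique_node_name("hero_model", existing))  # hero_model_003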
119 pype/plugins/houdini/load/load_camera.py (Normal file)

@ -0,0 +1,119 @@
from avalon import api

from avalon.houdini import pipeline, lib


class CameraLoader(api.Loader):
    """Specific loader of an Alembic camera for the studio.camera family"""

    families = ["studio.camera"]
    label = "Load Camera (abc)"
    representations = ["abc"]
    order = -10

    icon = "code-fork"
    color = "orange"

    def load(self, context, name=None, namespace=None, data=None):

        import os
        import hou

        # Format file name, Houdini only wants forward slashes
        file_path = os.path.normpath(self.fname)
        file_path = file_path.replace("\\", "/")

        # Get the root node
        obj = hou.node("/obj")

        # Create a unique name
        counter = 1
        asset_name = context["asset"]["name"]

        namespace = namespace if namespace else asset_name
        formatted = "{}_{}".format(namespace, name) if namespace else name
        node_name = "{0}_{1:03d}".format(formatted, counter)

        children = lib.children_as_string(hou.node("/obj"))
        while node_name in children:
            counter += 1
            node_name = "{0}_{1:03d}".format(formatted, counter)

        # Create an archive node
        container = self.create_and_connect(obj, "alembicarchive", node_name)

        # TODO: add FPS of project / asset
        container.setParms({"fileName": file_path,
                            "channelRef": True})

        # Build the alembic hierarchy and tidy up the node position
        container.parm("buildHierarchy").pressButton()
        container.moveToGoodPosition()

        nodes = [container]

        self[:] = nodes

        return pipeline.containerise(node_name,
                                     namespace,
                                     nodes,
                                     context,
                                     self.__class__.__name__)

    def update(self, container, representation):

        node = container["node"]

        # Update the file path
        file_path = api.get_representation_path(representation)
        file_path = file_path.replace("\\", "/")

        # Update attributes
        node.setParms({"fileName": file_path,
                       "representation": str(representation["_id"])})

        # Rebuild
        node.parm("buildHierarchy").pressButton()

    def remove(self, container):

        node = container["node"]
        node.destroy()

    def create_and_connect(self, node, node_type, name=None):
        """Create a node inside the given node and connect it to the input

        Args:
            node(hou.Node): parent of the new node
            node_type(str): name of the type of node, eg: 'alembic'
            name(str, Optional): name of the node

        Returns:
            hou.Node

        """

        import hou

        try:

            if name:
                new_node = node.createNode(node_type, node_name=name)
            else:
                new_node = node.createNode(node_type)

            new_node.moveToGoodPosition()

            try:
                input_node = next(i for i in node.allItems() if
                                  isinstance(i, hou.SubnetIndirectInput))
            except StopIteration:
                return new_node

            new_node.setInput(0, input_node)
            return new_node

        except Exception:
            raise RuntimeError("Could not create node type `%s` in node `%s`"
                               % (node_type, node))
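As a usage note, create_and_connect is what lets the archive land inside a subnet already wired to the subnet's indirect input. A minimal sketch, assuming a Houdini session and a loader instance named loader (both the subnet and node names are hypothetical):

import hou

obj = hou.node("/obj")
subnet = obj.createNode("subnet", node_name="cameras")

# Creates /obj/cameras/shot010_camera_001 and wires it to the
# subnet's indirect input, if one exists
archive = loader.create_and_connect(subnet, "alembicarchive",
                                    "shot010_camera_001")
print(archive.path())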
15 pype/plugins/houdini/publish/collect_current_file.py (Normal file)

@ -0,0 +1,15 @@
import hou

import pyblish.api


class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.5
    label = "Houdini Current File"
    hosts = ['houdini']

    def process(self, context):
        """Inject the current working file"""
        context.data['currentFile'] = hou.hipFile.path()
66 pype/plugins/houdini/publish/collect_frames.py (Normal file)

@ -0,0 +1,66 @@
import os
import re

import pyblish.api
from pype.houdini import lib


class CollectFrames(pyblish.api.InstancePlugin):
    """Collect all frames which would be a result of the export"""

    order = pyblish.api.CollectorOrder
    label = "Collect Frames"
    families = ["studio.vdbcache"]

    def process(self, instance):

        ropnode = instance[0]

        output_parm = lib.get_output_parameter(ropnode)
        output = output_parm.eval()

        file_name = os.path.basename(output)
        match = re.match(r"(\w+)\.(\d+)\.vdb", file_name)
        result = file_name

        start_frame = instance.data.get("startFrame", None)
        end_frame = instance.data.get("endFrame", None)

        if match and start_frame is not None:

            # Check if frames are bigger than 1 (file collection)
            # override the result
            if end_frame - start_frame > 1:
                result = self.create_file_list(match,
                                               int(start_frame),
                                               int(end_frame))

        instance.data.update({"frames": result})

    def create_file_list(self, match, start_frame, end_frame):
        """Collect files based on frame range and regex.match

        Args:
            match(re.match): match object
            start_frame(int): start of the animation
            end_frame(int): end of the animation

        Returns:
            list

        """

        result = []

        padding = len(match.group(2))
        name = match.group(1)
        padding_format = "{number:0{width}d}"

        count = start_frame
        while count <= end_frame:
            str_count = padding_format.format(number=count, width=padding)
            file_name = "{}.{}.vdb".format(name, str_count)
            result.append(file_name)
            count += 1

        return result
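The padding trick above reuses the digit count from the matched frame number, so cloud.0100.vdb keeps four digits while cloud.10.vdb keeps two. The core of create_file_list in isolation:

import re

match = re.match(r"(\w+)\.(\d+)\.vdb", "cloud.0100.vdb")
name, padding = match.group(1), len(match.group(2))

files = ["{}.{number:0{width}d}.vdb".format(name, number=frame, width=padding)
         for frame in range(100, 104)]
print(files)  # ['cloud.0100.vdb', 'cloud.0101.vdb', 'cloud.0102.vdb', 'cloud.0103.vdb']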
104 pype/plugins/houdini/publish/collect_instances.py (Normal file)

@ -0,0 +1,104 @@
import hou

import pyblish.api

from avalon.houdini import lib


class CollectInstances(pyblish.api.ContextPlugin):
    """Gather instances from all nodes in the /out graph and their
    pre-defined attributes.

    This collector takes into account assets that are associated with
    a specific node and marked with a unique identifier;

    Identifier:
        id (str): "pyblish.avalon.instance"

    Specific node:
        The specific node is important because it dictates in which way the
        subset is being exported.

        alembic: will export an Alembic file which supports cascading
            attributes like 'cbId' and 'path'
        geometry: can export a wide range of file types, default out

    """

    order = pyblish.api.CollectorOrder - 0.01
    label = "Collect Instances"
    hosts = ["houdini"]

    def process(self, context):

        instances = []

        nodes = hou.node("/out").children()
        for node in nodes:

            if not node.parm("id"):
                continue

            if node.evalParm("id") != "pyblish.avalon.instance":
                continue

            has_family = node.evalParm("family")
            assert has_family, "'%s' is missing 'family'" % node.name()

            data = lib.read(node)
            # Check bypass state and reverse
            data.update({"active": not node.isBypassed()})

            # temporarily translation of `active` to `publish` till issue has
            # been resolved, https://github.com/pyblish/pyblish-base/issues/307
            if "active" in data:
                data["publish"] = data["active"]

            data.update(self.get_frame_data(node))

            # Create nice name
            # All nodes in the Outputs graph have the 'Valid Frame Range'
            # attribute, we check here if any frames are set
            label = data.get("name", node.name())
            if "startFrame" in data:
                frames = "[{startFrame} - {endFrame}]".format(**data)
                label = "{} {}".format(label, frames)

            instance = context.create_instance(label)

            instance[:] = [node]
            instance.data.update(data)

            instances.append(instance)

        def sort_by_family(instance):
            """Sort by family"""
            return instance.data.get("families", instance.data.get("family"))

        # Sort/grouped by family (preserving local index)
        context[:] = sorted(context, key=sort_by_family)

        return context

    def get_frame_data(self, node):
        """Get the frame data: start frame, end frame and steps

        Args:
            node(hou.Node)

        Returns:
            dict

        """

        data = {}

        if node.parm("trange") is None:
            return data

        if node.evalParm("trange") == 0:
            return data

        data["startFrame"] = node.evalParm("f1")
        data["endFrame"] = node.evalParm("f2")
        data["steps"] = node.evalParm("f3")

        return data
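The trange and f1..f3 parameters read above are the standard frame-range controls on Houdini ROP nodes; trange == 0 means 'Render Current Frame', so no range is collected. A minimal sketch of the same read, assuming a hou session (the f parms carry expressions by default, hence the keyframe cleanup before setting):

import hou

rop = hou.node("/out").createNode("geometry", node_name="frame_demo")
rop.parm("trange").set(1)  # Render Frame Range
for parm in rop.parmTuple("f"):
    parm.deleteAllKeyframes()
rop.parmTuple("f").set((1001, 1010, 1))  # start, end, step

print(rop.evalParm("trange"))                                       # 1
print(rop.evalParm("f1"), rop.evalParm("f2"), rop.evalParm("f3"))   # 1001.0 1010.0 1.0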
27 pype/plugins/houdini/publish/collect_output_node.py (Normal file)

@ -0,0 +1,27 @@
import pyblish.api


class CollectOutputNode(pyblish.api.InstancePlugin):
    """Collect the output (SOP) node of the instance"""

    order = pyblish.api.CollectorOrder
    families = ["*"]
    hosts = ["houdini"]
    label = "Collect Output Node"

    def process(self, instance):

        import hou

        node = instance[0]

        # Get sop path
        if node.type().name() == "alembic":
            sop_path_parm = "sop_path"
        else:
            sop_path_parm = "soppath"

        sop_path = node.parm(sop_path_parm).eval()
        out_node = hou.node(sop_path)

        instance.data["output_node"] = out_node
32 pype/plugins/houdini/publish/extract_alembic.py (Normal file)

@ -0,0 +1,32 @@
import os

import pyblish.api
import pype.api


class ExtractAlembic(pype.api.Extractor):

    order = pyblish.api.ExtractorOrder
    label = "Extract Alembic"
    hosts = ["houdini"]
    families = ["studio.pointcache", "studio.camera"]

    def process(self, instance):

        ropnode = instance[0]

        # Get the filename from the filename parameter
        output = ropnode.evalParm("filename")
        staging_dir = os.path.dirname(output)
        instance.data["stagingDir"] = staging_dir

        file_name = os.path.basename(output)

        # We run the render
        self.log.info("Writing alembic '%s' to '%s'"
                      % (file_name, staging_dir))
        ropnode.render()

        if "files" not in instance.data:
            instance.data["files"] = []

        instance.data["files"].append(file_name)
36 pype/plugins/houdini/publish/extract_vdb_cache.py (Normal file)

@ -0,0 +1,36 @@
import os

import pyblish.api
import pype.api


class ExtractVDBCache(pype.api.Extractor):

    order = pyblish.api.ExtractorOrder + 0.1
    label = "Extract VDB Cache"
    families = ["studio.vdbcache"]
    hosts = ["houdini"]

    def process(self, instance):

        ropnode = instance[0]

        # Get the filename from the filename parameter
        # `.evalParm(parameter)` will make sure all tokens are resolved
        sop_output = ropnode.evalParm("sopoutput")
        staging_dir = os.path.normpath(os.path.dirname(sop_output))
        instance.data["stagingDir"] = staging_dir

        if instance.data.get("executeBackground", True):
            self.log.info("Creating background task..")
            ropnode.parm("executebackground").pressButton()
            self.log.info("Finished")
        else:
            ropnode.render()

        if "files" not in instance.data:
            instance.data["files"] = []

        output = instance.data["frames"]

        instance.data["files"].append(output)
46 pype/plugins/houdini/publish/valiate_vdb_input_node.py (Normal file)

@ -0,0 +1,46 @@
import pyblish.api
import pype.api


class ValidateVDBInputNode(pyblish.api.InstancePlugin):
    """Validate that the node connected to the output node is of type VDB

    Regardless of the number of VDBs created, the output will need to have
    an equal number of VDBs, points, primitives and vertices.

    A VDB is an inherited type of Prim and holds the following data:
        - Primitives: 1
        - Points: 1
        - Vertices: 1
        - VDBs: 1

    """

    order = pype.api.ValidateContentsOrder + 0.1
    families = ["studio.vdbcache"]
    hosts = ["houdini"]
    label = "Validate Input Node (VDB)"

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Node connected to the output node is not "
                               "of type VDB!")

    @classmethod
    def get_invalid(cls, instance):

        node = instance.data["output_node"]

        prims = node.geometry().prims()
        nr_of_prims = len(prims)

        nr_of_points = len(node.geometry().points())
        if nr_of_points != nr_of_prims:
            cls.log.error("The number of primitives and points do not match")
            return [instance]

        for prim in prims:
            if prim.numVertices() != 1:
                cls.log.error("Found primitive with more than 1 vertex!")
                return [instance]
37 pype/plugins/houdini/publish/validate_alembic_input_node.py (Normal file)

@ -0,0 +1,37 @@
import pyblish.api
import pype.api


class ValidateAlembicInputNode(pyblish.api.InstancePlugin):
    """Validate that the node connected to the output is correct

    The connected node cannot be of the following types for Alembic:
        - VDB
        - Volume

    """

    order = pype.api.ValidateContentsOrder + 0.1
    families = ["studio.pointcache"]
    hosts = ["houdini"]
    label = "Validate Input Node (Abc)"

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Node connected to the output node is "
                               "incorrect")

    @classmethod
    def get_invalid(cls, instance):

        invalid_nodes = ["VDB", "Volume"]
        node = instance.data["output_node"]

        prims = node.geometry().prims()

        for prim in prims:
            prim_type = prim.type().name()
            if prim_type in invalid_nodes:
                cls.log.error("Found a primitive which is of type '%s'!"
                              % prim_type)
                return [instance]
50 pype/plugins/houdini/publish/validate_animation_settings.py (Normal file)

@ -0,0 +1,50 @@
import pyblish.api

from pype.houdini import lib


class ValidateAnimationSettings(pyblish.api.InstancePlugin):
    """Validate if the unexpanded string contains the frame ('$F') token

    This validator will only check the output parameter of the node if
    the Valid Frame Range is not set to 'Render Current Frame'

    Rules:
        If you render out a frame range it is mandatory to have the
        frame token - '$F4' or similar - to ensure that each frame gets
        written. If this is not the case you will overwrite the same file
        every time a frame is written out.

    Examples:
        Good: 'my_vdb_cache.$F4.vdb'
        Bad: 'my_vdb_cache.vdb'

    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Frame Settings"
    families = ["studio.vdbcache"]

    def process(self, instance):

        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Output settings do not match for '%s'" %
                               instance)

    @classmethod
    def get_invalid(cls, instance):

        node = instance[0]

        # Check trange parm, 0 means Render Current Frame
        frame_range = node.evalParm("trange")
        if frame_range == 0:
            return []

        output_parm = lib.get_output_parameter(node)
        unexpanded_str = output_parm.unexpandedString()

        if "$F" not in unexpanded_str:
            cls.log.error("No frame token found in '%s'" % node.path())
            return [instance]
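The distinction between unexpandedString() and eval() is what makes this check work: evaluation replaces $F4 with the current frame number, so only the raw string still shows the token. A minimal sketch, assuming a hou session:

import hou

rop = hou.node("/out").createNode("geometry", node_name="token_demo")
parm = rop.parm("sopoutput")
parm.set("$HIP/pyblish/my_vdb_cache.$F4.vdb")

print("$F" in parm.unexpandedString())  # True: the raw string keeps the token
print("$F" in parm.eval())              # False: eval() expands it to a frame number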
38 pype/plugins/houdini/publish/validate_mkpaths_toggled.py (Normal file)

@ -0,0 +1,38 @@
import pyblish.api
import pype.api


class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin):
    """Validate that the node attribute Create Intermediate Directories is
    turned on

    Rules:
        * The node must have Create Intermediate Directories turned on to
          ensure the output file will be created

    """

    order = pype.api.ValidateContentsOrder
    families = ["studio.pointcache"]
    hosts = ['houdini']
    label = 'Create Intermediate Directories Checked'

    def process(self, instance):

        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Found ROP nodes with Create Intermediate "
                               "Directories turned off")

    @classmethod
    def get_invalid(cls, instance):

        result = []

        for node in instance[:]:
            if node.parm("mkpath").eval() != 1:
                cls.log.error("Invalid settings found on `%s`" % node.path())
                result.append(node.path())

        return result
50 pype/plugins/houdini/publish/validate_outnode_exists.py (Normal file)

@ -0,0 +1,50 @@
import pyblish.api
import pype.api


class ValidateOutputNodeExists(pyblish.api.InstancePlugin):
    """Validate that the SOP path of the ROP node points to an existing
    output node

    Rules:
        * The SOP path must point to an existing node
        * That node must be an `output` node (or a `cam` node)

    """

    order = pype.api.ValidateContentsOrder
    families = ["*"]
    hosts = ['houdini']
    label = "Output Node Exists"

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Could not find output node(s)!")

    @classmethod
    def get_invalid(cls, instance):

        import hou

        result = set()

        node = instance[0]
        if node.type().name() == "alembic":
            soppath_parm = "sop_path"
        else:
            # Fall back to geometry node
            soppath_parm = "soppath"

        sop_path = node.parm(soppath_parm).eval()
        output_node = hou.node(sop_path)

        if output_node is None:
            cls.log.error("Node at '%s' does not exist" % sop_path)
            result.add(node.path())

        # Added cam as this is a legit output type (cameras can't have an
        # output node)
        elif output_node.type().name() not in ["output", "cam"]:
            cls.log.error("SOP Path does not end path at output node")
            result.add(node.path())

        return result
45 pype/plugins/houdini/publish/validate_output_node.py (Normal file)

@ -0,0 +1,45 @@
import pyblish.api


class ValidateOutputNode(pyblish.api.InstancePlugin):
    """Validate that the output node:
    - exists
    - is of type 'output'
    - has an input"""

    order = pyblish.api.ValidatorOrder
    families = ["*"]
    hosts = ["houdini"]
    label = "Validate Output Node"

    def process(self, instance):

        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Output node(s) `%s` are incorrect" % invalid)

    @classmethod
    def get_invalid(cls, instance):

        output_node = instance.data["output_node"]

        if output_node is None:
            node = instance[0]
            cls.log.error("Output node at '%s' does not exist, see source" %
                          node.path())

            return node.path()

        # Check if type is correct
        type_name = output_node.type().name()
        if type_name not in ["output", "cam"]:
            cls.log.error("Output node `%s` is not an accepted type `output` "
                          "or `cam`" %
                          output_node.path())
            return [output_node.path()]

        # Check if output node has incoming connections
        if type_name == "output" and not output_node.inputConnections():
            cls.log.error("Output node `%s` has no incoming connections"
                          % output_node.path())
            return [output_node.path()]
@ -9,7 +9,7 @@ class CreateAnimation(avalon.maya.Creator):

    name = "animationDefault"
    label = "Animation"
-    family = "animation"
+    family = "studio.animation"
    icon = "male"

    def __init__(self, *args, **kwargs):

@ -8,7 +8,7 @@ class CreateCamera(avalon.maya.Creator):

    name = "cameraDefault"
    label = "Camera"
-    family = "camera"
+    family = "studio.camera"
    icon = "video-camera"

    def __init__(self, *args, **kwargs):

@ -8,7 +8,7 @@ class CreateLook(avalon.maya.Creator):

    name = "look"
    label = "Look"
-    family = "look"
+    family = "studio.look"
    icon = "paint-brush"

    def __init__(self, *args, **kwargs):

@ -6,5 +6,5 @@ class CreateMayaAscii(avalon.maya.Creator):

    name = "mayaAscii"
    label = "Maya Ascii"
-    family = "pype.mayaAscii"
+    family = "studio.mayaAscii"
    icon = "file-archive-o"

@ -1,3 +1,5 @@
+from collections import OrderedDict
+
import avalon.maya


@ -6,5 +8,16 @@ class CreateModel(avalon.maya.Creator):

    name = "modelDefault"
    label = "Model"
-    family = "model"
+    family = "studio.model"
    icon = "cube"
+
+    def __init__(self, *args, **kwargs):
+        super(CreateModel, self).__init__(*args, **kwargs)
+
+        # create an ordered dict with the existing data first
+        data = OrderedDict(**self.data)
+
+        # Write vertex colors with the geometry.
+        data["writeColorSets"] = True
+
+        self.data = data

@ -9,7 +9,7 @@ class CreatePointCache(avalon.maya.Creator):

    name = "pointcache"
    label = "Point Cache"
-    family = "pointcache"
+    family = "studio.pointcache"
    icon = "gears"

    def __init__(self, *args, **kwargs):

@ -10,7 +10,7 @@ from avalon import api

class CreateRenderGlobals(avalon.maya.Creator):

    label = "Render Globals"
-    family = "renderglobals"
+    family = "studio.renderglobals"
    icon = "gears"

    def __init__(self, *args, **kwargs):

@ -8,7 +8,7 @@ class CreateRig(avalon.maya.Creator):

    name = "rigDefault"
    label = "Rig"
-    family = "rig"
+    family = "studio.rig"
    icon = "wheelchair"

    def process(self):

@ -6,5 +6,5 @@ class CreateSetDress(avalon.maya.Creator):

    name = "setdress"
    label = "Set Dress"
-    family = "setdress"
+    family = "studio.setdress"
    icon = "cubes"

@ -8,7 +8,7 @@ class CreateVrayProxy(avalon.maya.Creator):

    name = "vrayproxy"
    label = "VRay Proxy"
-    family = "vrayproxy"
+    family = "studio.vrayproxy"
    icon = "gears"

    def __init__(self, *args, **kwargs):

@ -20,4 +20,7 @@ class CreateVrayProxy(avalon.maya.Creator):
        data["startFrame"] = 1
        data["endFrame"] = 1

+        # Write vertex colors
+        data["vertexColors"] = False
+
        self.data.update(data)
@ -9,17 +9,18 @@ class CreateYetiCache(avalon.maya.Creator):

    name = "yetiDefault"
    label = "Yeti Cache"
-    family = "yeticache"
+    family = "studio.yeticache"
    icon = "pagelines"

    def __init__(self, *args, **kwargs):
        super(CreateYetiCache, self).__init__(*args, **kwargs)

-        data = OrderedDict(self.data)
+        data = OrderedDict(**self.data)
        data["peroll"] = 0

        anim_data = lib.collect_animation_data()
        data.update({"startFrame": anim_data["startFrame"],
-                     "endFrame": anim_data["endFrame"]})
+                     "endFrame": anim_data["endFrame"],
+                     "samples": 3})

        self.data = data

@ -7,7 +7,7 @@ class CreateYetiRig(avalon.maya.Creator):
    """Output for procedural plugin nodes ( Yeti / XGen / etc)"""

    label = "Yeti Rig"
-    family = "yetiRig"
+    family = "studio.yetiRig"
    icon = "usb"

    def process(self):

@ -4,9 +4,9 @@ import pype.maya.plugin
class AbcLoader(pype.maya.plugin.ReferenceLoader):
    """Specific loader of Alembic for the avalon.animation family"""

-    families = ["animation",
-                "camera",
-                "pointcache"]
+    families = ["studio.animation",
+                "studio.camera",
+                "studio.pointcache"]
    representations = ["abc"]

    label = "Reference animation"

@ -42,7 +42,7 @@ class AbcLoader(pype.maya.plugin.ReferenceLoader):
                                  reference=True,
                                  returnNewNodes=True)

-        # load studio ID attribute
+        # load colorbleed ID attribute
        self[:] = nodes

        return nodes

@ -8,9 +8,9 @@ from avalon import api

class SetFrameRangeLoader(api.Loader):
    """Specific loader of Alembic for the avalon.animation family"""

-    families = ["animation",
-                "camera",
-                "pointcache"]
+    families = ["studio.animation",
+                "studio.camera",
+                "studio.pointcache"]
    representations = ["abc"]

    label = "Set frame range"

@ -42,9 +42,9 @@ class SetFrameRangeLoader(api.Loader):

class SetFrameRangeWithHandlesLoader(api.Loader):
    """Specific loader of Alembic for the avalon.animation family"""

-    families = ["animation",
-                "camera",
-                "pointcache"]
+    families = ["studio.animation",
+                "studio.camera",
+                "studio.pointcache"]
    representations = ["abc"]

    label = "Set frame range (with handles)"

@ -2,10 +2,10 @@ import pype.maya.plugin


class AbcLoader(pype.maya.plugin.ReferenceLoader):
-    """Specific loader of Alembic for the avalon.animation family"""
+    """Specific loader of Alembic for the studio.animation family"""

-    families = ["animation",
-                "pointcache"]
+    families = ["studio.animation",
+                "studio.pointcache"]
    label = "Reference animation"
    representations = ["abc"]
    order = -10

@ -2,9 +2,9 @@ import pype.maya.plugin


class CameraLoader(pype.maya.plugin.ReferenceLoader):
-    """Specific loader of Alembic for the avalon.animation family"""
+    """Specific loader of Alembic for the studio.camera family"""

-    families = ["camera"]
+    families = ["studio.camera"]
    label = "Reference camera"
    representations = ["abc", "ma"]
    order = -10

@ -4,7 +4,7 @@ import pype.maya.plugin
class LookLoader(pype.maya.plugin.ReferenceLoader):
    """Specific loader for lookdev"""

-    families = ["look"]
+    families = ["studio.look"]
    representations = ["ma"]

    label = "Reference look"

@ -4,7 +4,7 @@ import pype.maya.plugin
class MayaAsciiLoader(pype.maya.plugin.ReferenceLoader):
    """Load the model"""

-    families = ["pype.mayaAscii"]
+    families = ["studio.mayaAscii"]
    representations = ["ma"]

    label = "Reference Maya Ascii"

@ -5,7 +5,7 @@ import pype.maya.plugin
class ModelLoader(pype.maya.plugin.ReferenceLoader):
    """Load the model"""

-    families = ["model"]
+    families = ["studio.model"]
    representations = ["ma"]

    label = "Reference Model"

@ -37,7 +37,7 @@ class ModelLoader(pype.maya.plugin.ReferenceLoader):
class GpuCacheLoader(api.Loader):
    """Load model Alembic as gpuCache"""

-    families = ["model"]
+    families = ["studio.model"]
    representations = ["abc"]

    label = "Import Gpu Cache"

@ -11,7 +11,7 @@ class RigLoader(pype.maya.plugin.ReferenceLoader):

    """

-    families = ["rig"]
+    families = ["studio.rig"]
    representations = ["ma"]

    label = "Reference rig"

@ -62,7 +62,7 @@ class RigLoader(pype.maya.plugin.ReferenceLoader):
        cmds.select([output, controls] + roots, noExpand=True)
        api.create(name=namespace,
                   asset=asset,
-                   family="animation",
+                   family="studio.animation",
                   options={"useSelection": True},
                   data={"dependencies": dependency})

@ -3,7 +3,7 @@ from avalon import api

class SetDressLoader(api.Loader):

-    families = ["setdress"]
+    families = ["studio.setdress"]
    representations = ["json"]

    label = "Load Set Dress"

@ -23,7 +23,7 @@ class SetDressLoader(api.Loader):
            suffix="_",
        )

-        from config import setdress_api
+        from pype import setdress_api

        containers = setdress_api.load_package(filepath=self.fname,
                                               name=name,

@ -45,7 +45,7 @@ class SetDressLoader(api.Loader):

    def update(self, container, representation):

-        from config import setdress_api
+        from pype import setdress_api
        return setdress_api.update_package(container,
                                           representation)

@ -53,7 +53,7 @@ class SetDressLoader(api.Loader):
        """Remove all sub containers"""

        from avalon import api
-        from config import setdress_api
+        from pype import setdress_api
        import maya.cmds as cmds

        # Remove all members
69 pype/plugins/maya/load/load_vdb_to_redshift.py (Normal file)

@ -0,0 +1,69 @@
from avalon import api


class LoadVDBtoRedShift(api.Loader):
    """Load OpenVDB in a Redshift Volume Shape"""

    families = ["studio.vdbcache"]
    representations = ["vdb"]

    label = "Load VDB to RedShift"
    icon = "cloud"
    color = "orange"

    def load(self, context, name=None, namespace=None, data=None):

        from maya import cmds
        import avalon.maya.lib as lib
        from avalon.maya.pipeline import containerise

        # Check if the plugin for redshift is available on the pc
        try:
            cmds.loadPlugin("redshift4maya", quiet=True)
        except Exception as exc:
            self.log.error("Encountered exception:\n%s" % exc)
            return

        # Check if viewport drawing engine is Open GL Core (compat)
        render_engine = None
        compatible = "OpenGL"
        if cmds.optionVar(exists="vp2RenderingEngine"):
            render_engine = cmds.optionVar(query="vp2RenderingEngine")

        if not render_engine or not render_engine.startswith(compatible):
            raise RuntimeError("Current scene's settings are incompatible. "
                               "See Preferences > Display > Viewport 2.0 to "
                               "set the render engine to '%s<type>'"
                               % compatible)

        asset = context['asset']

        asset_name = asset["name"]
        namespace = namespace or lib.unique_namespace(
            asset_name + "_",
            prefix="_" if asset_name[0].isdigit() else "",
            suffix="_",
        )

        # Root group
        label = "{}:{}".format(namespace, name)
        root = cmds.group(name=label, empty=True)

        # Create the Redshift volume shape
        volume_node = cmds.createNode("RedshiftVolumeShape",
                                      name="{}RVSShape".format(label),
                                      parent=root)

        cmds.setAttr("{}.fileName".format(volume_node),
                     self.fname,
                     type="string")

        nodes = [root, volume_node]
        self[:] = nodes

        return containerise(
            name=name,
            namespace=namespace,
            nodes=nodes,
            context=context,
            loader=self.__class__.__name__)
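The optionVar probe above is how the loader inspects Maya's Viewport 2.0 renderer without opening the preferences UI. A minimal sketch of the same check, assuming a running Maya session:

from maya import cmds

render_engine = None
if cmds.optionVar(exists="vp2RenderingEngine"):
    render_engine = cmds.optionVar(query="vp2RenderingEngine")

# Typical values: 'OpenGLCoreProfileCompat', 'OpenGLCoreProfileCore', 'DirectX11'
print(render_engine)
print(bool(render_engine and render_engine.startswith("OpenGL")))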
62 pype/plugins/maya/load/load_vdb_to_vray.py (Normal file)

@ -0,0 +1,62 @@
from avalon import api


class LoadVDBtoVRay(api.Loader):

    families = ["studio.vdbcache"]
    representations = ["vdb"]

    label = "Load VDB to VRay"
    icon = "cloud"
    color = "orange"

    def load(self, context, name, namespace, data):

        from maya import cmds
        import avalon.maya.lib as lib
        from avalon.maya.pipeline import containerise

        # Check if viewport drawing engine is Open GL Core (compat)
        render_engine = None
        compatible = "OpenGLCoreProfileCompat"
        if cmds.optionVar(exists="vp2RenderingEngine"):
            render_engine = cmds.optionVar(query="vp2RenderingEngine")

        if not render_engine or render_engine != compatible:
            raise RuntimeError("Current scene's settings are incompatible. "
                               "See Preferences > Display > Viewport 2.0 to "
                               "set the render engine to '%s'" % compatible)

        asset = context['asset']
        version = context["version"]

        asset_name = asset["name"]
        namespace = namespace or lib.unique_namespace(
            asset_name + "_",
            prefix="_" if asset_name[0].isdigit() else "",
            suffix="_",
        )

        # Root group
        label = "{}:{}".format(namespace, name)
        root = cmds.group(name=label, empty=True)

        # Create the VRay volume grid
        grid_node = cmds.createNode("VRayVolumeGrid",
                                    name="{}VVGShape".format(label),
                                    parent=root)

        # Set attributes
        cmds.setAttr("{}.inFile".format(grid_node), self.fname, type="string")
        cmds.setAttr("{}.inReadOffset".format(grid_node),
                     version["startFrames"])

        nodes = [root, grid_node]
        self[:] = nodes

        return containerise(
            name=name,
            namespace=namespace,
            nodes=nodes,
            context=context,
            loader=self.__class__.__name__)
@ -7,7 +7,7 @@ import maya.cmds as cmds
class VRayProxyLoader(api.Loader):
    """Load VRayMesh proxy"""

-    families = ["vrayproxy"]
+    families = ["studio.vrayproxy"]
    representations = ["vrmesh"]

    label = "Import VRay Proxy"

@ -101,10 +101,12 @@ class VRayProxyLoader(api.Loader):
        # Create nodes
        vray_mesh = cmds.createNode('VRayMesh', name="{}_VRMS".format(name))
        mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name))
-        vray_mat = cmds.createNode("VRayMeshMaterial",
+        vray_mat = cmds.shadingNode("VRayMeshMaterial", asShader=True,
                                    name="{}_VRMM".format(name))
-        vray_mat_sg = cmds.createNode("shadingEngine",
-                                      name="{}_VRSG".format(name))
+        vray_mat_sg = cmds.sets(name="{}_VRSG".format(name),
+                                empty=True,
+                                renderable=True,
+                                noSurfaceShader=True)

        cmds.setAttr("{}.fileName".format(vray_mesh),
                     filename,

@ -13,7 +13,7 @@ from pype.maya import lib

class YetiCacheLoader(api.Loader):

-    families = ["yeticache", "yetiRig"]
+    families = ["studio.yeticache", "studio.yetiRig"]
    representations = ["fur"]

    label = "Load Yeti Cache"

@ -284,6 +284,8 @@ class YetiCacheLoader(api.Loader):

        # Apply attributes to pgYetiMaya node
        for attr, value in attributes.items():
+            if value is None:
+                continue
            lib.set_attribute(attr, value, yeti_node)

        # Fix for: YETI-6

@ -3,7 +3,7 @@ import pype.maya.plugin

class YetiRigLoader(pype.maya.plugin.ReferenceLoader):

-    families = ["yetiRig"]
+    families = ["studio.yetiRig"]
    representations = ["ma"]

    label = "Load Yeti Rig"

@ -16,7 +16,7 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):
    """

    order = pyblish.api.CollectorOrder + 0.4
-    families = ["animation"]
+    families = ["studio.animation"]
    label = "Collect Animation Output Geometry"
    hosts = ["maya"]

@ -29,8 +29,11 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):
        out_set = next((i for i in instance.data["setMembers"] if
                        i.endswith("out_SET")), None)

-        assert out_set, ("Expecting out_SET for instance of family"
-                         " '%s'" % family)
+        if out_set is None:
+            warning = "Expecting out_SET for instance of family '%s'" % family
+            self.log.warning(warning)
+            return

        members = cmds.ls(cmds.sets(out_set, query=True), long=True)

        # Get all the relatives of the members

@ -17,7 +17,7 @@ class CollectMayaHistory(pyblish.api.InstancePlugin):
    order = pyblish.api.CollectorOrder + 0.1
    hosts = ["maya"]
    label = "Maya History"
-    families = ["rig"]
+    families = ["studio.rig"]
    verbose = False

    def process(self, instance):
@ -103,10 +103,24 @@ class CollectInstances(pyblish.api.ContextPlugin):
            members_hierarchy = list(set(members + children + parents))

            # Create the instance
-            name = cmds.ls(objset, long=False)[0]  # use short name
-            instance = context.create_instance(data.get("name", name))
+            instance = context.create_instance(objset)
            instance[:] = members_hierarchy

+            # Store the exact members of the object set
+            instance.data["setMembers"] = members
+
+            # Define nice label
+            name = cmds.ls(objset, long=False)[0]  # use short name
+            label = "{0} ({1})".format(name,
+                                       data["asset"])
+
+            # Append start frame and end frame to label if present
+            if "startFrame" in data and "endFrame" in data:
+                label += " [{0}-{1}]".format(int(data["startFrame"]),
+                                             int(data["endFrame"]))
+
+            instance.data["label"] = label
+
            instance.data.update(data)

            # Produce diagnostic message for any graphical
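The condition in the label block is worth the fix above: in the committed form, "startFrame" and "endFrame" in data, the left operand is a non-empty string, which is always truthy, so only the endFrame membership test ever ran. A standalone illustration:

data = {"endFrame": 1100}  # note: no "startFrame" key

print("startFrame" and "endFrame" in data)          # True (wrong)
print("startFrame" in data and "endFrame" in data)  # False (intended)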
@ -1,7 +1,10 @@
+import re
+import os
+import glob
+
from maya import cmds
import pyblish.api
import pype.maya.lib as lib
-from cb.utils.maya import context, shaders

SHAPE_ATTRS = ["castsShadows",
               "receiveShadows",
@ -48,6 +51,139 @@ def get_look_attrs(node):
|
|||
return result
|
||||
|
||||
|
||||
def node_uses_image_sequence(node):
|
||||
"""Return whether file node uses an image sequence or single image.
|
||||
|
||||
Determine if a node uses an image sequence or just a single image,
|
||||
not always obvious from its file path alone.
|
||||
|
||||
Args:
|
||||
node (str): Name of the Maya node
|
||||
|
||||
Returns:
|
||||
bool: True if node uses an image sequence
|
||||
|
||||
"""
|
||||
|
||||
# useFrameExtension indicates an explicit image sequence
|
||||
node_path = get_file_node_path(node).lower()
|
||||
|
||||
# The following tokens imply a sequence
|
||||
patterns = ["<udim>", "<tile>", "<uvtile>", "u<u>_v<v>", "<frame0"]
|
||||
|
||||
return (cmds.getAttr('%s.useFrameExtension' % node) or
|
||||
any(pattern in node_path for pattern in patterns))
|
||||
|
||||
|
||||
def seq_to_glob(path):
|
||||
"""Takes an image sequence path and returns it in glob format,
|
||||
with the frame number replaced by a '*'.
|
||||
|
||||
Image sequences may be numerical sequences, e.g. /path/to/file.1001.exr
|
||||
will return as /path/to/file.*.exr.
|
||||
|
||||
Image sequences may also use tokens to denote sequences, e.g.
|
||||
/path/to/texture.<UDIM>.tif will return as /path/to/texture.*.tif.
|
||||
|
||||
Args:
|
||||
path (str): the image sequence path
|
||||
|
||||
Returns:
|
||||
str: Return glob string that matches the filename pattern.
|
||||
|
||||
"""
|
||||
|
||||
if path is None:
|
||||
return path
|
||||
|
||||
# If any of the patterns, convert the pattern
|
||||
patterns = {
|
||||
"<udim>": "<udim>",
|
||||
"<tile>": "<tile>",
|
||||
"<uvtile>": "<uvtile>",
|
||||
"#": "#",
|
||||
"u<u>_v<v>": "<u>|<v>",
|
||||
"<frame0": "<frame0\d+>",
|
||||
"<f>": "<f>"
|
||||
}
|
||||
|
||||
lower = path.lower()
|
||||
has_pattern = False
|
||||
for pattern, regex_pattern in patterns.items():
|
||||
if pattern in lower:
|
||||
path = re.sub(regex_pattern, "*", path, flags=re.IGNORECASE)
|
||||
has_pattern = True
|
||||
|
||||
if has_pattern:
|
||||
return path
|
||||
|
||||
base = os.path.basename(path)
|
||||
matches = list(re.finditer(r'\d+', base))
|
||||
if matches:
|
||||
match = matches[-1]
|
||||
new_base = '{0}*{1}'.format(base[:match.start()],
|
||||
base[match.end():])
|
||||
head = os.path.dirname(path)
|
||||
return os.path.join(head, new_base)
|
||||
else:
|
||||
return path


def get_file_node_path(node):
    """Get the file path used by a Maya file node.

    Args:
        node (str): Name of the Maya file node

    Returns:
        str: the file path in use

    """
    # if the path appears to be a sequence, use computedFileTextureNamePattern,
    # as this preserves the <> tokens
    if cmds.attributeQuery('computedFileTextureNamePattern',
                           node=node,
                           exists=True):
        plug = '{0}.computedFileTextureNamePattern'.format(node)
        texture_pattern = cmds.getAttr(plug)

        patterns = ["<udim>",
                    "<tile>",
                    "u<u>_v<v>",
                    "<f>",
                    "<frame0",
                    "<uvtile>"]
        lower = texture_pattern.lower()
        if any(pattern in lower for pattern in patterns):
            return texture_pattern

    # otherwise use fileTextureName
    return cmds.getAttr('{0}.fileTextureName'.format(node))


def get_file_node_files(node):
    """Return the file paths related to the file node

    Note:
        Will only return existing files. Returns an empty list
        if no valid existing files are linked.

    Returns:
        list: List of full file paths.

    """

    path = get_file_node_path(node)
    path = cmds.workspace(expandName=path)
    if node_uses_image_sequence(node):
        glob_pattern = seq_to_glob(path)
        return glob.glob(glob_pattern)
    elif os.path.exists(path):
        return [path]
    else:
        return []
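Taken together, these helpers resolve whatever a file node points at into concrete paths on disk. A hedged sketch of how they might be used in a live Maya session (the node name pattern is hypothetical):

    from maya import cmds

    for node in cmds.ls("*_file*", type="file"):
        files = get_file_node_files(node)
        if not files:
            print("No files on disk for %s" % node)
        else:
            print("%s resolves to %i file(s)" % (node, len(files)))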


class CollectLook(pyblish.api.InstancePlugin):
    """Collect look data for instance.

@@ -67,14 +203,14 @@ class CollectLook(pyblish.api.InstancePlugin):
    """

    order = pyblish.api.CollectorOrder + 0.4
    families = ["look"]
    families = ["studio.look"]
    label = "Collect Look"
    hosts = ["maya"]

    def process(self, instance):
        """Collect the Look in the instance with the correct layer settings"""

        with context.renderlayer(instance.data["renderlayer"]):
        with lib.renderlayer(instance.data["renderlayer"]):
            self.collect(instance)

    def collect(self, instance):
@@ -268,7 +404,7 @@ class CollectLook(pyblish.api.InstancePlugin):
            # paths as the computed patterns
            source = source.replace("\\", "/")

            files = shaders.get_file_node_files(node)
            files = get_file_node_files(node)
            if len(files) == 0:
                self.log.error("No valid files found from node `%s`" % node)
@@ -17,7 +17,7 @@ class CollectModelData(pyblish.api.InstancePlugin):

    order = pyblish.api.CollectorOrder + 0.499
    label = 'Collect Model Data'
    families = ["model"]
    families = ["studio.model"]

    def process(self, instance):
        # Extract only current frame (override)
@@ -28,7 +28,7 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
    order = pyblish.api.CollectorOrder + 0.01
    label = "Render Elements / AOVs"
    hosts = ["maya"]
    families = ["renderlayer"]
    families = ["studio.renderlayer"]

    def process(self, instance):
@@ -41,9 +41,9 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):

        self.log.info("Renderer found: {}".format(renderer))

        rp_node_types = {"vray": "VRayRenderElement",
                         "arnold": "aiAOV",
                         "redshift": "RedshiftAOV"}
        rp_node_types = {"vray": ["VRayRenderElement", "VRayRenderElementSet"],
                         "arnold": ["aiAOV"],
                         "redshift": ["RedshiftAOV"]}

        if renderer not in rp_node_types.keys():
            self.log.error("Unsupported renderer found: '{}'".format(renderer))
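The switch from single node-type strings to lists works because `cmds.ls` accepts a list for its `type` flag, so one call can gather several AOV node types at once:

    # One query for both V-Ray render element node types
    render_elements = cmds.ls(type=["VRayRenderElement", "VRayRenderElementSet"])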
@@ -52,7 +52,8 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
        result = []

        # Collect all AOVs / Render Elements
        with lib.renderlayer(instance.name):
        layer = instance.data["setMembers"]
        with lib.renderlayer(layer):

            node_type = rp_node_types[renderer]
            render_elements = cmds.ls(type=node_type)
@@ -64,32 +65,36 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
                continue

            pass_name = self.get_pass_name(renderer, element)
            render_pass = "%s.%s" % (instance.name, pass_name)
            render_pass = "%s.%s" % (instance.data["subset"], pass_name)

            result.append(render_pass)

        self.log.info("Found {} render elements / AOVs for "
                      "'{}'".format(len(result), instance.name))
                      "'{}'".format(len(result), instance.data["subset"]))

        instance.data["renderPasses"] = result

    def get_pass_name(self, renderer, node):

        if renderer == "vray":

            # Get render element pass type
            vray_node_attr = next(attr for attr in cmds.listAttr(node)
                                  if attr.startswith("vray_name"))

            pass_type = vray_node_attr.rsplit("_", 1)[-1]

            # Support V-Ray extratex explicit name (if set by user)
            if pass_type == "extratex":
                vray_node_attr = "vray_explicit_name_extratex"
                explicit_attr = "{}.vray_explicit_name_extratex".format(node)
                explicit_name = cmds.getAttr(explicit_attr)
                if explicit_name:
                    return explicit_name

            # The node type is in the attribute name, but we still need to
            # check the value of the attribute as it can be changed
            pass_name = cmds.getAttr("{}.{}".format(node, vray_node_attr))
            return cmds.getAttr("{}.{}".format(node, vray_node_attr))

        elif renderer in ["arnold", "redshift"]:
            pass_name = cmds.getAttr("{}.name".format(node))
            return cmds.getAttr("{}.name".format(node))
        else:
            raise RuntimeError("Unsupported renderer: '{}'".format(renderer))

        return pass_name
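The V-Ray branch relies on a naming convention visible in the code itself: each render element carries a `vray_name_*` attribute whose suffix is the pass type. Illustrating the suffix extraction with a hypothetical attribute name:

    vray_node_attr = "vray_name_extratex"  # hypothetical listAttr() match
    pass_type = vray_node_attr.rsplit("_", 1)[-1]
    print(pass_type)  # "extratex", which triggers the explicit-name lookup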
@@ -74,7 +74,7 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):

            # instance subset
            "family": "Render Layers",
            "families": ["renderlayer"],
            "families": ["studio.renderlayer"],
            "asset": asset,
            "time": api.time(),
            "author": context.data["user"],
@@ -103,7 +103,13 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
        overrides = self.parse_options(render_globals)
        data.update(**overrides)

        # Define nice label
        label = "{0} ({1})".format(layername, data["asset"])
        label += " [{0}-{1}]".format(int(data["startFrame"]),
                                     int(data["endFrame"]))

        instance = context.create_instance(layername)
        instance.data["label"] = label
        instance.data.update(data)

    def get_render_attribute(self, attr):
@@ -25,7 +25,7 @@ class CollectSetDress(pyblish.api.InstancePlugin):

    order = pyblish.api.CollectorOrder + 0.49
    label = "Set Dress"
    families = ["setdress"]
    families = ["studio.setdress"]

    def process(self, instance):
@@ -28,7 +28,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):

    order = pyblish.api.CollectorOrder + 0.45
    label = "Collect Yeti Cache"
    families = ["yetiRig", "yeticache"]
    families = ["studio.yetiRig", "studio.yeticache"]
    hosts = ["maya"]
    tasks = ["animation", "fx"]
@@ -21,25 +21,27 @@ class CollectYetiRig(pyblish.api.InstancePlugin):

    order = pyblish.api.CollectorOrder + 0.4
    label = "Collect Yeti Rig"
    families = ["yetiRig"]
    families = ["studio.yetiRig"]
    hosts = ["maya"]

    def process(self, instance):

        assert "input_SET" in cmds.sets(instance.name, query=True), (
        assert "input_SET" in instance.data["setMembers"], (
            "Yeti Rig must have an input_SET")

        # Get the input meshes information
        input_content = cmds.sets("input_SET", query=True)
        input_nodes = cmds.listRelatives(input_content,
                                         allDescendents=True,
                                         fullPath=True) or input_content
        input_content = cmds.ls(cmds.sets("input_SET", query=True), long=True)

        # Get all the shapes
        input_shapes = cmds.ls(input_nodes, long=True, noIntermediate=True)
        # Include children
        input_content += cmds.listRelatives(input_content,
                                            allDescendents=True,
                                            fullPath=True) or []

        # Ignore intermediate objects
        input_content = cmds.ls(input_content, long=True, noIntermediate=True)

        # Store all connections
        connections = cmds.listConnections(input_shapes,
        connections = cmds.listConnections(input_content,
                                           source=True,
                                           destination=False,
                                           connections=True,
@@ -62,10 +64,9 @@ class CollectYetiRig(pyblish.api.InstancePlugin):

        # Collect any textures if used
        yeti_resources = []
        yeti_nodes = cmds.ls(instance[:], type="pgYetiMaya")
        yeti_nodes = cmds.ls(instance[:], type="pgYetiMaya", long=True)
        for node in yeti_nodes:
            # Get Yeti resources (textures)
            # TODO: referenced files in Yeti Graph
            resources = self.get_yeti_resources(node)
            yeti_resources.extend(resources)
@@ -78,11 +79,16 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
        instance.data["endFrame"] = 1

    def get_yeti_resources(self, node):
        """Get all texture file paths
        """Get all resource file paths

        If a texture is a sequence it gathers all sibling files to ensure
        the texture sequence is complete.

        References can be used in the Yeti graph, which means it is
        possible to load previously cached files. This information will
        need to be stored and, if the file is not published, copied to
        the resource folder.

        Args:
            node (str): node name of the pgYetiMaya node
@@ -91,15 +97,25 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
        """
        resources = []
        image_search_path = cmds.getAttr("{}.imageSearchPath".format(node))

        # List all related textures
        texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
        self.log.info("Found %i texture(s)" % len(texture_filenames))

        # Get all reference nodes
        reference_nodes = cmds.pgYetiGraph(node,
                                           listNodes=True,
                                           type="reference")
        self.log.info("Found %i reference node(s)" % len(reference_nodes))

        if texture_filenames and not image_search_path:
            raise ValueError("pgYetiMaya node '%s' is missing the path to "
                             "the files in the 'imageSearchPath' "
                             "attribute" % node)

        # Collect all texture files
        for texture in texture_filenames:
            node_resources = {"files": [], "source": texture, "node": node}
            item = {"files": [], "source": texture, "node": node}
            texture_filepath = os.path.join(image_search_path, texture)
            if len(texture.split(".")) > 2:
@@ -107,20 +123,46 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
                if "<UDIM>" in texture:
                    sequences = self.get_sequence(texture_filepath,
                                                  pattern="<UDIM>")
                    node_resources["files"].extend(sequences)
                    item["files"].extend(sequences)

                # Frame-based textures (e.g. animated masks)
                elif "%04d" in texture:
                    sequences = self.get_sequence(texture_filepath,
                                                  pattern="%04d")
                    node_resources["files"].extend(sequences)
                    item["files"].extend(sequences)
                # Assuming it is a fixed name
                else:
                    node_resources["files"].append(texture_filepath)
                    item["files"].append(texture_filepath)
            else:
                node_resources["files"].append(texture_filepath)
                item["files"].append(texture_filepath)

            resources.append(node_resources)
            resources.append(item)

        # Collect all referenced files
        for reference_node in reference_nodes:
            ref_file = cmds.pgYetiGraph(node,
                                        node=reference_node,
                                        param="reference_file",
                                        getParamValue=True)

            if not os.path.isfile(ref_file):
                raise RuntimeError("Reference file must be a full file path!")

            # Create resource dict
            item = {"files": [],
                    "source": ref_file,
                    "node": node,
                    "graphnode": reference_node,
                    "param": "reference_file"}

            ref_file_name = os.path.basename(ref_file)
            if "%04d" in ref_file_name:
                ref_files = self.get_sequence(ref_file)
                item["files"].extend(ref_files)
            else:
                item["files"].append(ref_file)

            resources.append(item)

        return resources
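Every entry appended to `resources` shares the same general shape, which downstream plug-ins can rely on. Two hypothetical entries, for illustration only:

    # Texture entry (values are made up)
    texture_item = {"files": ["/textures/mane.1001.tif", "/textures/mane.1002.tif"],
                    "source": "mane.%04d.tif",
                    "node": "pgYetiMaya1"}

    # Reference entry additionally records the graph node and parameter
    reference_item = {"files": ["/caches/groom.abc"],
                      "source": "/caches/groom.abc",
                      "node": "pgYetiMaya1",
                      "graphnode": "reference1",
                      "param": "reference_file"}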
@@ -139,7 +181,6 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
            list: file sequence.

        """

        from avalon.vendor import clique

        escaped = re.escape(filename)
@@ -150,7 +191,6 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
                 if re.match(re_pattern, f)]

        pattern = [clique.PATTERNS["frames"]]
        collection, remainder = clique.assemble(files,
                                                patterns=pattern)
        collection, remainder = clique.assemble(files, patterns=pattern)

        return collection
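For readers unfamiliar with `clique`: `assemble` groups a flat list of file names into frame collections plus a remainder that matched no pattern. A minimal sketch with hypothetical names:

    import clique

    files = ["groom.1001.fur", "groom.1002.fur", "groom.1003.fur", "notes.txt"]
    collections, remainder = clique.assemble(
        files, patterns=[clique.PATTERNS["frames"]])
    print(collections)  # roughly: [<Collection "groom.%04d.fur [1001-1003]">]
    print(remainder)    # ['notes.txt']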
@@ -17,7 +17,7 @@ class ExtractColorbleedAnimation(pype.api.Extractor):

    label = "Extract Animation"
    hosts = ["maya"]
    families = ["animation"]
    families = ["studio.animation"]

    def process(self, instance):
@@ -5,7 +5,7 @@ from maya import cmds
import avalon.maya
import pype.api

import cb.utils.maya.context as context
import pype.maya.lib as lib


class ExtractCameraAlembic(pype.api.Extractor):
@@ -18,7 +18,7 @@ class ExtractCameraAlembic(pype.api.Extractor):

    label = "Camera (Alembic)"
    hosts = ["maya"]
    families = ["camera"]
    families = ["studio.camera"]

    def process(self, instance):
@@ -66,8 +66,8 @@ class ExtractCameraAlembic(pype.api.Extractor):

        job_str += ' -file "{0}"'.format(path)

        with context.evaluation("off"):
            with context.no_refresh():
        with lib.evaluation("off"):
            with lib.no_refresh():
                cmds.AbcExport(j=job_str, verbose=False)

        if "files" not in instance.data:


@@ -4,9 +4,8 @@ from maya import cmds

import avalon.maya
import pype.api

import cb.utils.maya.context as context
from cb.utils.maya.animation import bakeToWorldSpace
from pype.lib import grouper
from pype.maya import lib


def massage_ma_file(path):
@@ -35,6 +34,37 @@ def massage_ma_file(path):
    f.close()


def unlock(plug):
    """Unlocks attribute and disconnects inputs for a plug.

    This will also recursively unlock the attribute
    upwards to any parent attributes for compound
    attributes, to ensure it's fully unlocked and free
    to change the value.

    """
    node, attr = plug.rsplit(".", 1)

    # Unlock attribute
    cmds.setAttr(plug, lock=False)

    # Also unlock any parent attribute (if compound)
    parents = cmds.attributeQuery(attr, node=node, listParent=True)
    if parents:
        for parent in parents:
            unlock("{0}.{1}".format(node, parent))

    # Break incoming connections
    connections = cmds.listConnections(plug,
                                       source=True,
                                       destination=False,
                                       plugs=True,
                                       connections=True)
    if connections:
        for destination, source in grouper(connections, 2):
            cmds.disconnectAttr(source, destination)
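With `connections=True` and `plugs=True`, `cmds.listConnections` returns a flat list alternating the queried (destination) plug and its source, which is why the code walks it in pairs. A sketch of that pairing, assuming `grouper` behaves like the classic itertools recipe and using hypothetical plug names:

    # Flat list as returned by listConnections: [dst1, src1, dst2, src2, ...]
    connections = ["cam1.backgroundColorR", "anim1.output",
                   "cam1.overscan", "expr1.output"]
    for destination, source in grouper(connections, 2):
        print("would disconnect %s -> %s" % (source, destination))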


class ExtractCameraMayaAscii(pype.api.Extractor):
    """Extract a Camera as Maya Ascii.
@@ -53,7 +83,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor):

    label = "Camera (Maya Ascii)"
    hosts = ["maya"]
    families = ["camera"]
    families = ["studio.camera"]

    def process(self, instance):
@@ -67,8 +97,8 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
        # TODO: Implement a bake to non-world space
        # Currently it will always bake the resulting camera to world-space
        # and it does not allow to include the parent hierarchy, even though
        # with `bakeToWorldSpace` set to False it should include its hierarchy
        # to be correct with the family implementation.
        # with `bakeToWorldSpace` set to False it should include its
        # hierarchy to be correct with the family implementation.
        if not bake_to_worldspace:
            self.log.warning("Camera (Maya Ascii) export only supports world"
                             "space baked camera extractions. The disabled "
@@ -96,17 +126,30 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
        # Perform extraction
        self.log.info("Performing camera bakes for: {0}".format(transform))
        with avalon.maya.maintained_selection():
            with context.evaluation("off"):
                with context.no_refresh():
                    baked = bakeToWorldSpace(transform,
                                             frameRange=range_with_handles,
                                             step=step)
            with lib.evaluation("off"):
                with lib.no_refresh():
                    baked = lib.bake_to_world_space(
                        transform,
                        frame_range=range_with_handles,
                        step=step
                    )
                    baked_shapes = cmds.ls(baked,
                                           type="camera",
                                           dag=True,
                                           shapes=True,
                                           long=True)

                    # Fix PLN-178: Don't allow background color to be non-black
                    for cam in baked_shapes:
                        attrs = {"backgroundColorR": 0.0,
                                 "backgroundColorG": 0.0,
                                 "backgroundColorB": 0.0,
                                 "overscan": 1.0}
                        for attr, value in attrs.items():
                            plug = "{0}.{1}".format(cam, attr)
                            unlock(plug)
                            cmds.setAttr(plug, value)

        self.log.info("Performing extraction..")
        cmds.select(baked_shapes, noExpand=True)
        cmds.file(path,


@@ -6,10 +6,9 @@ from maya import cmds

import pyblish.api
import avalon.maya
import pype.api
import pype.maya.lib as maya

from cb.utils.maya import context
import pype.api
import pype.maya.lib as lib


class ExtractLook(pype.api.Extractor):
@@ -23,7 +22,7 @@ class ExtractLook(pype.api.Extractor):

    label = "Extract Look (Maya ASCII + JSON)"
    hosts = ["maya"]
    families = ["look"]
    families = ["studio.look"]
    order = pyblish.api.ExtractorOrder + 0.2

    def process(self, instance):
@@ -63,10 +62,10 @@ class ExtractLook(pype.api.Extractor):

        # Extract in correct render layer
        layer = instance.data.get("renderlayer", "defaultRenderLayer")
        with context.renderlayer(layer):
        with lib.renderlayer(layer):
            # TODO: Ensure membership edits don't become renderlayer overrides
            with context.empty_sets(sets, force=True):
                with maya.attribute_values(remap):
            with lib.empty_sets(sets, force=True):
                with lib.attribute_values(remap):
                    with avalon.maya.maintained_selection():
                        cmds.select(sets, noExpand=True)
                        cmds.file(maya_path,
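The extraction stacks context managers that each make a temporary scene change and undo it afterwards. A minimal sketch of the general pattern, assuming behaviour analogous to `lib.attribute_values` (not the actual implementation):

    from contextlib import contextmanager
    from maya import cmds

    @contextmanager
    def attribute_values(attr_values):
        """Temporarily apply attribute values, restoring originals on exit."""
        original = {plug: cmds.getAttr(plug) for plug in attr_values}
        try:
            for plug, value in attr_values.items():
                cmds.setAttr(plug, value)
            yield
        finally:
            for plug, value in original.items():
                cmds.setAttr(plug, value)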
@@ -15,7 +15,7 @@ class ExtractMayaAsciiRaw(pype.api.Extractor):

    label = "Maya ASCII (Raw)"
    hosts = ["maya"]
    families = ["pype.mayaAscii"]
    families = ["studio.mayaAscii"]

    def process(self, instance):
@@ -44,7 +44,10 @@ class ExtractMayaAsciiRaw(pype.api.Extractor):
                      typ="mayaAscii",
                      exportSelected=True,
                      preserveReferences=True,
                      constructionHistory=True)
                      constructionHistory=True,
                      shader=True,
                      constraints=True,
                      expressions=True)

        if "files" not in instance.data:
            instance.data["files"] = list()
@@ -4,8 +4,7 @@ from maya import cmds

import avalon.maya
import pype.api

from cb.utils.maya import context
import pype.maya.lib as lib


class ExtractModel(pype.api.Extractor):
@@ -25,7 +24,7 @@ class ExtractModel(pype.api.Extractor):

    label = "Model (Maya ASCII)"
    hosts = ["maya"]
    families = ["model"]
    families = ["studio.model"]

    def process(self, instance):
@@ -47,15 +46,15 @@ class ExtractModel(pype.api.Extractor):
                           noIntermediate=True,
                           long=True)

        with context.no_display_layers(instance):
            with context.displaySmoothness(members,
                                           divisionsU=0,
                                           divisionsV=0,
                                           pointsWire=4,
                                           pointsShaded=1,
                                           polygonObject=1):
                with context.shader(members,
                                    shadingEngine="initialShadingGroup"):
        with lib.no_display_layers(instance):
            with lib.displaySmoothness(members,
                                       divisionsU=0,
                                       divisionsV=0,
                                       pointsWire=4,
                                       pointsShaded=1,
                                       polygonObject=1):
                with lib.shader(members,
                                shadingEngine="initialShadingGroup"):
                    with avalon.maya.maintained_selection():
                        cmds.select(members, noExpand=True)
                        cmds.file(path,
@@ -17,8 +17,8 @@ class ExtractColorbleedAlembic(pype.api.Extractor):

    label = "Extract Pointcache (Alembic)"
    hosts = ["maya"]
    families = ["pointcache",
                "model"]
    families = ["studio.pointcache",
                "studio.model"]

    def process(self, instance):
@@ -35,11 +35,9 @@ class ExtractColorbleedAlembic(pype.api.Extractor):
        # Get extra export arguments
        writeColorSets = instance.data.get("writeColorSets", False)

        self.log.info("Extracting animation..")
        self.log.info("Extracting pointcache..")
        dirname = self.staging_dir(instance)

        self.log.info("nodes: %s" % str(nodes))

        parent_dir = self.staging_dir(instance)
        filename = "{name}.abc".format(**instance.data)
        path = os.path.join(parent_dir, filename)
@@ -11,7 +11,7 @@ class ExtractColorbleedRig(pype.api.Extractor):

    label = "Extract Rig (Maya ASCII)"
    hosts = ["maya"]
    families = ["rig"]
    families = ["studio.rig"]

    def process(self, instance):
@@ -18,7 +18,7 @@ class ExtractSetDress(pype.api.Extractor):

    label = "Extract Set Dress"
    hosts = ["maya"]
    families = ["setdress"]
    families = ["studio.setdress"]

    def process(self, instance):
Some files were not shown because too many files have changed in this diff.