initial commit

Jakub Jezek 2018-09-13 15:11:59 +02:00
parent 415527918a
commit b72efea31c
187 changed files with 16919 additions and 0 deletions

99
.gitignore vendored Normal file
@@ -0,0 +1,99 @@
# Created by .ignore support plugin (hsz.mobi)
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# dotenv
.env
# virtualenv
.venv
venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject
# Pycharm IDE settings
.idea

20
README.md Normal file
@@ -0,0 +1,20 @@
The base studio *config* for [Avalon](https://getavalon.github.io/)
<br>
_This configuration acts as a starting point for all pype club clients with an Avalon deployment._
### Code convention
Below are some of the standard practices applied to this repository.
- **Etiquette: PEP8**
- All code is written to PEP8. It is recommended you use a linter as you work; flake8 and pylint are both good options.
- **Etiquette: Napoleon docstrings**
- All docstrings are written in Google Napoleon format. See [Napoleon](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) for details.
- **Etiquette: Semantic Versioning**
- This project follows [semantic versioning](http://semver.org).
- **Etiquette: Underscore means private**
- Anything prefixed with an underscore means that it is internal to wherever it is used. For example, a variable name is only ever used in the parent function or class. A module is not for use by the end-user. In contrast, anything without an underscore is public, but not necessarily part of the API. Members of the API reside in `api.py`.
- **API: Idempotence**
- A public function must be able to be called twice and produce the exact same result. This means no changing of state without restoring the previous state when finishing. For example, if a function requires changing the current selection in Autodesk Maya, it must restore the previous selection prior to completing; see the sketch below.
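A minimal sketch combining the two conventions above: a Google Napoleon docstring on an idempotent Maya helper. The helper name and frame range are hypothetical, for illustration only:

```python
from maya import cmds


def bake_node(node, start=1, end=24):
    """Bake keyframes on a node without altering the user's selection.

    Args:
        node (str): Name of the transform to bake.
        start (int): First frame of the bake range.
        end (int): Last frame of the bake range.

    Returns:
        str: The node that was baked.
    """
    previous = cmds.ls(selection=True) or []
    cmds.select(node, replace=True)  # the state change we must undo
    try:
        cmds.bakeResults(node, time=(start, end), simulation=True)
    finally:
        # Restore the previous selection so repeated calls leave
        # the scene state untouched
        if previous:
            cmds.select(previous, replace=True, noExpand=True)
        else:
            cmds.select(clear=True)
    return node
```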

26
config/__init__.py Normal file
@@ -0,0 +1,26 @@
import os
from pyblish import api as pyblish
from avalon import api as avalon
from .launcher_actions import register_launcher_actions
from .lib import collect_container_metadata
PACKAGE_DIR = os.path.dirname(__file__)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
# Global plugin paths
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "global", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "global", "load")
def install():
print("Registering global plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
def uninstall():
print("Deregistering global plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)

202
config/action.py Normal file
@@ -0,0 +1,202 @@
# absolute_import is needed to counter the `module has no cmds error` in Maya
from __future__ import absolute_import
import pyblish.api
def get_errored_instances_from_context(context):
instances = list()
for result in context.data["results"]:
if result["instance"] is None:
# When instance is None we are on the "context" result
continue
if result["error"]:
instances.append(result["instance"])
return instances
def get_errored_plugins_from_data(context):
"""Get all failed validation plugins
Args:
context (object):
Returns:
list of plugins which failed during validation
"""
plugins = list()
results = context.data.get("results", [])
for result in results:
if result["success"] is True:
continue
plugins.append(result["plugin"])
return plugins
class RepairAction(pyblish.api.Action):
"""Repairs the action
To process the repairing this requires a static `repair(instance)` method
is available on the plugin.
"""
label = "Repair"
on = "failed" # This action is only available on a failed plug-in
icon = "wrench" # Icon from Awesome Icon
def process(self, context, plugin):
if not hasattr(plugin, "repair"):
raise RuntimeError("Plug-in does not have repair method.")
# Get the errored instances
self.log.info("Finding failed instances..")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
for instance in instances:
plugin.repair(instance)
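# A minimal sketch of a validator opting into this action; the plug-in name
# and repair body are hypothetical:
#
#     class ValidateNoEmptyGroups(pyblish.api.InstancePlugin):
#         order = pyblish.api.ValidatorOrder
#         actions = [RepairAction]
#
#         @staticmethod
#         def repair(instance):
#             ...  # fix the invalid state collected for this instance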
class RepairContextAction(pyblish.api.Action):
"""Repairs the action
To process the repairing this requires a static `repair(instance)` method
is available on the plugin.
"""
label = "Repair Context"
on = "failed" # This action is only available on a failed plug-in
def process(self, context, plugin):
if not hasattr(plugin, "repair"):
raise RuntimeError("Plug-in does not have repair method.")
# Get the errored plug-ins
self.log.info("Finding failed plug-ins..")
errored_plugins = get_errored_plugins_from_data(context)
# Check whether this plug-in is among the errored plug-ins
if plugin in errored_plugins:
self.log.info("Attempting fix ...")
plugin.repair()
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid nodes in Maya when plug-in failed.
To retrieve the invalid nodes this assumes a static `get_invalid()`
method is available on the plugin.
"""
label = "Select invalid"
on = "failed" # This action is only available on a failed plug-in
icon = "search" # Icon from Awesome Icon
def process(self, context, plugin):
try:
from maya import cmds
except ImportError:
raise ImportError("Current host is not Maya")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
self.log.warning("Plug-in returned to be invalid, "
"but has no selectable nodes.")
# Ensure unique (process each node only once)
invalid = list(set(invalid))
if invalid:
self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid))
cmds.select(invalid, replace=True, noExpand=True)
else:
self.log.info("No invalid nodes found.")
cmds.select(deselect=True)
class GenerateUUIDsOnInvalidAction(pyblish.api.Action):
"""Generate UUIDs on the invalid nodes in the instance.
Invalid nodes are those returned by the plugin's `get_invalid` method.
As such it is the plug-in's responsibility to ensure the nodes that
receive new UUIDs are actually invalid.
Requires:
- instance.data["asset"]
"""
label = "Regenerate UUIDs"
on = "failed" # This action is only available on a failed plug-in
icon = "wrench" # Icon from Awesome Icon
def process(self, context, plugin):
self.log.info("Finding bad nodes..")
# Get the errored instances
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the nodes from the all instances that ran through this plug-in
all_invalid = []
for instance in instances:
invalid = plugin.get_invalid(instance)
if invalid:
self.log.info("Fixing instance {}".format(instance.name))
self._update_id_attribute(instance, invalid)
all_invalid.extend(invalid)
if not all_invalid:
self.log.info("No invalid nodes found.")
return
all_invalid = list(set(all_invalid))
self.log.info("Generated ids on nodes: {0}".format(all_invalid))
def _update_id_attribute(self, instance, nodes):
"""Delete the id attribute
Args:
instance: The instance we're fixing for
nodes (list): all nodes to regenerate ids on
"""
import config.apps.maya.lib as lib
import avalon.io as io
asset = instance.data['asset']
asset_id = io.find_one({"name": asset, "type": "asset"},
projection={"_id": True})['_id']
for node, _id in lib.generate_ids(nodes, asset_id=asset_id):
lib.set_id(node, _id, overwrite=True)

36
config/api.py Normal file
@@ -0,0 +1,36 @@
from collections import OrderedDict
from .plugin import (
Extractor,
ValidatePipelineOrder,
ValidateContentsOrder,
ValidateSceneOrder,
ValidateMeshOrder
)
# temporary fix, might
from .action import (
get_errored_instances_from_context,
SelectInvalidAction,
GenerateUUIDsOnInvalidAction,
RepairAction,
RepairContextAction
)
__all__ = [
# plugin classes
"Extractor",
# ordering
"ValidatePipelineOrder",
"ValidateContentsOrder",
"ValidateSceneOrder",
"ValidateMeshOrder",
# action
"get_errored_instances_from_context",
"SelectInvalidAction",
"GenerateUUIDsOnInvalidAction",
"RepairAction"
]

0
config/apps/__init__.py Normal file
67
config/apps/fusion/__init__.py Normal file
@@ -0,0 +1,67 @@
import os
from avalon import api as avalon
from pyblish import api as pyblish
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "fusion", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "fusion", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "fusion", "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "fusion", "inventory")
def install():
print("Registering Fusion plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
# Disable all families except for the ones we explicitly want to see
family_states = ["colorbleed.imagesequence",
"colorbleed.camera",
"colorbleed.pointcache"]
avalon.data["familiesStateDefault"] = False
avalon.data["familiesStateToggled"] = family_states
def uninstall():
print("Deregistering Fusion plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
def on_pyblish_instance_toggled(instance, new_value, old_value):
"""Toggle saver tool passthrough states on instance toggles."""
from avalon.fusion import comp_lock_and_undo_chunk
comp = instance.context.data.get("currentComp")
if not comp:
return
savers = [tool for tool in instance if
getattr(tool, "ID", None) == "Saver"]
if not savers:
return
# Whether instances should be passthrough based on new value
passthrough = not new_value
with comp_lock_and_undo_chunk(comp,
undo_queue_name="Change instance "
"active state"):
for tool in savers:
attrs = tool.GetAttrs()
current = attrs["TOOLB_PassThrough"]
if current != passthrough:
tool.SetAttrs({"TOOLB_PassThrough": passthrough})

61
config/apps/fusion/lib.py Normal file
@@ -0,0 +1,61 @@
import sys
from avalon.vendor.Qt import QtGui
import avalon.fusion
self = sys.modules[__name__]
self._project = None
def update_frame_range(start, end, comp=None, set_render_range=True):
"""Set Fusion comp's start and end frame range
Args:
start (float, int): start frame
end (float, int): end frame
comp (object, Optional): comp object from fusion
set_render_range (bool, Optional): When True this will also set the
composition's render start and end frame.
Returns:
None
"""
if not comp:
comp = avalon.fusion.get_current_comp()
attrs = {
"COMPN_GlobalStart": start,
"COMPN_GlobalEnd": end
}
if set_render_range:
attrs.update({
"COMPN_RenderStart": start,
"COMPN_RenderEnd": end
})
with avalon.fusion.comp_lock_and_undo_chunk(comp):
comp.SetAttrs(attrs)
def get_additional_data(container):
"""Get Fusion related data for the container
Args:
container(dict): the container found by the ls() function
Returns:
dict
"""
tool = container["_tool"]
tile_color = tool.TileColor
if tile_color is None:
return {}
return {"color": QtGui.QColor.fromRgbF(tile_color["R"],
tile_color["G"],
tile_color["B"])}

181
config/apps/maya/__init__.py Normal file
@@ -0,0 +1,181 @@
import os
import logging
import weakref
from maya import utils, cmds, mel
from avalon import api as avalon, pipeline, maya
from pyblish import api as pyblish
from ..lib import (
update_task_from_path,
any_outdated
)
from . import menu
from . import lib
log = logging.getLogger("config.apps.maya")
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "maya", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "maya", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "maya", "create")
def install():
pyblish.register_plugin_path(PUBLISH_PATH)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
menu.install()
log.info("Installing callbacks ... ")
avalon.on("init", on_init)
avalon.on("save", on_save)
avalon.on("open", on_open)
avalon.before("save", on_before_save)
log.info("Overriding existing event 'taskChanged'")
override_event("taskChanged", on_task_changed)
log.info("Setting default family states for loader..")
avalon.data["familiesStateToggled"] = ["colorbleed.imagesequence"]
def uninstall():
pyblish.deregister_plugin_path(PUBLISH_PATH)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
menu.uninstall()
def override_event(event, callback):
"""
Override existing event callback
Args:
event (str): name of the event
callback (function): callback to be triggered
Returns:
None
"""
ref = weakref.WeakSet()
ref.add(callback)
pipeline._registered_event_handlers[event] = ref
def on_init(_):
avalon.logger.info("Running callback on init..")
def safe_deferred(fn):
"""Execute deferred the function in a try-except"""
def _fn():
"""safely call in deferred callback"""
try:
fn()
except Exception as exc:
print(exc)
try:
utils.executeDeferred(_fn)
except Exception as exc:
print(exc)
cmds.loadPlugin("AbcImport", quiet=True)
cmds.loadPlugin("AbcExport", quiet=True)
from .customize import override_component_mask_commands
safe_deferred(override_component_mask_commands)
def on_before_save(return_code, _):
"""Run validation for scene's FPS prior to saving"""
return lib.validate_fps()
def on_save(_):
"""Automatically add IDs to new nodes
Any transform of a mesh, without an existing ID, is given one
automatically on file save.
"""
avalon.logger.info("Running callback on save..")
# Update current task for the current scene
update_task_from_path(cmds.file(query=True, sceneName=True))
# Generate ids of the current context on nodes in the scene
nodes = lib.get_id_required_nodes(referenced_nodes=False)
for node, new_id in lib.generate_ids(nodes):
lib.set_id(node, new_id, overwrite=False)
def on_open(_):
"""On scene open let's assume the containers have changed."""
from avalon.vendor.Qt import QtWidgets
from ..widgets import popup
# Ensure scene's FPS is set to project config
lib.validate_fps()
# Update current task for the current scene
update_task_from_path(cmds.file(query=True, sceneName=True))
if any_outdated():
log.warning("Scene has outdated content.")
# Find maya main window
top_level_widgets = {w.objectName(): w for w in
QtWidgets.QApplication.topLevelWidgets()}
parent = top_level_widgets.get("MayaWindow", None)
if parent is None:
log.info("Skipping outdated content pop-up "
"because Maya window can't be found.")
else:
# Show outdated pop-up
def _on_show_inventory():
import avalon.tools.cbsceneinventory as tool
tool.show(parent=parent)
dialog = popup.Popup(parent=parent)
dialog.setWindowTitle("Maya scene has outdated content")
dialog.setMessage("There are outdated containers in "
"your Maya scene.")
dialog.on_show.connect(_on_show_inventory)
dialog.show()
def on_task_changed(*args):
"""Wrapped function of app initialize and maya's on task changed"""
# Inputs (from the switched session and running app)
session = avalon.Session.copy()
app_name = os.environ["AVALON_APP_NAME"]
# Find the application definition
app_definition = pipeline.lib.get_application(app_name)
App = type("app_%s" % app_name,
(avalon.Application,),
{"config": app_definition.copy()})
# Initialize within the new session's environment
app = App()
env = app.environ(session)
app.initialize(env)
# Run
maya.pipeline._on_task_changed()

66
config/apps/maya/customize.py Normal file
@@ -0,0 +1,66 @@
"""A set of commands that install overrides to Maya's UI"""
import maya.cmds as mc
import maya.mel as mel
from functools import partial
import logging
log = logging.getLogger(__name__)
COMPONENT_MASK_ORIGINAL = {}
def override_component_mask_commands():
"""Override component mask ctrl+click behavior.
This implements special behavior for Maya's component
mask menu items where a ctrl+click will instantly make
it an isolated behavior disabling all others.
Tested in Maya 2016 and 2018
"""
log.info("Installing override_component_mask_commands..")
# Get all object mask buttons
buttons = mc.formLayout("objectMaskIcons",
query=True,
childArray=True)
# Skip the triangle list item
buttons = [btn for btn in buttons if btn != "objPickMenuLayout"]
def on_changed_callback(raw_command, state):
"""New callback"""
# If "control" is held force the toggled one to on and
# toggle the others based on whether any of the buttons
# was remaining active after the toggle, if not then
# enable all
if mc.getModifiers() == 4: # = CTRL
state = True
active = [mc.iconTextCheckBox(btn, query=True, value=True) for btn
in buttons]
if any(active):
mc.selectType(allObjects=False)
else:
mc.selectType(allObjects=True)
# Replace #1 with the current button state
cmd = raw_command.replace(" #1", " {}".format(int(state)))
mel.eval(cmd)
for btn in buttons:
# Store a reference to the original command so that if
# we rerun this override command it doesn't recursively
# try to implement the fix. (This also allows us to
# "uninstall" the behavior later)
if btn not in COMPONENT_MASK_ORIGINAL:
original = mc.iconTextCheckBox(btn, query=True, cc=True)
COMPONENT_MASK_ORIGINAL[btn] = original
# Assign the special callback
original = COMPONENT_MASK_ORIGINAL[btn]
new_fn = partial(on_changed_callback, original)
mc.iconTextCheckBox(btn, edit=True, cc=new_fn)

1438
config/apps/maya/lib.py Normal file

File diff suppressed because it is too large

1959
config/apps/maya/menu.json Normal file

File diff suppressed because it is too large

73
config/apps/maya/menu.py Normal file
@@ -0,0 +1,73 @@
import sys
import os
import logging
from avalon.vendor.Qt import QtWidgets, QtCore, QtGui
import maya.cmds as cmds
self = sys.modules[__name__]
self._menu = "colorbleed"
log = logging.getLogger(__name__)
def _get_menu():
"""Return the menu instance if it currently exists in Maya"""
app = QtWidgets.QApplication.instance()
widgets = dict((w.objectName(), w) for w in app.allWidgets())
menu = widgets.get(self._menu)
return menu
def deferred():
import scriptsmenu.launchformaya as launchformaya
import scriptsmenu.scriptsmenu as scriptsmenu
log.info("Attempting to install ...")
# load configuration of custom menu
config_path = os.path.join(os.path.dirname(__file__), "menu.json")
config = scriptsmenu.load_configuration(config_path)
# run the launcher for Maya menu
cb_menu = launchformaya.main(title=self._menu.title(),
objectName=self._menu)
# apply configuration
cb_menu.build_from_configuration(cb_menu, config)
def uninstall():
menu = _get_menu()
if menu:
log.info("Attempting to uninstall ..")
try:
menu.deleteLater()
del menu
except Exception as e:
log.error(e)
def install():
if cmds.about(batch=True):
print("Skipping colorbleed.menu initialization in batch mode..")
return
uninstall()
# Allow time for uninstallation to finish.
cmds.evalDeferred(deferred)
def popup():
"""Pop-up the existing menu near the mouse cursor"""
menu = _get_menu()
cursor = QtGui.QCursor()
point = cursor.pos()
menu.exec_(point)

218
config/apps/maya/plugin.py Normal file
@@ -0,0 +1,218 @@
from avalon import api
def get_reference_node_parents(ref):
"""Return all parent reference nodes of reference node
Args:
ref (str): reference node.
Returns:
list: The upstream parent reference nodes.
"""
from maya import cmds
parent = cmds.referenceQuery(ref,
referenceNode=True,
parent=True)
parents = []
while parent:
parents.append(parent)
parent = cmds.referenceQuery(parent,
referenceNode=True,
parent=True)
return parents
class ReferenceLoader(api.Loader):
"""A basic ReferenceLoader for Maya
This will implement the basic behavior for a loader to inherit from that
will containerize the reference and will implement the `remove` and
`update` logic.
"""
def load(self,
context,
name=None,
namespace=None,
data=None):
import os
from avalon.maya import lib
from avalon.maya.pipeline import containerise
assert os.path.exists(self.fname), "%s does not exist." % self.fname
asset = context['asset']
namespace = namespace or lib.unique_namespace(
asset["name"] + "_",
prefix="_" if asset["name"][0].isdigit() else "",
suffix="_",
)
self.process_reference(context=context,
name=name,
namespace=namespace,
data=data)
# Only containerize if any nodes were loaded by the Loader
nodes = self[:]
if not nodes:
return
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def process_reference(self, context, name, namespace, data):
"""To be implemented by subclass"""
raise NotImplementedError("Must be implemented by subclass")
def _get_reference_node(self, members):
"""Get the reference node from the container members
Args:
members: list of node names
Returns:
str: Reference node name.
"""
from maya import cmds
# Collect the references without .placeHolderList[] attributes as
# unique entries (objects only) and skipping the sharedReferenceNode.
references = set()
for ref in cmds.ls(members, exactType="reference", objectsOnly=True):
# Ignore any `:sharedReferenceNode`
if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
continue
references.add(ref)
assert references, "No reference node found in container"
# Get highest reference node (least parents)
highest = min(references,
key=lambda x: len(get_reference_node_parents(x)))
# Warn the user when we're taking the highest reference node
if len(references) > 1:
self.log.warning("More than one reference node found in "
"container, using highest reference node: "
"%s (in: %s)", highest, list(references))
return highest
def update(self, container, representation):
import os
from maya import cmds
node = container["objectName"]
path = api.get_representation_path(representation)
# Get reference node from container members
members = cmds.sets(node, query=True, nodesOnly=True)
reference_node = self._get_reference_node(members)
file_type = {
"ma": "mayaAscii",
"mb": "mayaBinary",
"abc": "Alembic"
}.get(representation["name"])
assert file_type, "Unsupported representation: %s" % representation
assert os.path.exists(path), "%s does not exist." % path
try:
content = cmds.file(path,
loadReference=reference_node,
type=file_type,
returnNewNodes=True)
except RuntimeError as exc:
# When changing a reference to a file that has load errors the
# command will raise an error even if the file is still loaded
# correctly (e.g. when raising errors on Arnold attributes)
# When the file is loaded and has content, we consider it fine.
if not cmds.referenceQuery(reference_node, isLoaded=True):
raise
content = cmds.referenceQuery(reference_node,
nodes=True,
dagPath=True)
if not content:
raise
self.log.warning("Ignoring file read error:\n%s", exc)
# Fix PLN-40 for older containers created with Avalon that had the
# `.verticesOnlySet` set to True.
if cmds.getAttr("{}.verticesOnlySet".format(node)):
self.log.info("Setting %s.verticesOnlySet to False", node)
cmds.setAttr("{}.verticesOnlySet".format(node), False)
# Add new nodes of the reference to the container
cmds.sets(content, forceElement=node)
# Remove any placeHolderList attribute entries from the set that
# are remaining from nodes being removed from the referenced file.
members = cmds.sets(node, query=True)
invalid = [x for x in members if ".placeHolderList" in x]
if invalid:
cmds.sets(invalid, remove=node)
# Update metadata
cmds.setAttr("{}.representation".format(node),
str(representation["_id"]),
type="string")
def remove(self, container):
"""Remove an existing `container` from Maya scene
Deprecated; this functionality is replaced by `api.remove()`
Arguments:
container (avalon-core:container-1.0): Which container
to remove from scene.
"""
from maya import cmds
node = container["objectName"]
# Assume asset has been referenced
members = cmds.sets(node, query=True)
reference_node = self._get_reference_node(members)
assert reference_node, ("Imported container not supported; "
"container must be referenced.")
self.log.info("Removing '%s' from Maya.." % container["name"])
namespace = cmds.referenceQuery(reference_node, namespace=True)
fname = cmds.referenceQuery(reference_node, filename=True)
cmds.file(fname, removeReference=True)
try:
cmds.delete(node)
except ValueError:
# Already implicitly deleted by Maya upon removing reference
pass
try:
# If container is not automatically cleaned up by Maya (issue #118)
cmds.namespace(removeNamespace=namespace,
deleteNamespaceContent=True)
except RuntimeError:
pass

86
config/launcher_actions.py Normal file
@@ -0,0 +1,86 @@
import os
from avalon import api, lib, pipeline
class FusionRenderNode(api.Action):
name = "fusionrendernode9"
label = "F9 Render Node"
icon = "object-group"
order = 997
def is_compatible(self, session):
"""Return whether the action is compatible with the session"""
if "AVALON_PROJECT" in session:
return False
return True
def process(self, session, **kwargs):
"""Implement the behavior for when the action is triggered
Args:
session (dict): environment dictionary
Returns:
Popen instance of newly spawned process
"""
# Update environment with session
env = os.environ.copy()
env.update(session)
# Get executable by name
app = lib.get_application(self.name)
env.update(app["environment"])
executable = lib.which(app["executable"])
return lib.launch(executable=executable, args=[], environment=env)
class VrayRenderSlave(api.Action):
name = "vrayrenderslave"
label = "V-Ray Slave"
icon = "object-group"
order = 996
def is_compatible(self, session):
"""Return whether the action is compatible with the session"""
if "AVALON_PROJECT" in session:
return False
return True
def process(self, session, **kwargs):
"""Implement the behavior for when the action is triggered
Args:
session (dict): environment dictionary
Returns:
Popen instance of newly spawned process
"""
# Update environment with session
env = os.environ.copy()
env.update(session)
# Get executable by name
app = lib.get_application(self.name)
env.update(app["environment"])
executable = lib.which(app["executable"])
# Run as server
arguments = ["-server", "-portNumber=20207"]
return lib.launch(executable=executable,
args=arguments,
environment=env)
def register_launcher_actions():
"""Register specific actions which should be accessible in the launcher"""
pipeline.register_plugin(api.Action, FusionRenderNode)
pipeline.register_plugin(api.Action, VrayRenderSlave)

272
config/lib.py Normal file
@@ -0,0 +1,272 @@
import os
import re
import logging
import importlib
from .vendor import pather
from .vendor.pather.error import ParseError
import avalon.io as io
import avalon.api
log = logging.getLogger(__name__)
def is_latest(representation):
"""Return whether the representation is from latest version
Args:
representation (dict): The representation document from the database.
Returns:
bool: Whether the representation is of latest version.
"""
version = io.find_one({"_id": representation['parent']})
# Get highest version under the parent
highest_version = io.find_one({
"type": "version",
"parent": version["parent"]
}, sort=[("name", -1)], projection={"name": True})
return version['name'] == highest_version['name']
def any_outdated():
"""Return whether the current scene has any outdated content"""
checked = set()
host = avalon.api.registered_host()
for container in host.ls():
representation = container['representation']
if representation in checked:
continue
representation_doc = io.find_one({"_id": io.ObjectId(representation),
"type": "representation"},
projection={"parent": True})
if representation_doc and not is_latest(representation_doc):
return True
elif not representation_doc:
log.debug("Container '{objectName}' has an invalid "
"representation, it is missing in the "
"database".format(**container))
checked.add(representation)
return False
def update_task_from_path(path):
"""Update the context using the current scene state.
When there are no changes to the context it will not trigger an update.
When the context for a file could not be parsed an error is logged but not
raised.
"""
if not path:
log.warning("Can't update the current task. Scene is not saved.")
return
# Find the current context from the filename
project = io.find_one({"type": "project"},
projection={"config.template.work": True})
template = project['config']['template']['work']
# Force usage of the registered root to avoid using wrong paths
template = pather.format(template, {"root": avalon.api.registered_root()})
try:
context = pather.parse(template, path)
except ParseError:
log.error("Can't update the current task. Unable to parse the "
"task for: %s (pattern: %s)", path, template)
return
# Find the changes between current Session and the path's context.
current = {
"asset": avalon.api.Session["AVALON_ASSET"],
"task": avalon.api.Session["AVALON_TASK"],
"app": avalon.api.Session["AVALON_APP"]
}
changes = {key: context[key] for key, current_value in current.items()
if context[key] != current_value}
if changes:
log.info("Updating work task to: %s", context)
avalon.api.update_current_task(**changes)
def _rreplace(s, a, b, n=1):
"""Replace a with b in string s from right side n times"""
return b.join(s.rsplit(a, n))
def version_up(filepath):
"""Version up filepath to a new non-existing version.
Parses for a version identifier like `_v001` or `.v001`
When no version is present, _v001 is appended as a suffix.
Returns:
str: filepath with increased version number
"""
dirname = os.path.dirname(filepath)
basename, ext = os.path.splitext(os.path.basename(filepath))
regex = "[._]v\d+"
matches = re.findall(regex, str(basename), re.IGNORECASE)
if not matches:
log.info("Creating version...")
new_label = "_v{version:03d}".format(version=1)
new_basename = "{}{}".format(basename, new_label)
else:
label = matches[-1]
version = re.search("\d+", label).group()
padding = len(version)
new_version = int(version) + 1
new_version = '{version:0{padding}d}'.format(version=new_version,
padding=padding)
new_label = label.replace(version, new_version, 1)
new_basename = _rreplace(basename, label, new_label)
new_filename = "{}{}".format(new_basename, ext)
new_filename = os.path.join(dirname, new_filename)
new_filename = os.path.normpath(new_filename)
if new_filename == filepath:
raise RuntimeError("Created path is the same as current file,"
"this is a bug")
if os.path.exists(new_filename):
log.info("Skipping existing version %s" % new_label)
return version_up(new_filename)
log.info("New version %s" % new_label)
return new_filename
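# Illustrative behavior (hypothetical paths; assumes the resulting files do
# not exist on disk yet):
#   version_up("/work/shot010/scene_v001.ma") -> "/work/shot010/scene_v002.ma"
#   version_up("/work/shot010/scene.ma")      -> "/work/shot010/scene_v001.ma"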
def switch_item(container,
asset_name=None,
subset_name=None,
representation_name=None):
"""Switch container asset, subset or representation of a container by name.
It'll always switch to the latest version - of course a different
approach could be implemented.
Args:
container (dict): data of the item to switch with
asset_name (str): name of the asset
subset_name (str): name of the subset
representation_name (str): name of the representation
Returns:
dict
"""
if all(not x for x in [asset_name, subset_name, representation_name]):
raise ValueError("Must have at least one change provided to switch.")
# Collect any of current asset, subset and representation if not provided
# so we can use the original name from those.
if any(not x for x in [asset_name, subset_name, representation_name]):
_id = io.ObjectId(container["representation"])
representation = io.find_one({"type": "representation", "_id": _id})
version, subset, asset, project = io.parenthood(representation)
if asset_name is None:
asset_name = asset["name"]
if subset_name is None:
subset_name = subset["name"]
if representation_name is None:
representation_name = representation["name"]
# Find the new one
asset = io.find_one({"name": asset_name, "type": "asset"})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({"name": subset_name,
"type": "subset",
"parent": asset["_id"]})
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[('name', -1)])
assert version, "Could not find a version for {}.{}".format(
asset_name, subset_name
)
representation = io.find_one({"name": representation_name,
"type": "representation",
"parent": version["_id"]})
assert representation, ("Could not find representation in the database with"
" the name '%s'" % representation_name)
avalon.api.switch(container, representation)
return representation
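# Example usage (hypothetical asset name): keep the current subset and
# representation, but switch the container to another asset's latest version:
#   switch_item(container, asset_name="characterB")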
def _get_host_name():
_host = avalon.api.registered_host()
# This covers nested module name like avalon.maya
return _host.__name__.rsplit(".", 1)[-1]
def collect_container_metadata(container):
"""Add additional data based on the current host
If the host application's lib module does not have a function to inject
additional data it will return an empty dictionary
Args:
container (dict): collection of representation data in host
Returns:
dict
"""
# TODO: Improve method of getting the host lib module
host_name = _get_host_name()
package_name = "colorbleed.{}.lib".format(host_name)
hostlib = importlib.import_module(package_name)
if not hasattr(hostlib, "get_additional_data"):
return {}
return hostlib.get_additional_data(container)
def get_project_fps():
"""Returns project's FPS, if not found will return 25 by default
Returns:
int, float
"""
project_name = io.active_project()
project = io.find_one({"name": project_name,
"type": "project"},
projection={"config": True})
config = project.get("config", None)
assert config, "This is a bug"
fps = config.get("fps", 25.0)
return fps

34
config/plugin.py Normal file
@@ -0,0 +1,34 @@
import tempfile
import pyblish.api
ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05
ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1
ValidateSceneOrder = pyblish.api.ValidatorOrder + 0.2
ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3
class Extractor(pyblish.api.InstancePlugin):
"""Extractor base class.
The extractor base class implements a "staging_dir" function used to
generate a temporary directory for an instance to extract to.
This temporary directory is generated through `tempfile.mkdtemp()`
"""
order = pyblish.api.ExtractorOrder
def staging_dir(self, instance):
"""Provide a temporary directory in which to store extracted files
Upon calling this method the staging directory is stored inside
the instance.data['stagingDir']
"""
staging_dir = instance.data.get('stagingDir', None)
if not staging_dir:
staging_dir = tempfile.mkdtemp(prefix="pyblish_tmp_")
instance.data['stagingDir'] = staging_dir
return staging_dir
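# A minimal sketch of an extractor built on this base; the family, label and
# file name are hypothetical:
#
#     class ExtractPlayblast(Extractor):
#         label = "Extract Playblast"
#         families = ["colorbleed.review"]
#
#         def process(self, instance):
#             staging_dir = self.staging_dir(instance)
#             path = os.path.join(staging_dir, "playblast.mov")
#             # ... write the extracted file to `path` here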

@@ -0,0 +1,46 @@
import os
import avalon.api
from avalon import fusion
class CreateTiffSaver(avalon.api.Creator):
name = "tiffDefault"
label = "Create Tiff Saver"
hosts = ["fusion"]
family = "colorbleed.saver"
def process(self):
file_format = "TiffFormat"
comp = fusion.get_current_comp()
# todo: improve method of getting current environment
# todo: pref avalon.Session over os.environ
workdir = os.path.normpath(os.environ["AVALON_WORKDIR"])
filename = "{}..tiff".format(self.name)
filepath = os.path.join(workdir, "render", "preview", filename)
with fusion.comp_lock_and_undo_chunk(comp):
args = (-32768, -32768) # Magical position numbers
saver = comp.AddTool("Saver", *args)
saver.SetAttrs({"TOOLS_Name": self.name})
# Setting input attributes is different from basic attributes
# Not to be confused with "MainInputAttributes"
saver["Clip"] = filepath
saver["OutputFormat"] = file_format
# Set standard TIFF settings
if saver[file_format] is None:
raise RuntimeError("File format is not set to TiffFormat, "
"this is a bug")
# Set file format attributes
saver[file_format]["Depth"] = 1 # int8 | int16 | float32 | other
saver[file_format]["SaveAlpha"] = 0

@@ -0,0 +1,25 @@
from avalon import api
class FusionSelectContainers(api.InventoryAction):
label = "Select Containers"
icon = "mouse-pointer"
color = "#d8d8d8"
def process(self, containers):
import avalon.fusion
tools = [i["_tool"] for i in containers]
comp = avalon.fusion.get_current_comp()
flow = comp.CurrentFrame.FlowView
with avalon.fusion.comp_lock_and_undo_chunk(comp, self.label):
# Clear selection
flow.Select()
# Select tool
for tool in tools:
flow.Select(tool)

@@ -0,0 +1,68 @@
from avalon import api, style
from avalon.vendor.Qt import QtGui, QtWidgets
import avalon.fusion
class FusionSetToolColor(api.InventoryAction):
"""Update the color of the selected tools"""
label = "Set Tool Color"
icon = "plus"
color = "#d8d8d8"
_fallback_color = QtGui.QColor.fromRgbF(1.0, 1.0, 1.0)
def process(self, containers):
"""Color all selected tools the selected colors"""
result = []
comp = avalon.fusion.get_current_comp()
# Get tool color
first = containers[0]
tool = first["_tool"]
color = tool.TileColor
if color is not None:
qcolor = QtGui.QColor().fromRgbF(color["R"], color["G"], color["B"])
else:
qcolor = self._fallback_color
# Launch pick color
picked_color = self.get_color_picker(qcolor)
if not picked_color:
return
with avalon.fusion.comp_lock_and_undo_chunk(comp):
for container in containers:
# Convert color to RGB 0-1 floats
rgb_f = picked_color.getRgbF()
rgb_f_table = {"R": rgb_f[0], "G": rgb_f[1], "B": rgb_f[2]}
# Update tool
tool = container["_tool"]
tool.TileColor = rgb_f_table
result.append(container)
return result
def get_color_picker(self, color):
"""Launch color picker and return chosen color
Args:
color(QtGui.QColor): Start color to display
Returns:
QtGui.QColor
"""
color_dialog = QtWidgets.QColorDialog(color)
color_dialog.setStyleSheet(style.load_stylesheet())
accepted = color_dialog.exec_()
if not accepted:
return
return color_dialog.selectedColor()

@@ -0,0 +1,76 @@
"""A module containing generic loader actions that will display in the Loader.
"""
from avalon import api
class FusionSetFrameRangeLoader(api.Loader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["colorbleed.animation",
"colorbleed.camera",
"colorbleed.imagesequence",
"colorbleed.yeticache",
"colorbleed.pointcache"]
representations = ["*"]
label = "Set frame range"
order = 11
icon = "clock-o"
color = "white"
def load(self, context, name, namespace, data):
from config.apps.fusion import lib
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
"end frame data is missing..")
return
lib.update_frame_range(start, end)
class FusionSetFrameRangeWithHandlesLoader(api.Loader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["colorbleed.animation",
"colorbleed.camera",
"colorbleed.imagesequence",
"colorbleed.yeticache",
"colorbleed.pointcache"]
representations = ["*"]
label = "Set frame range (with handles)"
order = 12
icon = "clock-o"
color = "white"
def load(self, context, name, namespace, data):
from config.apps.fusion import lib
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
"end frame data is missing..")
return
# Include handles
handles = version_data.get("handles", 0)
start -= handles
end += handles
lib.update_frame_range(start, end)

@@ -0,0 +1,259 @@
import os
import contextlib
from avalon import api
import avalon.io as io
@contextlib.contextmanager
def preserve_inputs(tool, inputs):
"""Preserve the tool's inputs after context"""
comp = tool.Comp()
values = {}
for name in inputs:
tool_input = getattr(tool, name)
value = tool_input[comp.TIME_UNDEFINED]
values[name] = value
try:
yield
finally:
for name, value in values.items():
tool_input = getattr(tool, name)
tool_input[comp.TIME_UNDEFINED] = value
@contextlib.contextmanager
def preserve_trim(loader, log=None):
"""Preserve the relative trim of the Loader tool.
This tries to preserve the loader's trim (trim in and trim out) after
the context by reapplying the "amount" it trims on the clip's length at
start and end.
"""
# Get original trim as amount of "trimming" from length
time = loader.Comp().TIME_UNDEFINED
length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
trim_from_start = loader["ClipTimeStart"][time]
trim_from_end = length - loader["ClipTimeEnd"][time]
try:
yield
finally:
length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
if trim_from_start > length:
trim_from_start = length
if log:
log.warning("Reducing trim in to %d "
"(because of less frames)" % trim_from_start)
remainder = length - trim_from_start
if trim_from_end > remainder:
trim_from_end = remainder
if log:
log.warning("Reducing trim in to %d "
"(because of less frames)" % trim_from_end)
loader["ClipTimeStart"][time] = trim_from_start
loader["ClipTimeEnd"][time] = length - trim_from_end
def loader_shift(loader, frame, relative=True):
"""Shift global in time by i preserving duration
This moves the loader by i frames preserving global duration. When relative
is False it will shift the global in to the start frame.
Args:
loader (tool): The fusion loader tool.
frame (int): The amount of frames to move.
relative (bool): When True the shift is relative, else the shift will
change the global in to frame.
Returns:
int: The resulting relative frame change (how much it moved)
"""
comp = loader.Comp()
time = comp.TIME_UNDEFINED
old_in = loader["GlobalIn"][time]
old_out = loader["GlobalOut"][time]
if relative:
shift = frame
else:
shift = frame - old_in
# Shifting global in will try to automatically compensate for the change
# in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
# input values to "just shift" the clip
with preserve_inputs(loader, inputs=["ClipTimeStart",
"ClipTimeEnd",
"HoldFirstFrame",
"HoldLastFrame"]):
# GlobalIn cannot be set past GlobalOut or vice versa
# so we must apply them in the order of the shift.
if shift > 0:
loader["GlobalOut"][time] = old_out + shift
loader["GlobalIn"][time] = old_in + shift
else:
loader["GlobalIn"][time] = old_in + shift
loader["GlobalOut"][time] = old_out + shift
return int(shift)
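# Example (hypothetical values): for a loader spanning frames 1001-1100,
#   loader_shift(loader, 10)                    # moves to 1011-1110, returns 10
#   loader_shift(loader, 1005, relative=False)  # moves to 1005-1104, returns 4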
class FusionLoadSequence(api.Loader):
"""Load image sequence into Fusion"""
families = ["colorbleed.imagesequence"]
representations = ["*"]
label = "Load sequence"
order = -10
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, data):
from avalon.fusion import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk
)
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
# Use the first file for now
path = self._get_first_image(self.fname)
# Create the Loader with the filename path set
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Create Loader"):
args = (-32768, -32768)
tool = comp.AddTool("Loader", *args)
tool["Clip"] = path
# Set global in point to start frame (if in version.data)
start = context["version"]["data"].get("startFrame", None)
if start is not None:
loader_shift(tool, start, relative=False)
imprint_container(tool,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__)
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
"""Update the Loader's path
Fusion automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
- ClipTimeStart: Fusion resets to 0 if duration changes
- We keep the trim in as close as possible to the previous value.
When there are fewer frames than the amount of trim we reduce
it accordingly.
- ClipTimeEnd: Fusion resets to 0 if duration changes
- We keep the trim out as close as possible to the previous value
within the new amount of frames after trim in (ClipTimeStart) has
been set.
- GlobalIn: Fusion resets to comp's global in if duration changes
- We change it to the "startFrame"
- GlobalOut: Fusion resets to GlobalIn + length if duration changes
- We do the same as Fusion and allow it to take control.
- HoldFirstFrame: Fusion resets this to 0
- We preserve the value.
- HoldLastFrame: Fusion resets this to 0
- We preserve the value.
- Reverse: Fusion resets to disabled if "Loop" is not enabled.
- We preserve the value.
- Depth: Fusion resets to "Format"
- We preserve the value.
- KeyCode: Fusion resets to ""
- We preserve the value.
- TimeCodeOffset: Fusion resets to 0
- We preserve the value.
"""
from avalon.fusion import comp_lock_and_undo_chunk
tool = container["_tool"]
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()
root = api.get_representation_path(representation)
path = self._get_first_image(root)
# Get start frame from version data
version = io.find_one({"type": "version",
"_id": representation["parent"]})
start = version["data"].get("startFrame")
if start is None:
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(tool.Name, representation))
start = 0
with comp_lock_and_undo_chunk(comp, "Update Loader"):
# Update the loader's path whilst preserving some values
with preserve_trim(tool, log=self.log):
with preserve_inputs(tool,
inputs=("HoldFirstFrame",
"HoldLastFrame",
"Reverse",
"Depth",
"KeyCode",
"TimeCodeOffset")):
tool["Clip"] = path
# Set the global in to the start frame of the sequence
global_in_changed = loader_shift(tool, start, relative=False)
if global_in_changed:
# Log this change to the user
self.log.debug("Changed '%s' global in: %d" % (tool.Name,
start))
# Update the imprinted representation
tool.SetData("avalon.representation", str(representation["_id"]))
def remove(self, container):
from avalon.fusion import comp_lock_and_undo_chunk
tool = container["_tool"]
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()
with comp_lock_and_undo_chunk(comp, "Remove Loader"):
tool.Delete()
def _get_first_image(self, root):
"""Get first file in representation root"""
files = sorted(os.listdir(root))
return os.path.join(root, files[0])

@@ -0,0 +1,24 @@
import os
import pyblish.api
from avalon import fusion
class CollectCurrentCompFusion(pyblish.api.ContextPlugin):
"""Collect current comp"""
order = pyblish.api.CollectorOrder - 0.4
label = "Collect Current Comp"
hosts = ["fusion"]
def process(self, context):
"""Collect all image sequence tools"""
current_comp = fusion.get_current_comp()
assert current_comp, "Must have active Fusion composition"
context.data["currentComp"] = current_comp
# Store path to current file
filepath = current_comp.GetAttrs().get("COMPS_FileName", "")
context.data['currentFile'] = filepath

@@ -0,0 +1,22 @@
import pyblish.api
class CollectFusionVersion(pyblish.api.ContextPlugin):
"""Collect current comp"""
order = pyblish.api.CollectorOrder
label = "Collect Fusion Version"
hosts = ["fusion"]
def process(self, context):
"""Collect all image sequence tools"""
comp = context.data.get("currentComp")
if not comp:
raise RuntimeError("No comp previously collected, unable to "
"retrieve Fusion version.")
version = comp.GetApp().Version
context.data["fusionVersion"] = version
self.log.info("Fusion version: %s" % version)

@@ -0,0 +1,96 @@
import os
import pyblish.api
def get_comp_render_range(comp):
"""Return comp's start and end render range."""
comp_attrs = comp.GetAttrs()
start = comp_attrs["COMPN_RenderStart"]
end = comp_attrs["COMPN_RenderEnd"]
# Whenever render ranges are undefined fall back
# to the comp's global start and end
if start == -1000000000:
start = comp_attrs["COMPN_GlobalStart"]
if end == -1000000000:
end = comp_attrs["COMPN_GlobalEnd"]
return start, end
class CollectInstances(pyblish.api.ContextPlugin):
"""Collect Fusion saver instances
This additionally stores the Comp start and end render range in the
current context's data as "startFrame" and "endFrame".
"""
order = pyblish.api.CollectorOrder
label = "Collect Instances"
hosts = ["fusion"]
def process(self, context):
"""Collect all image sequence tools"""
from avalon.fusion.lib import get_frame_path
comp = context.data["currentComp"]
# Get all savers in the comp
tools = comp.GetToolList(False).values()
savers = [tool for tool in tools if tool.ID == "Saver"]
start, end = get_comp_render_range(comp)
context.data["startFrame"] = start
context.data["endFrame"] = end
for tool in savers:
path = tool["Clip"][comp.TIME_UNDEFINED]
tool_attrs = tool.GetAttrs()
active = not tool_attrs["TOOLB_PassThrough"]
if not path:
self.log.warning("Skipping saver because it "
"has no path set: {}".format(tool.Name))
continue
filename = os.path.basename(path)
head, padding, tail = get_frame_path(filename)
ext = os.path.splitext(path)[1]
assert tail == ext, ("Tail does not match %s" % ext)
subset = head.rstrip("_. ") # subset is head of the filename
# Include start and end render frame in label
label = "{subset} ({start}-{end})".format(subset=subset,
start=int(start),
end=int(end))
instance = context.create_instance(subset)
instance.data.update({
"asset": os.environ["AVALON_ASSET"], # todo: not a constant
"subset": subset,
"path": path,
"outputDir": os.path.dirname(path),
"ext": ext, # todo: should be redundant
"label": label,
"families": ["colorbleed.saver"],
"family": "colorbleed.saver",
"active": active,
"publish": active # backwards compatibility
})
instance.append(tool)
self.log.info("Found: \"%s\" " % path)
# Sort/grouped by family (preserving local index)
context[:] = sorted(context, key=self.sort_by_family)
return context
def sort_by_family(self, instance):
"""Sort by family"""
return instance.data.get("families", instance.data.get("family"))

@@ -0,0 +1,44 @@
import pyblish.api
class CollectFusionRenderMode(pyblish.api.InstancePlugin):
"""Collect current comp's render Mode
Options:
renderlocal
deadline
Note that this value is set for each comp separately. When you save the
comp this information will be stored in that file. If for some reason the
available tool does not visualize which render mode is set for the
current comp, please run the following line in the console (Py2)
comp.GetData("colorbleed.rendermode")
This will return the name of the current render mode as seen above under
Options.
"""
order = pyblish.api.CollectorOrder + 0.4
label = "Collect Render Mode"
hosts = ["fusion"]
families = ["colorbleed.saver"]
def process(self, instance):
"""Collect all image sequence tools"""
options = ["renderlocal", "deadline"]
comp = instance.context.data.get("currentComp")
if not comp:
raise RuntimeError("No comp previously collected, unable to "
"retrieve Fusion version.")
rendermode = comp.GetData("colorbleed.rendermode") or "renderlocal"
assert rendermode in options, "Must be supported render mode"
self.log.info("Render mode: {0}".format(rendermode))
# Append family
family = "colorbleed.saver.{0}".format(rendermode)
instance.data["families"].append(family)

@@ -0,0 +1,34 @@
import pyblish.api
class FusionIncrementCurrentFile(pyblish.api.ContextPlugin):
"""Increment the current file.
Saves the current file with an increased version number.
"""
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["fusion"]
families = ["colorbleed.saver.deadline"]
optional = True
def process(self, context):
from config.lib import version_up
from config.action import get_errored_plugins_from_data
errored_plugins = get_errored_plugins_from_data(context)
if any(plugin.__name__ == "FusionSubmitDeadline"
for plugin in errored_plugins):
raise RuntimeError("Skipping incrementing current file because "
"submission to deadline failed.")
comp = context.data.get("currentComp")
assert comp, "Must have comp"
current_filepath = context.data["currentFile"]
new_filepath = version_up(current_filepath)
comp.Save(new_filepath)

@@ -0,0 +1,98 @@
import re
import os
import json
import subprocess
import pyblish.api
from config.action import get_errored_plugins_from_data
def _get_script():
"""Get path to the image sequence script"""
# todo: use a more elegant way to get the python script
try:
from config.scripts import publish_filesequence
except Exception:
raise RuntimeError("Expected module 'publish_imagesequence'"
"to be available")
module_path = publish_filesequence.__file__
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
return module_path
class PublishImageSequence(pyblish.api.InstancePlugin):
"""Publish the generated local image sequences."""
order = pyblish.api.IntegratorOrder
label = "Publish Rendered Image Sequence(s)"
hosts = ["fusion"]
families = ["colorbleed.saver.renderlocal"]
def process(self, instance):
# Skip this plug-in if the local render (FusionRenderLocal) failed
errored_plugins = get_errored_plugins_from_data(instance.context)
if any(plugin.__name__ == "FusionRenderLocal" for plugin in
errored_plugins):
raise RuntimeError("Fusion local render failed, "
"publishing images skipped.")
subset = instance.data["subset"]
ext = instance.data["ext"]
# Regex to match resulting renders
regex = "^{subset}.*[0-9]+{ext}+$".format(subset=re.escape(subset),
ext=re.escape(ext))
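# e.g. subset "renderMain" with ext ".tiff" yields the pattern
# "^renderMain.*[0-9]+\.tiff+$", matching files like "renderMain.0001.tiff"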
# The instance has most of the information already stored
metadata = {
"regex": regex,
"startFrame": instance.context.data["startFrame"],
"endFrame": instance.context.data["endFrame"],
"families": ["colorbleed.imagesequence"],
}
# Write metadata and store the path in the instance
output_directory = instance.data["outputDir"]
path = os.path.join(output_directory,
"{}_metadata.json".format(subset))
with open(path, "w") as f:
json.dump(metadata, f)
assert os.path.isfile(path), ("Stored path is not a file for %s"
% instance.data["name"])
# Suppress any subprocess console
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
process = subprocess.Popen(["python", _get_script(),
"--paths", path],
bufsize=1,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
while True:
output = process.stdout.readline()
# Break when there is no output or a return code has been given
if output == '' and process.poll() is not None:
process.stdout.close()
break
if output:
line = output.strip()
if line.startswith("ERROR"):
self.log.error(line)
else:
self.log.info(line)
if process.returncode != 0:
raise RuntimeError("Process quit with non-zero "
"return code: {}".format(process.returncode))

@@ -0,0 +1,42 @@
import pyblish.api
import avalon.fusion as fusion
class FusionRenderLocal(pyblish.api.InstancePlugin):
"""Render the current Fusion composition locally.
Extract the result of savers by starting a comp render
This will run the local render of Fusion.
"""
order = pyblish.api.ExtractorOrder
label = "Render Local"
hosts = ["fusion"]
families = ["colorbleed.saver.renderlocal"]
def process(self, instance):
# This should be a ContextPlugin, but this is a workaround
# for a bug in pyblish to run once for a family: issue #250
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
current_comp = context.data["currentComp"]
start_frame = current_comp.GetAttrs("COMPN_RenderStart")
end_frame = current_comp.GetAttrs("COMPN_RenderEnd")
self.log.info("Starting render")
self.log.info("Start frame: {}".format(start_frame))
self.log.info("End frame: {}".format(end_frame))
with fusion.comp_lock_and_undo_chunk(current_comp):
result = current_comp.Render()
if not result:
raise RuntimeError("Comp render failed")

View file

@@ -0,0 +1,21 @@
import pyblish.api
class FusionSaveComp(pyblish.api.ContextPlugin):
"""Save current comp"""
label = "Save current file"
order = pyblish.api.ExtractorOrder - 0.49
hosts = ["fusion"]
families = ["colorbleed.saver"]
def process(self, context):
comp = context.data.get("currentComp")
assert comp, "Must have comp"
current = comp.GetAttrs().get("COMPS_FileName", "")
assert context.data['currentFile'] == current
self.log.info("Saving current file..")
comp.Save()

View file

@@ -0,0 +1,149 @@
import os
import json
import getpass
from avalon import api
from avalon.vendor import requests
import pyblish.api
class FusionSubmitDeadline(pyblish.api.InstancePlugin):
"""Submit current Comp to Deadline
Renders are submitted to a Deadline Web Service as
supplied via the environment variable AVALON_DEADLINE
"""
label = "Submit to Deadline"
order = pyblish.api.IntegratorOrder
hosts = ["fusion"]
families = ["colorbleed.saver.deadline"]
def process(self, instance):
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
from avalon.fusion.lib import get_frame_path
AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
"http://localhost:8082")
assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"
# Collect all saver instances in context that are to be rendered
saver_instances = []
for instance in context[:]:
if not self.families[0] in instance.data.get("families"):
# Allow only saver family instances
continue
if not instance.data.get("publish", True):
# Skip inactive instances
continue
self.log.debug(instance.data["name"])
saver_instances.append(instance)
if not saver_instances:
raise RuntimeError("No instances found for Deadline submittion")
fusion_version = int(context.data["fusionVersion"])
filepath = context.data["currentFile"]
filename = os.path.basename(filepath)
comment = context.data.get("comment", "")
deadline_user = context.data.get("deadlineUser", getpass.getuser())
# Documentation for keys available at:
# https://docs.thinkboxsoftware.com
# /products/deadline/8.0/1_User%20Manual/manual
# /manual-submission.html#job-info-file-options
payload = {
"JobInfo": {
# Top-level group name
"BatchName": filename,
# Job name, as seen in Monitor
"Name": filename,
# User, as seen in Monitor
"UserName": deadline_user,
# Use a default submission pool for Fusion
"Pool": "fusion",
"Plugin": "Fusion",
"Frames": "{start}-{end}".format(
start=int(context.data["startFrame"]),
end=int(context.data["endFrame"])
),
"Comment": comment,
},
"PluginInfo": {
# Input
"FlowFile": filepath,
# Mandatory for Deadline
"Version": str(fusion_version),
# Render in high quality
"HighQuality": True,
# Whether saver output should be checked after rendering
# is complete
"CheckOutput": True,
                # Proxy: higher numbers mean smaller images for faster test renders
# 1 = no proxy quality
"Proxy": 1,
},
# Mandatory for Deadline, may be empty
"AuxFiles": []
}
# Enable going to rendered frames from Deadline Monitor
for index, instance in enumerate(saver_instances):
head, padding, tail = get_frame_path(instance.data["path"])
path = "{}{}{}".format(head, "#" * padding, tail)
folder, filename = os.path.split(path)
payload["JobInfo"]["OutputDirectory%d" % index] = folder
payload["JobInfo"]["OutputFilename%d" % index] = filename
# Include critical variables with submission
keys = [
            # TODO: This won't work if the slaves don't have access to
# these paths, such as if slaves are running Linux and the
# submitter is on Windows.
"PYTHONPATH",
"OFX_PLUGIN_PATH",
"FUSION9_MasterPrefs"
]
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
payload["JobInfo"].update({
"EnvironmentKeyValue%d" % index: "{key}={value}".format(
key=key,
value=environment[key]
) for index, key in enumerate(environment)
})
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(AVALON_DEADLINE)
response = requests.post(url, json=payload)
if not response.ok:
raise Exception(response.text)
# Store the response for dependent job submission plug-ins
for instance in saver_instances:
instance.data["deadlineSubmissionJob"] = response.json()

View file

@@ -0,0 +1,40 @@
import pyblish.api
from colorbleed import action
class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
"""Validate if all Background tool are set to float32 bit"""
order = pyblish.api.ValidatorOrder
label = "Validate Background Depth 32 bit"
actions = [action.RepairAction]
hosts = ["fusion"]
families = ["colorbleed.saver"]
optional = True
@classmethod
def get_invalid(cls, instance):
context = instance.context
comp = context.data.get("currentComp")
assert comp, "Must have Comp object"
backgrounds = comp.GetToolList(False, "Background").values()
if not backgrounds:
return []
return [i for i in backgrounds if i.GetInput("Depth") != 4.0]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Found %i nodes which are not set to float32"
% len(invalid))
@classmethod
def repair(cls, instance):
comp = instance.context.data.get("currentComp")
invalid = cls.get_invalid(instance)
for i in invalid:
i.SetInput("Depth", 4.0, comp.TIME_UNDEFINED)

View file

@@ -0,0 +1,29 @@
import os
import pyblish.api
class ValidateFusionCompSaved(pyblish.api.ContextPlugin):
"""Ensure current comp is saved"""
order = pyblish.api.ValidatorOrder
label = "Validate Comp Saved"
families = ["colorbleed.saver"]
hosts = ["fusion"]
def process(self, context):
comp = context.data.get("currentComp")
assert comp, "Must have Comp object"
attrs = comp.GetAttrs()
filename = attrs["COMPS_FileName"]
if not filename:
raise RuntimeError("Comp is not saved.")
if not os.path.exists(filename):
raise RuntimeError("Comp file does not exist: %s" % filename)
if attrs["COMPB_Modified"]:
self.log.warning("Comp is modified. Save your comp to ensure your "
"changes propagate correctly.")

View file

@@ -0,0 +1,41 @@
import pyblish.api
from colorbleed import action
class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
"""Valid if all savers have the input attribute CreateDir checked on
This attribute ensures that the folders to which the saver will write
will be created.
"""
order = pyblish.api.ValidatorOrder
actions = [action.RepairAction]
label = "Validate Create Folder Checked"
families = ["colorbleed.saver"]
hosts = ["fusion"]
@classmethod
def get_invalid(cls, instance):
active = instance.data.get("active", instance.data.get("publish"))
if not active:
return []
tool = instance[0]
create_dir = tool.GetInput("CreateDir")
        if create_dir == 0.0:
            cls.log.error("%s has Create Folder turned off" % tool.Name)
            return [tool]

        return []
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Found Saver with Create Folder During "
"Render checked off")
@classmethod
def repair(cls, instance):
invalid = cls.get_invalid(instance)
for tool in invalid:
tool.SetInput("CreateDir", 1.0)

View file

@@ -0,0 +1,36 @@
import os
import pyblish.api
class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
"""Ensure the Saver has an extension in the filename path
This disallows files written as `filename` instead of `filename.frame.ext`.
Fusion does not always set an extension for your filename when
changing the file format of the saver.
"""
order = pyblish.api.ValidatorOrder
label = "Validate Filename Has Extension"
families = ["colorbleed.saver"]
hosts = ["fusion"]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Found Saver without an extension")
@classmethod
def get_invalid(cls, instance):
path = instance.data["path"]
fname, ext = os.path.splitext(path)
if not ext:
tool = instance[0]
cls.log.error("%s has no extension specified" % tool.Name)
return [tool]
return []
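
The check hinges on os.path.splitext returning an empty extension for such paths; a quick sketch with hypothetical filenames:

import os

print(os.path.splitext("renders/beauty.0001.exr"))  # ('renders/beauty.0001', '.exr')
print(os.path.splitext("renders/beauty"))           # ('renders/beauty', '') -> invalid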

View file

@@ -0,0 +1,29 @@
import pyblish.api
class ValidateSaverHasInput(pyblish.api.InstancePlugin):
"""Validate saver has incoming connection
This ensures a Saver has at least an input connection.
"""
order = pyblish.api.ValidatorOrder
label = "Validate Saver Has Input"
families = ["colorbleed.saver"]
hosts = ["fusion"]
@classmethod
def get_invalid(cls, instance):
saver = instance[0]
if not saver.Input.GetConnectedOutput():
return [saver]
return []
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Saver has no incoming connection: "
"{} ({})".format(instance, invalid[0].Name))

View file

@@ -0,0 +1,44 @@
import pyblish.api
class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
"""Validate saver passthrough is similar to Pyblish publish state"""
order = pyblish.api.ValidatorOrder
label = "Validate Saver Passthrough"
families = ["colorbleed.saver"]
hosts = ["fusion"]
def process(self, context):
# Workaround for ContextPlugin always running, even if no instance
# is present with the family
instances = pyblish.api.instances_by_plugin(instances=list(context),
plugin=self)
        if not instances:
            self.log.debug("Ignoring plugin.. (bugfix)")
            return
invalid_instances = []
for instance in instances:
invalid = self.is_invalid(instance)
if invalid:
invalid_instances.append(instance)
if invalid_instances:
self.log.info("Reset pyblish to collect your current scene state, "
"that should fix error.")
raise RuntimeError("Invalid instances: "
"{0}".format(invalid_instances))
def is_invalid(self, instance):
saver = instance[0]
attr = saver.GetAttrs()
active = not attr["TOOLB_PassThrough"]
if active != instance.data["publish"]:
self.log.info("Saver has different passthrough state than "
"Pyblish: {} ({})".format(instance, saver.Name))
return [saver]
return []

View file

@@ -0,0 +1,29 @@
import pyblish.api
class ValidateUniqueSubsets(pyblish.api.InstancePlugin):
"""Ensure all instances have a unique subset name"""
order = pyblish.api.ValidatorOrder
label = "Validate Unique Subsets"
families = ["colorbleed.saver"]
hosts = ["fusion"]
@classmethod
def get_invalid(cls, instance):
context = instance.context
subset = instance.data["subset"]
for other_instance in context[:]:
if other_instance == instance:
continue
if other_instance.data["subset"] == subset:
return [instance] # current instance is invalid
return []
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Animation content is invalid. See log.")

View file

@@ -0,0 +1,33 @@
from avalon import api, style
class CopyFile(api.Loader):
"""Copy the published file to be pasted at the desired location"""
representations = ["*"]
families = ["*"]
label = "Copy File"
order = 10
icon = "copy"
color = style.colors.default
def load(self, context, name=None, namespace=None, data=None):
self.log.info("Added copy to clipboard: {0}".format(self.fname))
self.copy_file_to_clipboard(self.fname)
@staticmethod
def copy_file_to_clipboard(path):
from avalon.vendor.Qt import QtCore, QtWidgets
app = QtWidgets.QApplication.instance()
assert app, "Must have running QApplication instance"
# Build mime data for clipboard
data = QtCore.QMimeData()
url = QtCore.QUrl.fromLocalFile(path)
data.setUrls([url])
# Set to Clipboard
clipboard = app.clipboard()
clipboard.setMimeData(data)

View file

@@ -0,0 +1,29 @@
import os
from avalon import api
class CopyFilePath(api.Loader):
"""Copy published file path to clipboard"""
representations = ["*"]
families = ["*"]
label = "Copy File Path"
order = 20
icon = "clipboard"
color = "#999999"
def load(self, context, name=None, namespace=None, data=None):
self.log.info("Added file path to clipboard: {0}".format(self.fname))
self.copy_path_to_clipboard(self.fname)
@staticmethod
def copy_path_to_clipboard(path):
from avalon.vendor.Qt import QtCore, QtWidgets
app = QtWidgets.QApplication.instance()
assert app, "Must have running QApplication instance"
# Set to Clipboard
clipboard = app.clipboard()
clipboard.setText(os.path.normpath(path))

View file

@@ -0,0 +1,49 @@
import sys
import os
import subprocess
from avalon import api
def open_in_default_app(filepath):
"""Open file with system default executable"""
if sys.platform.startswith('darwin'):
subprocess.call(('open', filepath))
elif os.name == 'nt':
os.startfile(filepath)
elif os.name == 'posix':
subprocess.call(('xdg-open', filepath))
class PlayImageSequence(api.Loader):
"""Open Image Sequence with system default"""
families = ["colorbleed.imagesequence"]
representations = ["*"]
label = "Play sequence"
order = -10
icon = "play-circle"
color = "orange"
def load(self, context, name, namespace, data):
directory = self.fname
from avalon.vendor import clique
pattern = clique.PATTERNS["frames"]
files = os.listdir(directory)
collections, remainder = clique.assemble(files,
patterns=[pattern],
minimum_items=1)
assert not remainder, ("There shouldn't have been a remainder for "
"'%s': %s" % (directory, remainder))
        sequence = collections[0]
        first_image = list(sequence)[0]
filepath = os.path.normpath(os.path.join(directory, first_image))
self.log.info("Opening : {}".format(filepath))
        open_in_default_app(filepath)
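
A short sketch of how clique assembles a directory listing into collections, using hypothetical filenames:

from avalon.vendor import clique

files = ["beauty.1001.exr", "beauty.1002.exr", "beauty.1003.exr"]
collections, remainder = clique.assemble(files,
                                         patterns=[clique.PATTERNS["frames"]],
                                         minimum_items=1)

sequence = collections[0]        # beauty.%04d.exr [1001-1003]
first_image = list(sequence)[0]  # "beauty.1001.exr"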

View file

@@ -0,0 +1,33 @@
import os
import shutil
import pyblish.api
class CleanUp(pyblish.api.InstancePlugin):
"""Cleans up the staging directory after a successful publish.
The removal will only happen for staging directories which are inside the
temporary folder, otherwise the folder is ignored.
"""
order = pyblish.api.IntegratorOrder + 10
label = "Clean Up"
def process(self, instance):
import tempfile
staging_dir = instance.data.get("stagingDir", None)
if not staging_dir or not os.path.exists(staging_dir):
self.log.info("No staging directory found: %s" % staging_dir)
return
temp_root = tempfile.gettempdir()
if not os.path.normpath(staging_dir).startswith(temp_root):
self.log.info("Skipping cleanup. Staging directory is not in the "
"temp folder: %s" % staging_dir)
return
self.log.info("Removing temporary folder ...")
shutil.rmtree(staging_dir)

View file

@@ -0,0 +1,108 @@
import pyblish.api
import os
from avalon import io, api
class CollectAssumedDestination(pyblish.api.InstancePlugin):
"""Generate the assumed destination path where the file will be stored"""
label = "Collect Assumed Destination"
order = pyblish.api.CollectorOrder + 0.499
def process(self, instance):
self.create_destination_template(instance)
template_data = instance.data["assumedTemplateData"]
template = instance.data["template"]
mock_template = template.format(**template_data)
# For now assume resources end up in a "resources" folder in the
# published folder
mock_destination = os.path.join(os.path.dirname(mock_template),
"resources")
# Clean the path
mock_destination = os.path.abspath(os.path.normpath(mock_destination))
# Define resource destination and transfers
resources = instance.data.get("resources", list())
transfers = instance.data.get("transfers", list())
for resource in resources:
# Add destination to the resource
source_filename = os.path.basename(resource["source"])
destination = os.path.join(mock_destination, source_filename)
resource['destination'] = destination
# Collect transfers for the individual files of the resource
# e.g. all individual files of a cache or UDIM textures.
files = resource['files']
for fsrc in files:
fname = os.path.basename(fsrc)
fdest = os.path.join(mock_destination, fname)
transfers.append([fsrc, fdest])
instance.data["resources"] = resources
instance.data["transfers"] = transfers
def create_destination_template(self, instance):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True})
template = project["config"]["template"]["publish"]
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset['silo']
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
            # if there is a subset there ought to be a version
if version is not None:
version_number += version["name"]
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": project_name,
"silo": silo,
"asset": asset_name,
"subset": subset_name,
"version": version_number,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
instance.data["template"] = template

View file

@@ -0,0 +1,11 @@
import pyblish.api
class CollectColorbleedComment(pyblish.api.ContextPlugin):
"""This plug-ins displays the comment dialog box per default"""
label = "Collect Comment"
order = pyblish.api.CollectorOrder
def process(self, context):
context.data["comment"] = ""

View file

@@ -0,0 +1,22 @@
import os
import pyblish.api
class CollectContextLabel(pyblish.api.ContextPlugin):
"""Labelize context using the registered host and current file"""
order = pyblish.api.CollectorOrder + 0.25
label = "Context Label"
def process(self, context):
# Get last registered host
host = pyblish.api.registered_hosts()[-1]
# Get scene name from "currentFile"
path = context.data.get("currentFile") or "<Unsaved>"
base = os.path.basename(path)
# Set label
label = "{host} - {scene}".format(host=host.title(), scene=base)
context.data["label"] = label

View file

@@ -0,0 +1,14 @@
import os
import pyblish.api
class CollectCurrentShellFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Current File"
hosts = ["shell"]
def process(self, context):
"""Inject the current working file"""
context.data["currentFile"] = os.path.join(os.getcwd(), "<shell>")

View file

@@ -0,0 +1,52 @@
import os
import subprocess
import pyblish.api
CREATE_NO_WINDOW = 0x08000000
def deadline_command(cmd):
# Find Deadline
path = os.environ.get("DEADLINE_PATH", None)
assert path is not None, "Variable 'DEADLINE_PATH' must be set"
executable = os.path.join(path, "deadlinecommand")
if os.name == "nt":
executable += ".exe"
assert os.path.exists(
executable), "Deadline executable not found at %s" % executable
assert cmd, "Must have a command"
query = (executable, cmd)
process = subprocess.Popen(query, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
creationflags=CREATE_NO_WINDOW)
out, err = process.communicate()
return out
class CollectDeadlineUser(pyblish.api.ContextPlugin):
"""Retrieve the local active Deadline user"""
order = pyblish.api.CollectorOrder + 0.499
label = "Deadline User"
hosts = ['maya', 'fusion']
families = ["colorbleed.renderlayer", "colorbleed.saver.deadline"]
def process(self, context):
"""Inject the current working file"""
user = deadline_command("GetCurrentUserName").strip()
if not user:
self.log.warning("No Deadline user found. "
"Do you have Deadline installed?")
return
self.log.info("Found Deadline user: {}".format(user))
context.data['deadlineUser'] = user

View file

@@ -0,0 +1,184 @@
import os
import re
import copy
import json
import pprint
import pyblish.api
from avalon import api
def collect(root,
regex=None,
exclude_regex=None,
startFrame=None,
endFrame=None):
"""Collect sequence collections in root"""
from avalon.vendor import clique
files = list()
for filename in os.listdir(root):
# Must have extension
ext = os.path.splitext(filename)[1]
if not ext:
continue
# Only files
if not os.path.isfile(os.path.join(root, filename)):
continue
# Include and exclude regex
if regex and not re.search(regex, filename):
continue
if exclude_regex and re.search(exclude_regex, filename):
continue
files.append(filename)
# Match collections
# Support filenames like: projectX_shot01_0010.tiff with this regex
pattern = r"(?P<index>(?P<padding>0*)\d+)\.\D+\d?$"
collections, remainder = clique.assemble(files,
patterns=[pattern],
minimum_items=1)
# Ignore any remainders
if remainder:
print("Skipping remainder {}".format(remainder))
# Exclude any frames outside start and end frame.
for collection in collections:
for index in list(collection.indexes):
if startFrame is not None and index < startFrame:
collection.indexes.discard(index)
continue
if endFrame is not None and index > endFrame:
collection.indexes.discard(index)
continue
# Keep only collections that have at least a single frame
collections = [c for c in collections if c.indexes]
return collections
class CollectFileSequences(pyblish.api.ContextPlugin):
"""Gather file sequences from working directory
When "FILESEQUENCE" environment variable is set these paths (folders or
.json files) are parsed for image sequences. Otherwise the current
working directory is searched for file sequences.
The json configuration may have the optional keys:
asset (str): The asset to publish to. If not provided fall back to
api.Session["AVALON_ASSET"]
subset (str): The subset to publish to. If not provided the sequence's
head (up to frame number) will be used.
startFrame (int): The start frame for the sequence
endFrame (int): The end frame for the sequence
root (str): The path to collect from (can be relative to the .json)
regex (str): A regex for the sequence filename
exclude_regex (str): A regex for filename to exclude from collection
metadata (dict): Custom metadata for instance.data["metadata"]
"""
order = pyblish.api.CollectorOrder
targets = ["filesequence"]
label = "File Sequences"
def process(self, context):
if os.environ.get("FILESEQUENCE"):
paths = os.environ["FILESEQUENCE"].split(os.pathsep)
else:
cwd = context.get("workspaceDir", os.getcwd())
paths = [cwd]
for path in paths:
self.log.info("Loading: {}".format(path))
if path.endswith(".json"):
# Search using .json configuration
with open(path, "r") as f:
try:
data = json.load(f)
except Exception as exc:
self.log.error("Error loading json: "
"{} - Exception: {}".format(path, exc))
raise
cwd = os.path.dirname(path)
root_override = data.get("root")
if root_override:
if os.path.isabs(root_override):
root = root_override
else:
root = os.path.join(cwd, root_override)
else:
root = cwd
else:
# Search in directory
data = dict()
root = path
self.log.info("Collecting: {}".format(root))
regex = data.get("regex")
if regex:
self.log.info("Using regex: {}".format(regex))
collections = collect(root=root,
regex=regex,
exclude_regex=data.get("exclude_regex"),
startFrame=data.get("startFrame"),
endFrame=data.get("endFrame"))
self.log.info("Found collections: {}".format(collections))
if data.get("subset"):
# If subset is provided for this json then it must be a single
# collection.
if len(collections) > 1:
self.log.error("Forced subset can only work with a single "
"found sequence")
raise RuntimeError("Invalid sequence")
# Get family from the data
families = data.get("families", ["colorbleed.imagesequence"])
assert isinstance(families, (list, tuple)), "Must be iterable"
assert families, "Must have at least a single family"
for collection in collections:
instance = context.create_instance(str(collection))
self.log.info("Collection: %s" % list(collection))
# Ensure each instance gets a unique reference to the data
data = copy.deepcopy(data)
# If no subset provided, get it from collection's head
subset = data.get("subset", collection.head.rstrip("_. "))
# If no start or end frame provided, get it from collection
indices = list(collection.indexes)
start = data.get("startFrame", indices[0])
end = data.get("endFrame", indices[-1])
instance.data.update({
"name": str(collection),
"family": families[0], # backwards compatibility / pyblish
"families": list(families),
"subset": subset,
"asset": data.get("asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"files": [list(collection)],
"startFrame": start,
"endFrame": end
})
instance.append(collection)
self.log.debug("Collected instance:\n"
"{}".format(pprint.pformat(instance.data)))

View file

@@ -0,0 +1,14 @@
import os
import pyblish.api
class CollectShellWorkspace(pyblish.api.ContextPlugin):
"""Inject the current workspace into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Shell Workspace"
hosts = ["shell"]
def process(self, context):
context.data["workspaceDir"] = os.getcwd()

View file

@@ -0,0 +1,12 @@
import pyblish.api
from avalon import api
class CollectMindbenderTime(pyblish.api.ContextPlugin):
"""Store global time at the time of publish"""
label = "Collect Current Time"
order = pyblish.api.CollectorOrder
def process(self, context):
context.data["time"] = api.time()

View file

@@ -0,0 +1,349 @@
import os
import logging
import shutil
import errno
import pyblish.api
from avalon import api, io
log = logging.getLogger(__name__)
class IntegrateAsset(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
    The order of families is important: when working with lookdev you want to
    first publish the texture, update the texture paths in the nodes and then
    publish the shading network. The same goes for file-dependent assets.
"""
label = "Integrate Asset"
order = pyblish.api.IntegratorOrder
families = ["colorbleed.animation",
"colorbleed.camera",
"colorbleed.imagesequence",
"colorbleed.look",
"config.apps.mayaAscii",
"colorbleed.model",
"colorbleed.pointcache",
"colorbleed.setdress",
"colorbleed.rig",
"colorbleed.vrayproxy",
"colorbleed.yetiRig",
"colorbleed.yeticache"]
def process(self, instance):
self.register(instance)
self.log.info("Integrating Asset in to the database ...")
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
project = io.find_one({"type": "project"},
projection={"config.template.publish": True})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
if assumed_version != next_version:
raise AttributeError("Assumed version 'v{0:03d}' does not match"
"next version in database "
"('v{1:03d}')".format(assumed_version,
next_version))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
template_data = {"root": root,
"project": PROJECT,
"silo": asset['silo'],
"asset": ASSET,
"subset": subset["name"],
"version": version["name"]}
template_publish = project["config"]["template"]["publish"]
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
for files in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
collection = files
# Assert that each member has identical suffix
_, ext = os.path.splitext(collection[0])
assert all(ext == os.path.splitext(name)[1]
for name in collection), (
"Files had varying suffixes, this is a bug"
)
assert not any(os.path.isabs(name) for name in collection)
template_data["representation"] = ext[1:]
for fname in collection:
src = os.path.join(stagingdir, fname)
dst = os.path.join(
template_publish.format(**template_data),
fname
)
instance.data["transfers"].append([src, dst])
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
fname = files
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
dst = template_publish.format(**template_data)
instance.data["transfers"].append([src, dst])
representation = {
"schema": "avalon-core:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"project": PROJECT,
"asset": ASSET,
"silo": asset['silo'],
"subset": subset["name"],
"version": version["name"],
"representation": ext[1:]
}
}
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data["transfers"]
for src, dest in transfers:
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
            dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "avalon-core:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "avalon-core:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
# create relative source path for DB
relative_path = os.path.relpath(context.data["currentFile"],
api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment")}
        # Include optional data if present in the instance
optionals = ["startFrame", "endFrame", "step", "handles"]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data[key]
return version_data

View file

@@ -0,0 +1,316 @@
import os
import json
import pprint
import re
from avalon import api, io
from avalon.vendor import requests, clique
import pyblish.api
def _get_script():
"""Get path to the image sequence script"""
try:
from colorbleed.scripts import publish_filesequence
    except Exception:
        raise RuntimeError("Expected module 'publish_filesequence' "
                           "to be available")
module_path = publish_filesequence.__file__
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
return module_path
# Logic to retrieve latest files concerning extendFrames
def get_latest_version(asset_name, subset_name, family):
# Get asset
asset_name = io.find_one({"type": "asset",
"name": asset_name},
projection={"name": True})
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset_name["_id"]},
projection={"_id": True, "name": True})
# Check if subsets actually exists (pre-run check)
assert subset, "No subsets found, please publish with `extendFrames` off"
# Get version
version_projection = {"name": True,
"data.startFrame": True,
"data.endFrame": True,
"parent": True}
version = io.find_one({"type": "version",
"parent": subset["_id"],
"data.families": family},
projection=version_projection,
sort=[("name", -1)])
assert version, "No version found, this is a bug"
return version
def get_resources(version, extension=None):
"""
Get the files from the specific version
"""
query = {"type": "representation", "parent": version["_id"]}
if extension:
query["name"] = extension
representation = io.find_one(query)
assert representation, "This is a bug"
directory = api.get_representation_path(representation)
print("Source: ", directory)
resources = sorted([os.path.normpath(os.path.join(directory, fname))
for fname in os.listdir(directory)])
return resources
def get_resource_files(resources, frame_range, override=True):
res_collections, _ = clique.assemble(resources)
assert len(res_collections) == 1, "Multiple collections found"
res_collection = res_collections[0]
# Remove any frames
if override:
for frame in frame_range:
if frame not in res_collection.indexes:
continue
res_collection.indexes.remove(frame)
return list(res_collection)
class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):
"""Submit image sequence publish jobs to Deadline.
These jobs are dependent on a deadline job submission prior to this
plug-in.
Renders are submitted to a Deadline Web Service as
supplied via the environment variable AVALON_DEADLINE
Options in instance.data:
- deadlineSubmission (dict, Required): The returned .json
data from the job submission to deadline.
- outputDir (str, Required): The output directory where the metadata
          file should be generated. It's assumed that this will also be the
          final folder containing the output files.
- ext (str, Optional): The extension (including `.`) that is required
in the output filename to be picked up for image sequence
publishing.
- publishJobState (str, Optional): "Active" or "Suspended"
This defaults to "Suspended"
This requires a "startFrame" and "endFrame" to be present in instance.data
or in context.data.
"""
label = "Submit image sequence jobs to Deadline"
order = pyblish.api.IntegratorOrder + 0.1
hosts = ["fusion", "maya"]
families = ["colorbleed.saver.deadline", "colorbleed.renderlayer"]
def process(self, instance):
AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
"http://localhost:8082")
assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"
# Get a submission job
job = instance.data.get("deadlineSubmissionJob")
if not job:
raise RuntimeError("Can't continue without valid deadline "
"submission prior to this plug-in.")
data = instance.data.copy()
subset = data["subset"]
state = data.get("publishJobState", "Suspended")
job_name = "{batch} - {subset} [publish image sequence]".format(
batch=job["Props"]["Name"],
subset=subset
)
# Add in start/end frame
context = instance.context
start = instance.data.get("startFrame", context.data["startFrame"])
end = instance.data.get("endFrame", context.data["endFrame"])
resources = []
# Add in regex for sequence filename
# This assumes the output files start with subset name and ends with
# a file extension.
if "ext" in instance.data:
ext = re.escape(instance.data["ext"])
else:
ext = "\.\D+"
regex = "^{subset}.*\d+{ext}$".format(subset=re.escape(subset),
ext=ext)
# Write metadata for publish job
render_job = data.pop("deadlineSubmissionJob")
metadata = {
"regex": regex,
"startFrame": start,
"endFrame": end,
"families": ["colorbleed.imagesequence"],
# Optional metadata (for debugging)
"metadata": {
"instance": data,
"job": job,
"session": api.Session.copy()
}
}
# Ensure output dir exists
output_dir = instance.data["outputDir"]
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
if data.get("extendFrames", False):
family = "colorbleed.imagesequence"
override = data["overrideExistingFrame"]
# override = data.get("overrideExistingFrame", False)
out_file = render_job.get("OutFile")
if not out_file:
raise RuntimeError("OutFile not found in render job!")
extension = os.path.splitext(out_file[0])[1]
_ext = extension[1:]
# Frame comparison
prev_start = None
prev_end = None
resource_range = range(int(start), int(end)+1)
# Gather all the subset files (one subset per render pass!)
subset_names = [data["subset"]]
subset_names.extend(data.get("renderPasses", []))
for subset_name in subset_names:
version = get_latest_version(asset_name=data["asset"],
subset_name=subset_name,
family=family)
# Set prev start / end frames for comparison
if not prev_start and not prev_end:
prev_start = version["data"]["startFrame"]
prev_end = version["data"]["endFrame"]
subset_resources = get_resources(version, _ext)
resource_files = get_resource_files(subset_resources,
resource_range,
override)
resources.extend(resource_files)
updated_start = min(start, prev_start)
updated_end = max(end, prev_end)
# Update metadata and instance start / end frame
self.log.info("Updating start / end frame : "
"{} - {}".format(updated_start, updated_end))
# TODO : Improve logic to get new frame range for the
# publish job (publish_filesequence.py)
# The current approach is not following Pyblish logic which is based
# on Collect / Validate / Extract.
# ---- Collect Plugins ---
# Collect Extend Frames - Only run if extendFrames is toggled
# # # Store in instance:
# # # Previous rendered files per subset based on frames
# # # --> Add to instance.data[resources]
# # # Update publish frame range
# ---- Validate Plugins ---
# Validate Extend Frames
# # # Check if instance has the requirements to extend frames
# There might have been some things which can be added to the list
# Please do so when fixing this.
# Start frame
metadata["startFrame"] = updated_start
metadata["metadata"]["instance"]["startFrame"] = updated_start
# End frame
metadata["endFrame"] = updated_end
metadata["metadata"]["instance"]["endFrame"] = updated_end
metadata_filename = "{}_metadata.json".format(subset)
metadata_path = os.path.join(output_dir, metadata_filename)
with open(metadata_path, "w") as f:
json.dump(metadata, f, indent=4, sort_keys=True)
# Generate the payload for Deadline submission
payload = {
"JobInfo": {
"Plugin": "Python",
"BatchName": job["Props"]["Batch"],
"Name": job_name,
"JobType": "Normal",
"JobDependency0": job["_id"],
"UserName": job["Props"]["User"],
"Comment": instance.context.data.get("comment", ""),
"InitialStatus": state
},
"PluginInfo": {
"Version": "3.6",
"ScriptFile": _get_script(),
"Arguments": '--path "{}"'.format(metadata_path),
"SingleFrameOnly": "True"
},
# Mandatory for Deadline, may be empty
"AuxFiles": []
}
# Transfer the environment from the original job to this dependent
# job so they use the same environment
environment = job["Props"].get("Env", {})
payload["JobInfo"].update({
"EnvironmentKeyValue%d" % index: "{key}={value}".format(
key=key,
value=environment[key]
) for index, key in enumerate(environment)
})
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
url = "{}/api/jobs".format(AVALON_DEADLINE)
response = requests.post(url, json=payload)
if not response.ok:
raise Exception(response.text)
# Copy files from previous render if extendFrame is True
if data.get("extendFrames", False):
self.log.info("Preparing to copy ..")
import shutil
dest_path = data["outputDir"]
for source in resources:
src_file = os.path.basename(source)
dest = os.path.join(dest_path, src_file)
shutil.copy(source, dest)
self.log.info("Finished copying %i files" % len(resources))

View file

@@ -0,0 +1,15 @@
import pyblish.api
class ValidateCurrentSaveFile(pyblish.api.ContextPlugin):
"""File must be saved before publishing"""
label = "Validate File Saved"
order = pyblish.api.ValidatorOrder - 0.1
hosts = ["maya", "houdini"]
def process(self, context):
current_file = context.data["currentFile"]
if not current_file:
raise RuntimeError("File not saved")

View file

@@ -0,0 +1,34 @@
import pyblish.api
class ValidateSequenceFrames(pyblish.api.InstancePlugin):
"""Ensure the sequence of frames is complete
The files found in the folder are checked against the startFrame and
endFrame of the instance. If the first or last file is not
corresponding with the first or last frame it is flagged as invalid.
"""
order = pyblish.api.ValidatorOrder
label = "Validate Sequence Frames"
families = ["colorbleed.imagesequence"]
hosts = ["shell"]
def process(self, instance):
collection = instance[0]
self.log.info(collection)
frames = list(collection.indexes)
current_range = (frames[0], frames[-1])
required_range = (instance.data["startFrame"],
instance.data["endFrame"])
if current_range != required_range:
raise ValueError("Invalid frame range: {0} - "
"expected: {1}".format(current_range,
required_range))
missing = collection.holes().indexes
assert not missing, "Missing frames: %s" % (missing,)

View file

@@ -0,0 +1,36 @@
from collections import OrderedDict
import avalon.maya
from config.apps.maya import lib
class CreateAnimation(avalon.maya.Creator):
"""Animation output for character rigs"""
name = "animationDefault"
label = "Animation"
family = "colorbleed.animation"
icon = "male"
def __init__(self, *args, **kwargs):
super(CreateAnimation, self).__init__(*args, **kwargs)
# create an ordered dict with the existing data first
data = OrderedDict(**self.data)
# get basic animation data : start / end / handles / steps
for key, value in lib.collect_animation_data().items():
data[key] = value
# Write vertex colors with the geometry.
data["writeColorSets"] = False
# Include only renderable visible shapes.
# Skips locators and empty transforms
data["renderableOnly"] = False
# Include only nodes that are visible at least once during the
# frame range.
data["visibleOnly"] = False
self.data = data

View file

@@ -0,0 +1,27 @@
from collections import OrderedDict
import avalon.maya
from config.apps.maya import lib
class CreateCamera(avalon.maya.Creator):
"""Single baked camera"""
name = "cameraDefault"
label = "Camera"
family = "colorbleed.camera"
icon = "video-camera"
def __init__(self, *args, **kwargs):
super(CreateCamera, self).__init__(*args, **kwargs)
# get basic animation data : start / end / handles / steps
data = OrderedDict(**self.data)
animation_data = lib.collect_animation_data()
for key, value in animation_data.items():
data[key] = value
# Bake to world space by default, when this is False it will also
# include the parent hierarchy in the baked results
data['bakeToWorldSpace'] = True
self.data = data

View file

@@ -0,0 +1,20 @@
from collections import OrderedDict
import avalon.maya
from config.apps.maya import lib
class CreateLook(avalon.maya.Creator):
"""Shader connections defining shape look"""
name = "look"
label = "Look"
family = "colorbleed.look"
icon = "paint-brush"
def __init__(self, *args, **kwargs):
super(CreateLook, self).__init__(*args, **kwargs)
data = OrderedDict(**self.data)
data["renderlayer"] = lib.get_current_renderlayer()
self.data = data

View file

@@ -0,0 +1,10 @@
import avalon.maya
class CreateMayaAscii(avalon.maya.Creator):
"""Raw Maya Ascii file export"""
name = "mayaAscii"
label = "Maya Ascii"
family = "config.apps.mayaAscii"
icon = "file-archive-o"

View file

@@ -0,0 +1,10 @@
import avalon.maya
class CreateModel(avalon.maya.Creator):
"""Polygonal static geometry"""
name = "modelDefault"
label = "Model"
family = "colorbleed.model"
icon = "cube"

View file

@@ -0,0 +1,36 @@
from collections import OrderedDict
import avalon.maya
from config.apps.maya import lib
class CreatePointCache(avalon.maya.Creator):
"""Alembic pointcache for animated data"""
name = "pointcache"
label = "Point Cache"
family = "colorbleed.pointcache"
icon = "gears"
def __init__(self, *args, **kwargs):
super(CreatePointCache, self).__init__(*args, **kwargs)
# create an ordered dict with the existing data first
data = OrderedDict(**self.data)
# get basic animation data : start / end / handles / steps
for key, value in lib.collect_animation_data().items():
data[key] = value
# Write vertex colors with the geometry.
data["writeColorSets"] = False
# Include only renderable visible shapes.
# Skips locators and empty transforms
data["renderableOnly"] = False
# Include only nodes that are visible at least once during the
# frame range.
data["visibleOnly"] = False
self.data = data

View file

@@ -0,0 +1,66 @@
from collections import OrderedDict
from maya import cmds
from avalon.vendor import requests
import avalon.maya
from avalon import api
class CreateRenderGlobals(avalon.maya.Creator):
label = "Render Globals"
family = "colorbleed.renderglobals"
icon = "gears"
def __init__(self, *args, **kwargs):
super(CreateRenderGlobals, self).__init__(*args, **kwargs)
# We won't be publishing this one
self.data["id"] = "avalon.renderglobals"
# get pools
AVALON_DEADLINE = api.Session["AVALON_DEADLINE"]
argument = "{}/api/pools?NamesOnly=true".format(AVALON_DEADLINE)
response = requests.get(argument)
if not response.ok:
self.log.warning("No pools retrieved")
pools = []
else:
pools = response.json()
# We don't need subset or asset attributes
self.data.pop("subset", None)
self.data.pop("asset", None)
self.data.pop("active", None)
data = OrderedDict(**self.data)
data["suspendPublishJob"] = False
data["extendFrames"] = False
data["overrideExistingFrame"] = True
data["useLegacyRenderLayers"] = True
data["priority"] = 50
data["framesPerTask"] = 1
data["whitelist"] = False
data["machineList"] = ""
data["useMayaBatch"] = True
data["primaryPool"] = pools
# We add a string "-" to allow the user to not set any secondary pools
data["secondaryPool"] = ["-"] + pools
self.data = data
self.options = {"useSelection": False} # Force no content
def process(self):
exists = cmds.ls(self.name)
assert len(exists) <= 1, (
"More than one renderglobal exists, this is a bug")
if exists:
return cmds.warning("%s already exists." % exists[0])
super(CreateRenderGlobals, self).process()
cmds.setAttr("{}.machineList".format(self.name), lock=True)

View file

@@ -0,0 +1,21 @@
from maya import cmds
import avalon.maya
class CreateRig(avalon.maya.Creator):
"""Artist-friendly rig with controls to direct motion"""
name = "rigDefault"
label = "Rig"
family = "colorbleed.rig"
icon = "wheelchair"
def process(self):
instance = super(CreateRig, self).process()
self.log.info("Creating Rig instance set up ...")
controls = cmds.sets(name="controls_SET", empty=True)
pointcache = cmds.sets(name="out_SET", empty=True)
cmds.sets([controls, pointcache], forceElement=instance)

View file

@@ -0,0 +1,10 @@
import avalon.maya
class CreateSetDress(avalon.maya.Creator):
"""A grouped package of loaded content"""
name = "setdress"
label = "Set Dress"
family = "colorbleed.setdress"
icon = "cubes"

View file

@@ -0,0 +1,23 @@
from collections import OrderedDict
import avalon.maya
class CreateVrayProxy(avalon.maya.Creator):
"""Alembic pointcache for animated data"""
name = "vrayproxy"
label = "VRay Proxy"
family = "colorbleed.vrayproxy"
icon = "gears"
def __init__(self, *args, **kwargs):
super(CreateVrayProxy, self).__init__(*args, **kwargs)
data = OrderedDict(**self.data)
data["animation"] = False
data["startFrame"] = 1
data["endFrame"] = 1
self.data.update(data)

View file

@@ -0,0 +1,25 @@
from collections import OrderedDict
import avalon.maya
from config.apps.maya import lib
class CreateYetiCache(avalon.maya.Creator):
"""Output for procedural plugin nodes of Yeti """
name = "yetiDefault"
label = "Yeti Cache"
family = "colorbleed.yeticache"
icon = "pagelines"
def __init__(self, *args, **kwargs):
super(CreateYetiCache, self).__init__(*args, **kwargs)
data = OrderedDict(self.data)
data["peroll"] = 0
anim_data = lib.collect_animation_data()
data.update({"startFrame": anim_data["startFrame"],
"endFrame": anim_data["endFrame"]})
self.data = data

View file

@@ -0,0 +1,20 @@
from maya import cmds
import avalon.maya
class CreateYetiRig(avalon.maya.Creator):
"""Output for procedural plugin nodes ( Yeti / XGen / etc)"""
label = "Yeti Rig"
family = "colorbleed.yetiRig"
icon = "usb"
def process(self):
instance = super(CreateYetiRig, self).process()
self.log.info("Creating Rig instance set up ...")
input_meshes = cmds.sets(name="input_SET", empty=True)
cmds.sets(input_meshes, forceElement=instance)

View file

@@ -0,0 +1,48 @@
import config.apps.maya.plugin
class AbcLoader(config.apps.maya.plugin.ReferenceLoader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["colorbleed.animation",
"colorbleed.camera",
"colorbleed.pointcache"]
representations = ["abc"]
label = "Reference animation"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
from avalon import maya
cmds.loadPlugin("AbcImport.mll", quiet=True)
# Prevent identical alembic nodes from being shared
# Create unique namespace for the cameras
# Get name from asset being loaded
# Assuming name is subset name from the animation, we split the number
# suffix from the name to ensure the namespace is unique
name = name.split("_")[0]
namespace = maya.unique_namespace("{}_".format(name),
format="%03d",
suffix="_abc")
# hero_001 (abc)
# asset_counter{optional}
nodes = cmds.file(self.fname,
namespace=namespace,
sharedReferenceFile=False,
groupReference=True,
groupName="{}:{}".format(namespace, name),
reference=True,
returnNewNodes=True)
        # Store the loaded nodes on the loader
self[:] = nodes
return nodes

View file

@@ -0,0 +1,147 @@
"""A module containing generic loader actions that will display in the Loader.
"""
from avalon import api
class SetFrameRangeLoader(api.Loader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["colorbleed.animation",
"colorbleed.camera",
"colorbleed.pointcache"]
representations = ["abc"]
label = "Set frame range"
order = 11
icon = "clock-o"
color = "white"
def load(self, context, name, namespace, data):
import maya.cmds as cmds
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
"end frame data is missing..")
return
cmds.playbackOptions(minTime=start,
maxTime=end,
animationStartTime=start,
animationEndTime=end)
class SetFrameRangeWithHandlesLoader(api.Loader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["colorbleed.animation",
"colorbleed.camera",
"colorbleed.pointcache"]
representations = ["abc"]
label = "Set frame range (with handles)"
order = 12
icon = "clock-o"
color = "white"
def load(self, context, name, namespace, data):
import maya.cmds as cmds
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
"end frame data is missing..")
return
# Include handles
handles = version_data.get("handles", 0)
start -= handles
end += handles
cmds.playbackOptions(minTime=start,
maxTime=end,
animationStartTime=start,
animationEndTime=end)
class ImportMayaLoader(api.Loader):
"""Import action for Maya (unmanaged)
Warning:
The loaded content will be unmanaged and is *not* visible in the
scene inventory. It's purely intended to merge content into your scene
so you could also use it as a new base.
"""
representations = ["ma"]
families = ["*"]
label = "Import"
order = 10
icon = "arrow-circle-down"
color = "#775555"
def load(self, context, name=None, namespace=None, data=None):
import maya.cmds as cmds
from avalon import maya
from avalon.maya import lib
choice = self.display_warning()
if choice is False:
return
asset = context['asset']
namespace = namespace or lib.unique_namespace(
asset["name"] + "_",
prefix="_" if asset["name"][0].isdigit() else "",
suffix="_",
)
with maya.maintained_selection():
cmds.file(self.fname,
i=True,
namespace=namespace,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
# We do not containerize imported content, it remains unmanaged
return
def display_warning(self):
"""Show warning to ensure the user can't import models by accident
Returns:
bool
"""
from avalon.vendor.Qt import QtWidgets
accept = QtWidgets.QMessageBox.Ok
buttons = accept | QtWidgets.QMessageBox.Cancel
message = "Are you sure you want import this"
state = QtWidgets.QMessageBox.warning(None,
"Are you sure?",
message,
buttons=buttons,
defaultButton=accept)
return state == accept

View file

@@ -0,0 +1,33 @@
import config.apps.maya.plugin
class AbcLoader(config.apps.maya.plugin.ReferenceLoader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["colorbleed.animation",
"colorbleed.pointcache"]
label = "Reference animation"
representations = ["abc"]
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
cmds.loadPlugin("AbcImport.mll", quiet=True)
nodes = cmds.file(self.fname,
namespace=namespace,
sharedReferenceFile=False,
groupReference=True,
groupName="{}:{}".format(namespace, name),
reference=True,
returnNewNodes=True)
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)

View file

@@ -0,0 +1,45 @@
import config.apps.maya.plugin
class CameraLoader(config.apps.maya.plugin.ReferenceLoader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["colorbleed.camera"]
label = "Reference camera"
representations = ["abc", "ma"]
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
# Get family type from the context
cmds.loadPlugin("AbcImport.mll", quiet=True)
nodes = cmds.file(self.fname,
namespace=namespace,
sharedReferenceFile=False,
groupReference=True,
groupName="{}:{}".format(namespace, name),
reference=True,
returnNewNodes=True)
cameras = cmds.ls(nodes, type="camera")
# Check the Maya version, lockTransform has been introduced since
# Maya 2016.5 Ext 2
version = int(cmds.about(version=True))
if version >= 2016:
for camera in cameras:
cmds.camera(camera, edit=True, lockTransform=True)
else:
self.log.warning("This version of Maya does not support locking of"
" transforms of cameras.")
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)

View file

@@ -0,0 +1,40 @@
import config.apps.maya.plugin
class LookLoader(config.apps.maya.plugin.ReferenceLoader):
"""Specific loader for lookdev"""
families = ["colorbleed.look"]
representations = ["ma"]
label = "Reference look"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
"""
        Load and try to assign Lookdev to nodes based on relationship data
Args:
name:
namespace:
context:
data:
Returns:
"""
import maya.cmds as cmds
from avalon import maya
with maya.maintained_selection():
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True)
        self[:] = nodes

        return nodes
def switch(self, container, representation):
self.update(container, representation)

View file

@@ -0,0 +1,33 @@
import config.apps.maya.plugin
class MayaAsciiLoader(config.apps.maya.plugin.ReferenceLoader):
"""Load the model"""
families = ["config.apps.mayaAscii"]
representations = ["ma"]
label = "Reference Maya Ascii"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
from avalon import maya
with maya.maintained_selection():
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)

View file

@ -0,0 +1,125 @@
from avalon import api
import config.apps.maya.plugin
class ModelLoader(config.apps.maya.plugin.ReferenceLoader):
"""Load the model"""
families = ["colorbleed.model"]
representations = ["ma"]
label = "Reference Model"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
from avalon import maya
with maya.maintained_selection():
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)
class GpuCacheLoader(api.Loader):
"""Load model Alembic as gpuCache"""
families = ["colorbleed.model"]
representations = ["abc"]
label = "Import Gpu Cache"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, data):
import maya.cmds as cmds
import avalon.maya.lib as lib
from avalon.maya.pipeline import containerise
asset = context['asset']['name']
namespace = namespace or lib.unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
cmds.loadPlugin("gpuCache", quiet=True)
# Root group
label = "{}:{}".format(namespace, name)
root = cmds.group(name=label, empty=True)
# Create transform with shape
transform_name = label + "_GPU"
transform = cmds.createNode("transform", name=transform_name,
parent=root)
cache = cmds.createNode("gpuCache",
parent=transform,
name="{0}Shape".format(transform_name))
# Set the cache filepath
cmds.setAttr(cache + '.cacheFileName', self.fname, type="string")
cmds.setAttr(cache + '.cacheGeomPath', "|", type="string") # root
# Lock parenting of the transform and cache
cmds.lockNode([transform, cache], lock=True)
nodes = [root, transform, cache]
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
import maya.cmds as cmds
path = api.get_representation_path(representation)
# Update the cache
members = cmds.sets(container['objectName'], query=True)
caches = cmds.ls(members, type="gpuCache", long=True)
assert len(caches) == 1, "This is a bug"
for cache in caches:
cmds.setAttr(cache + ".cacheFileName", path, type="string")
cmds.setAttr(container["objectName"] + ".representation",
str(representation["_id"]),
type="string")
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
import maya.cmds as cmds
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass

View file

@ -0,0 +1,70 @@
from maya import cmds
import config.apps.maya.plugin
from avalon import api, maya
class RigLoader(config.apps.maya.plugin.ReferenceLoader):
"""Specific loader for rigs
This automatically creates an instance for animators upon load.
"""
families = ["colorbleed.rig"]
representations = ["ma"]
label = "Reference rig"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
# Store for post-process
self[:] = nodes
if data.get("post_process", True):
self._post_process(name, namespace, context, data)
return nodes
def _post_process(self, name, namespace, context, data):
# TODO(marcus): We are hardcoding the name "out_SET" here.
# Better register this keyword, so that it can be used
# elsewhere, such as in the Integrator plug-in,
# without duplication.
output = next((node for node in self if
node.endswith("out_SET")), None)
controls = next((node for node in self if
node.endswith("controls_SET")), None)
assert output, "No out_SET in rig, this is a bug."
assert controls, "No controls_SET in rig, this is a bug."
# Find the roots amongst the loaded nodes
roots = cmds.ls(self[:], assemblies=True, long=True)
assert roots, "No root nodes in rig, this is a bug."
asset = api.Session["AVALON_ASSET"]
dependency = str(context["representation"]["_id"])
# Create the animation instance
with maya.maintained_selection():
cmds.select([output, controls] + roots, noExpand=True)
api.create(name=namespace,
asset=asset,
family="colorbleed.animation",
options={"useSelection": True},
data={"dependencies": dependency})
def switch(self, container, representation):
self.update(container, representation)

View file

@ -0,0 +1,80 @@
from avalon import api
class SetDressLoader(api.Loader):
families = ["colorbleed.setdress"]
representations = ["json"]
label = "Load Set Dress"
order = -9
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, data):
from avalon.maya.pipeline import containerise
from avalon.maya import lib
asset = context['asset']['name']
namespace = namespace or lib.unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
from colorbleed import setdress_api
containers = setdress_api.load_package(filepath=self.fname,
name=name,
namespace=namespace)
self[:] = containers
# Only containerize if any nodes were loaded by the Loader
nodes = self[:]
if not nodes:
return
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
from colorbleed import setdress_api
return setdress_api.update_package(container,
representation)
def remove(self, container):
"""Remove all sub containers"""
from avalon import api
from colorbleed import setdress_api
import maya.cmds as cmds
# Remove all members
member_containers = setdress_api.get_contained_containers(container)
for member_container in member_containers:
self.log.info("Removing container %s",
member_container['objectName'])
api.remove(member_container)
# Remove alembic hierarchy reference
# TODO: Check whether removing all contained references is safe enough
members = cmds.sets(container['objectName'], query=True) or []
references = cmds.ls(members, type="reference")
for reference in references:
self.log.info("Removing %s", reference)
fname = cmds.referenceQuery(reference, filename=True)
cmds.file(fname, removeReference=True)
# Delete container and its contents
if cmds.objExists(container['objectName']):
members = cmds.sets(container['objectName'], query=True) or []
cmds.delete([container['objectName']] + members)
# TODO: Ensure namespace is gone

View file

@ -0,0 +1,144 @@
from avalon.maya import lib
from avalon import api
import maya.cmds as cmds
class VRayProxyLoader(api.Loader):
"""Load VRayMesh proxy"""
families = ["colorbleed.vrayproxy"]
representations = ["vrmesh"]
label = "Import VRay Proxy"
order = -10
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, data):
from avalon.maya.pipeline import containerise
from config.apps.maya.lib import namespaced
asset_name = context['asset']["name"]
namespace = namespace or lib.unique_namespace(
asset_name + "_",
prefix="_" if asset_name[0].isdigit() else "",
suffix="_",
)
# Ensure V-Ray for Maya is loaded.
cmds.loadPlugin("vrayformaya", quiet=True)
with lib.maintained_selection():
cmds.namespace(addNamespace=namespace)
with namespaced(namespace, new=False):
nodes = self.create_vray_proxy(name,
filename=self.fname)
self[:] = nodes
if not nodes:
return
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
node = container['objectName']
assert cmds.objExists(node), "Missing container"
members = cmds.sets(node, query=True) or []
vraymeshes = cmds.ls(members, type="VRayMesh")
assert vraymeshes, "Cannot find VRayMesh in container"
filename = api.get_representation_path(representation)
for vray_mesh in vraymeshes:
cmds.setAttr("{}.fileName".format(vray_mesh),
filename,
type="string")
# Update metadata
cmds.setAttr("{}.representation".format(node),
str(representation["_id"]),
type="string")
def remove(self, container):
# Delete container and its contents
if cmds.objExists(container['objectName']):
members = cmds.sets(container['objectName'], query=True) or []
cmds.delete([container['objectName']] + members)
# Remove the namespace, if empty
namespace = container['namespace']
if cmds.namespace(exists=namespace):
members = cmds.namespaceInfo(namespace, listNamespace=True)
if not members:
cmds.namespace(removeNamespace=namespace)
else:
self.log.warning("Namespace not deleted because it "
"still has members: %s", namespace)
def switch(self, container, representation):
self.update(container, representation)
def create_vray_proxy(self, name, filename):
"""Re-create the structure created by VRay to support vrmeshes
Args:
name(str): name of the asset
Returns:
nodes(list)
"""
# Create nodes
vray_mesh = cmds.createNode('VRayMesh', name="{}_VRMS".format(name))
mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name))
vray_mat = cmds.createNode("VRayMeshMaterial",
name="{}_VRMM".format(name))
vray_mat_sg = cmds.createNode("shadingEngine",
name="{}_VRSG".format(name))
cmds.setAttr("{}.fileName".format(vray_mesh),
filename,
type="string")
# Create important connections
cmds.connectAttr("time1.outTime",
"{0}.currentFrame".format(vray_mesh))
cmds.connectAttr("{}.fileName2".format(vray_mesh),
"{}.fileName".format(vray_mat))
cmds.connectAttr("{}.instancing".format(vray_mesh),
"{}.instancing".format(vray_mat))
cmds.connectAttr("{}.output".format(vray_mesh),
"{}.inMesh".format(mesh_shape))
cmds.connectAttr("{}.overrideFileName".format(vray_mesh),
"{}.overrideFileName".format(vray_mat))
cmds.connectAttr("{}.currentFrame".format(vray_mesh),
"{}.currentFrame".format(vray_mat))
# Set surface shader input
cmds.connectAttr("{}.outColor".format(vray_mat),
"{}.surfaceShader".format(vray_mat_sg))
# Connect mesh to shader
cmds.sets([mesh_shape], addElement=vray_mat_sg)
group_node = cmds.group(empty=True, name="{}_GRP".format(name))
mesh_transform = cmds.listRelatives(mesh_shape,
parent=True, fullPath=True)
cmds.parent(mesh_transform, group_node)
nodes = [vray_mesh, mesh_shape, vray_mat, vray_mat_sg, group_node]
# Fix: Force refresh so the mesh shows correctly after creation
cmds.refresh()
cmds.setAttr("{}.geomType".format(vray_mesh), 2)
return nodes

View file

@ -0,0 +1,298 @@
import os
import json
import re
import glob
from collections import defaultdict
from maya import cmds
from avalon import api
from avalon.maya import lib as avalon_lib, pipeline
from config.apps.maya import lib
class YetiCacheLoader(api.Loader):
families = ["colorbleed.yeticache", "colorbleed.yetiRig"]
representations = ["fur"]
label = "Load Yeti Cache"
order = -9
icon = "code-fork"
color = "orange"
def load(self, context, name=None, namespace=None, data=None):
# Build namespace
asset = context["asset"]
if namespace is None:
namespace = self.create_namespace(asset["name"])
# Ensure Yeti is loaded
if not cmds.pluginInfo("pgYetiMaya", query=True, loaded=True):
cmds.loadPlugin("pgYetiMaya", quiet=True)
# Get JSON
fname, ext = os.path.splitext(self.fname)
settings_fname = "{}.fursettings".format(fname)
with open(settings_fname, "r") as fp:
fursettings = json.load(fp)
# Check if resources map exists
# Get node name from JSON
if "nodes" not in fursettings:
raise RuntimeError("Encountered invalid data, expect 'nodes' in "
"fursettings.")
node_data = fursettings["nodes"]
nodes = self.create_nodes(namespace, node_data)
group_name = "{}:{}".format(namespace, name)
group_node = cmds.group(nodes, name=group_name)
nodes.append(group_node)
self[:] = nodes
return pipeline.containerise(name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def remove(self, container):
from maya import cmds
namespace = container["namespace"]
container_name = container["objectName"]
self.log.info("Removing '%s' from Maya.." % container["name"])
container_content = cmds.sets(container_name, query=True)
nodes = cmds.ls(container_content, long=True)
nodes.append(container_name)
try:
cmds.delete(nodes)
except ValueError:
# Already implicitly deleted by Maya upon removing reference
pass
cmds.namespace(removeNamespace=namespace, deleteNamespaceContent=True)
def update(self, container, representation):
namespace = container["namespace"]
container_node = container["objectName"]
path = api.get_representation_path(representation)
# Get all node data
fname, ext = os.path.splitext(path)
settings_fname = "{}.fursettings".format(fname)
with open(settings_fname, "r") as fp:
settings = json.load(fp)
# Collect scene information of asset
set_members = cmds.sets(container["objectName"], query=True)
container_root = lib.get_container_transforms(container,
members=set_members,
root=True)
scene_nodes = cmds.ls(set_members, type="pgYetiMaya", long=True)
# Build lookup with cbId as keys
scene_lookup = defaultdict(list)
for node in scene_nodes:
cb_id = lib.get_id(node)
scene_lookup[cb_id].append(node)
# Re-assemble metadata with cbId as keys
meta_data_lookup = {n["cbId"]: n for n in settings["nodes"]}
        # Compare lookups and get the nodes which are no longer relevant
to_delete_lookup = {cb_id for cb_id in scene_lookup.keys() if
cb_id not in meta_data_lookup}
if to_delete_lookup:
# Get nodes and remove entry from lookup
to_remove = []
for _id in to_delete_lookup:
# Get all related nodes
shapes = scene_lookup[_id]
# Get the parents of all shapes under the ID
transforms = cmds.listRelatives(shapes,
parent=True,
fullPath=True) or []
to_remove.extend(shapes + transforms)
                # Remove id from lookup
scene_lookup.pop(_id, None)
cmds.delete(to_remove)
for cb_id, data in meta_data_lookup.items():
# Update cache file name
file_name = data["name"].replace(":", "_")
cache_file_path = "{}.%04d.fur".format(file_name)
data["attrs"]["cacheFileName"] = os.path.join(path, cache_file_path)
if cb_id not in scene_lookup:
self.log.info("Creating new nodes ..")
new_nodes = self.create_nodes(namespace, [data])
cmds.sets(new_nodes, addElement=container_node)
cmds.parent(new_nodes, container_root)
else:
# Update the matching nodes
scene_nodes = scene_lookup[cb_id]
lookup_result = meta_data_lookup[cb_id]["name"]
# Remove namespace if any (e.g.: "character_01_:head_YNShape")
node_name = lookup_result.rsplit(":", 1)[-1]
for scene_node in scene_nodes:
# Get transform node, this makes renaming easier
transforms = cmds.listRelatives(scene_node,
parent=True,
fullPath=True) or []
assert len(transforms) == 1, "This is a bug!"
# Get scene node's namespace and rename the transform node
lead = scene_node.rsplit(":", 1)[0]
namespace = ":{}".format(lead.rsplit("|")[-1])
new_shape_name = "{}:{}".format(namespace, node_name)
new_trans_name = new_shape_name.rsplit("Shape", 1)[0]
transform_node = transforms[0]
cmds.rename(transform_node,
new_trans_name,
ignoreShape=False)
# Get the newly named shape node
yeti_nodes = cmds.listRelatives(new_trans_name,
children=True)
yeti_node = yeti_nodes[0]
for attr, value in data["attrs"].items():
lib.set_attribute(attr, value, yeti_node)
cmds.setAttr("{}.representation".format(container_node),
str(representation["_id"]),
typ="string")
def switch(self, container, representation):
self.update(container, representation)
# helper functions
def create_namespace(self, asset):
"""Create a unique namespace
Args:
asset (dict): asset information
"""
asset_name = "{}_".format(asset)
prefix = "_" if asset_name[0].isdigit()else ""
namespace = avalon_lib.unique_namespace(asset_name,
prefix=prefix,
suffix="_")
return namespace
def validate_cache(self, filename, pattern="%04d"):
"""Check if the cache has more than 1 frame
All caches with more than 1 frame need to be called with `%04d`
If the cache has only one frame we return that file name as we assume
it is a snapshot.
Args:
filename(str)
pattern(str)
Returns:
str
"""
glob_pattern = filename.replace(pattern, "*")
escaped = re.escape(filename)
re_pattern = escaped.replace(pattern, "-?[0-9]+")
files = glob.glob(glob_pattern)
files = [str(f) for f in files if re.match(re_pattern, f)]
if len(files) == 1:
return files[0]
elif len(files) == 0:
self.log.error("Could not find cache files for '%s'" % filename)
return filename
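    # A standalone sketch of the matching above, with hypothetical file
    # names, runnable outside Maya:
    #
    #     import re
    #     filename = "hair.%04d.fur"
    #     re_pattern = re.escape(filename).replace("%04d", "-?[0-9]+")
    #     assert re.match(re_pattern, "hair.0001.fur")
    #     assert not re.match(re_pattern, "hair.preview.fur")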
def create_nodes(self, namespace, settings):
"""Create nodes with the correct namespace and settings
Args:
namespace(str): namespace
settings(list): list of dictionaries
Returns:
list
"""
nodes = []
for node_settings in settings:
# Create pgYetiMaya node
original_node = node_settings["name"]
node_name = "{}:{}".format(namespace, original_node)
yeti_node = cmds.createNode("pgYetiMaya", name=node_name)
            # Create transform node name by stripping the "Shape" suffix
            # (str.rstrip strips a character set, not a suffix)
            transform_node = node_name.rsplit("Shape", 1)[0]
lib.set_id(transform_node, node_settings["transform"]["cbId"])
lib.set_id(yeti_node, node_settings["cbId"])
nodes.extend([transform_node, yeti_node])
# Ensure the node has no namespace identifiers
attributes = node_settings["attrs"]
# Check if cache file name is stored
if "cacheFileName" not in attributes:
file_name = original_node.replace(":", "_")
cache_name = "{}.%04d.fur".format(file_name)
cache = os.path.join(self.fname, cache_name)
self.validate_cache(cache)
attributes["cacheFileName"] = cache
# Update attributes with requirements
attributes.update({"viewportDensity": 0.1,
"verbosity": 2,
"fileMode": 1})
# Apply attributes to pgYetiMaya node
for attr, value in attributes.items():
lib.set_attribute(attr, value, yeti_node)
            # Fix for: YETI-6
            # Fixes the render stats (this is literally taken from Peregrine's
            # ../scripts/pgYetiNode.mel script)
cmds.setAttr("{}.visibleInReflections".format(yeti_node), True)
cmds.setAttr("{}.visibleInRefractions".format(yeti_node), True)
# Connect to the time node
cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node)
return nodes
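# A sketch of the ".fursettings" sidecar file this loader expects,
# inferred from the keys accessed above; all names and ids below are
# hypothetical:
#
# {
#     "nodes": [
#         {
#             "name": "bunny:body_YNShape",
#             "cbId": "59a1...:b2c3...",
#             "transform": {"name": "bunny:body_YN",
#                           "cbId": "59a1...:d4e5..."},
#             "attrs": {"renderDensity": 1.0,
#                       "renderWidth": 0.1,
#                       "renderLength": 1.0,
#                       "increaseRenderBounds": 0.0,
#                       "imageSearchPath": "",
#                       "cacheFileName": "body_YNShape.%04d.fur"}
#         }
#     ]
# }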

View file

@ -0,0 +1,31 @@
import config.apps.maya.plugin
class YetiRigLoader(config.apps.maya.plugin.ReferenceLoader):
families = ["colorbleed.yetiRig"]
representations = ["ma"]
label = "Load Yeti Rig"
order = -9
icon = "code-fork"
color = "orange"
def process_reference(self, context, name=None, namespace=None, data=None):
import maya.cmds as cmds
from avalon import maya
with maya.maintained_selection():
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
self[:] = nodes
self.log.info("Yeti Rig Connection Manager will be available soon")
return nodes

View file

@ -0,0 +1,53 @@
import pyblish.api
import maya.cmds as cmds
class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):
"""Collect out hierarchy data for instance.
Collect all hierarchy nodes which reside in the out_SET of the animation
instance or point cache instance. This is to unify the logic of retrieving
that specific data. This eliminates the need to write two separate pieces
of logic to fetch all hierarchy nodes.
Results in a list of nodes from the content of the instances
"""
order = pyblish.api.CollectorOrder + 0.4
families = ["colorbleed.animation"]
label = "Collect Animation Output Geometry"
hosts = ["maya"]
ignore_type = ["constraints"]
def process(self, instance):
"""Collect the hierarchy nodes"""
family = instance.data["family"]
out_set = next((i for i in instance.data["setMembers"] if
i.endswith("out_SET")), None)
assert out_set, ("Expecting out_SET for instance of family"
" '%s'" % family)
members = cmds.ls(cmds.sets(out_set, query=True), long=True)
# Get all the relatives of the members
descendants = cmds.listRelatives(members,
allDescendents=True,
fullPath=True) or []
descendants = cmds.ls(descendants, noIntermediate=True, long=True)
# Add members and descendants together for a complete overview
hierarchy = members + descendants
# Ignore certain node types (e.g. constraints)
ignore = cmds.ls(hierarchy, type=self.ignore_type, long=True)
if ignore:
ignore = set(ignore)
hierarchy = [node for node in hierarchy if node not in ignore]
# Store data in the instance for the validator
instance.data["out_hierarchy"] = hierarchy

View file

@ -0,0 +1,16 @@
from maya import cmds
import pyblish.api
class CollectMayaCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Maya Current File"
hosts = ['maya']
def process(self, context):
"""Inject the current working file"""
current_file = cmds.file(query=True, sceneName=True)
context.data['currentFile'] = current_file

View file

@ -0,0 +1,40 @@
from maya import cmds
import pyblish.api
class CollectMayaHistory(pyblish.api.InstancePlugin):
"""Collect history for instances from the Maya scene
Note:
        This removes render layers collected in the history.
This is separate from Collect Instances so we can target it towards only
specific family types.
"""
order = pyblish.api.CollectorOrder + 0.1
hosts = ["maya"]
label = "Maya History"
families = ["colorbleed.rig"]
verbose = False
def process(self, instance):
# Collect the history with long names
history = cmds.listHistory(instance, leaf=False) or []
history = cmds.ls(history, long=True)
# Remove invalid node types (like renderlayers)
invalid = cmds.ls(history, type="renderLayer", long=True)
if invalid:
invalid = set(invalid) # optimize lookup
history = [x for x in history if x not in invalid]
# Combine members with history
members = instance[:] + history
members = list(set(members)) # ensure unique
# Update the instance
instance[:] = members

View file

@ -0,0 +1,141 @@
from maya import cmds
import pyblish.api
class CollectInstances(pyblish.api.ContextPlugin):
"""Gather instances by objectSet and pre-defined attribute
This collector takes into account assets that are associated with
an objectSet and marked with a unique identifier;
Identifier:
id (str): "pyblish.avalon.instance"
Supported Families:
avalon.model: Geometric representation of artwork
avalon.rig: An articulated model for animators.
A rig may contain a series of sets in which to identify
its contents.
- cache_SEL: Should contain cachable polygonal meshes
- controls_SEL: Should contain animatable controllers for animators
- resources_SEL: Should contain nodes that reference external files
Limitations:
- Only Maya is supported
- One (1) rig per scene file
- Unmanaged history, it is up to the TD to ensure
history is up to par.
avalon.animation: Pointcache of `avalon.rig`
Limitations:
- Does not take into account nodes connected to those
within an objectSet. Extractors are assumed to export
with history preserved, but this limits what they will
be able to achieve and the amount of data available
to validators.
"""
label = "Collect Instances"
order = pyblish.api.CollectorOrder
hosts = ["maya"]
def process(self, context):
objectset = cmds.ls("*.id", long=True, type="objectSet",
recursive=True, objectsOnly=True)
for objset in objectset:
if not cmds.attributeQuery("id", node=objset, exists=True):
continue
id_attr = "{}.id".format(objset)
if cmds.getAttr(id_attr) != "pyblish.avalon.instance":
continue
# The developer is responsible for specifying
# the family of each instance.
has_family = cmds.attributeQuery("family",
node=objset,
exists=True)
assert has_family, "\"%s\" was missing a family" % objset
members = cmds.sets(objset, query=True)
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
self.log.info("Creating instance for {}".format(objset))
data = dict()
# Apply each user defined attribute as data
for attr in cmds.listAttr(objset, userDefined=True) or list():
try:
value = cmds.getAttr("%s.%s" % (objset, attr))
except Exception:
# Some attributes cannot be read directly,
# such as mesh and color attributes. These
# are considered non-essential to this
# particular publishing pipeline.
value = None
data[attr] = value
            # temporary translation of `active` to `publish` until the issue
            # has been resolved: https://github.com/pyblish/pyblish-base/issues/307
if "active" in data:
data["publish"] = data["active"]
# Collect members
members = cmds.ls(members, long=True) or []
# `maya.cmds.listRelatives(noIntermediate=True)` only works when
# `shapes=True` argument is passed, since we also want to include
# transforms we filter afterwards.
children = cmds.listRelatives(members,
allDescendents=True,
fullPath=True) or []
children = cmds.ls(children, noIntermediate=True, long=True)
parents = self.get_all_parents(members)
members_hierarchy = list(set(members + children + parents))
# Create the instance
name = cmds.ls(objset, long=False)[0] # use short name
instance = context.create_instance(data.get("name", name))
instance[:] = members_hierarchy
instance.data["setMembers"] = members
instance.data.update(data)
# Produce diagnostic message for any graphical
# user interface interested in visualising it.
self.log.info("Found: \"%s\" " % instance.data["name"])
def sort_by_family(instance):
"""Sort by family"""
return instance.data.get("families", instance.data.get("family"))
# Sort/grouped by family (preserving local index)
context[:] = sorted(context, key=sort_by_family)
return context
def get_all_parents(self, nodes):
"""Get all parents by using string operations (optimization)
Args:
nodes (list): the nodes which are found in the objectSet
Returns:
list
"""
parents = []
for node in nodes:
splitted = node.split("|")
items = ["|".join(splitted[0:i]) for i in range(2, len(splitted))]
parents.extend(items)
return list(set(parents))
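# A minimal sketch (hypothetical names) of an objectSet this collector
# would pick up: a set whose `id` attribute equals
# "pyblish.avalon.instance" and that carries a `family` string attribute.
# Any other user-defined attributes become instance data, as shown above.
#
#     from maya import cmds
#     objset = cmds.sets(cmds.ls(selection=True), name="modelMain_SET")
#     for attr, value in {"id": "pyblish.avalon.instance",
#                         "family": "colorbleed.model"}.items():
#         cmds.addAttr(objset, longName=attr, dataType="string")
#         cmds.setAttr("{}.{}".format(objset, attr), value, type="string")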

View file

@ -0,0 +1,279 @@
from maya import cmds
import pyblish.api
import config.apps.maya.lib as lib
from cb.utils.maya import context, shaders
SHAPE_ATTRS = ["castsShadows",
"receiveShadows",
"motionBlur",
"primaryVisibility",
"smoothShading",
"visibleInReflections",
"visibleInRefractions",
"doubleSided",
"opposite"]
SHAPE_ATTRS = set(SHAPE_ATTRS)
def get_look_attrs(node):
"""Returns attributes of a node that are important for the look.
These are the "changed" attributes (those that have edits applied
in the current scene).
Returns:
list: Attribute names to extract
"""
# When referenced get only attributes that are "changed since file open"
# which includes any reference edits, otherwise take *all* user defined
# attributes
is_referenced = cmds.referenceQuery(node, isNodeReferenced=True)
result = cmds.listAttr(node, userDefined=True,
changedSinceFileOpen=is_referenced) or []
# `cbId` is added when a scene is saved, ignore by default
if "cbId" in result:
result.remove("cbId")
# For shapes allow render stat changes
if cmds.objectType(node, isAType="shape"):
attrs = cmds.listAttr(node, changedSinceFileOpen=True) or []
for attr in attrs:
if attr in SHAPE_ATTRS:
result.append(attr)
return result
class CollectLook(pyblish.api.InstancePlugin):
"""Collect look data for instance.
For the shapes/transforms of the referenced object to collect look for
retrieve the user-defined attributes (like V-ray attributes) and their
values as they were created in the current scene.
For the members of the instance collect the sets (shadingEngines and
other sets, e.g. VRayDisplacement) they are in along with the exact
membership relations.
Collects:
        lookAttributes (list): Nodes in instance with their altered attributes
lookSetRelations (list): Sets and their memberships
lookSets (list): List of set names included in the look
"""
order = pyblish.api.CollectorOrder + 0.4
families = ["colorbleed.look"]
label = "Collect Look"
hosts = ["maya"]
def process(self, instance):
"""Collect the Look in the instance with the correct layer settings"""
with context.renderlayer(instance.data["renderlayer"]):
self.collect(instance)
def collect(self, instance):
self.log.info("Looking for look associations "
"for %s" % instance.data['name'])
# Discover related object sets
self.log.info("Gathering sets..")
sets = self.collect_sets(instance)
# Lookup set (optimization)
instance_lookup = set(cmds.ls(instance, long=True))
self.log.info("Gathering set relations..")
        # Ensure iteration happens over a list copy so we can remove keys
        # from the dict within the loop
for objset in list(sets):
self.log.debug("From %s.." % objset)
# Get all nodes of the current objectSet (shadingEngine)
for member in cmds.ls(cmds.sets(objset, query=True), long=True):
member_data = self.collect_member_data(member,
instance_lookup)
if not member_data:
continue
# Add information of the node to the members list
sets[objset]["members"].append(member_data)
# Remove sets that didn't have any members assigned in the end
# Thus the data will be limited to only what we need.
if not sets[objset]["members"]:
self.log.info("Removing redundant set information: "
"%s" % objset)
sets.pop(objset, None)
self.log.info("Gathering attribute changes to instance members..")
attributes = self.collect_attributes_changed(instance)
# Store data on the instance
instance.data["lookData"] = {"attributes": attributes,
"relationships": sets}
# Collect file nodes used by shading engines (if we have any)
files = list()
looksets = sets.keys()
if looksets:
self.log.info("Found the following sets:\n{}".format(looksets))
# Get the entire node chain of the look sets
history = cmds.listHistory(looksets)
files = cmds.ls(history, type="file", long=True)
# Collect textures if any file nodes are found
instance.data["resources"] = [self.collect_resource(n)
for n in files]
# Log a warning when no relevant sets were retrieved for the look.
if not instance.data["lookData"]["relationships"]:
self.log.warning("No sets found for the nodes in the instance: "
"%s" % instance[:])
# Ensure unique shader sets
        # Add shader sets to the instance so they are included in ID validation
instance.extend(shader for shader in looksets if shader
not in instance_lookup)
self.log.info("Collected look for %s" % instance)
def collect_sets(self, instance):
"""Collect all objectSets which are of importance for publishing
It checks if all nodes in the instance are related to any objectSet
which need to be
Args:
instance (list): all nodes to be published
Returns:
dict
"""
sets = dict()
for node in instance:
related_sets = lib.get_related_sets(node)
if not related_sets:
continue
for objset in related_sets:
if objset in sets:
continue
sets[objset] = {"uuid": lib.get_id(objset), "members": list()}
return sets
def collect_member_data(self, member, instance_members):
"""Get all information of the node
Args:
member (str): the name of the node to check
instance_members (set): the collected instance members
Returns:
dict
"""
node, components = (member.rsplit(".", 1) + [None])[:2]
# Only include valid members of the instance
if node not in instance_members:
return
node_id = lib.get_id(node)
if not node_id:
self.log.error("Member '{}' has no attribute 'cbId'".format(node))
return
member_data = {"name": node, "uuid": node_id}
if components:
member_data["components"] = components
return member_data
def collect_attributes_changed(self, instance):
"""Collect all userDefined attributes which have changed
Each node gets checked for user defined attributes which have been
        altered during development. Each change gets logged in a dictionary:
[{name: node,
uuid: uuid,
attributes: {attribute: value}}]
Args:
instance (list): all nodes which will be published
Returns:
list
"""
attributes = []
for node in instance:
# Collect changes to "custom" attributes
node_attrs = get_look_attrs(node)
# Only include if there are any properties we care about
if not node_attrs:
continue
node_attributes = {}
for attr in node_attrs:
if not cmds.attributeQuery(attr, node=node, exists=True):
continue
attribute = "{}.{}".format(node, attr)
node_attributes[attr] = cmds.getAttr(attribute)
attributes.append({"name": node,
"uuid": lib.get_id(node),
"attributes": node_attributes})
return attributes
def collect_resource(self, node):
"""Collect the link to the file(s) used (resource)
Args:
node (str): name of the node
Returns:
dict
"""
attribute = "{}.fileTextureName".format(node)
source = cmds.getAttr(attribute)
# Compare with the computed file path, e.g. the one with the <UDIM>
# pattern in it, to generate some logging information about this
# difference
computed_attribute = "{}.computedFileTextureNamePattern".format(node)
computed_source = cmds.getAttr(computed_attribute)
if source != computed_source:
self.log.debug("Detected computed file pattern difference "
"from original pattern: {0} "
"({1} -> {2})".format(node,
source,
computed_source))
            # We replace backslashes with forward slashes because V-Ray
            # can't handle UDIM files with backslashes in the paths as
            # the computed patterns
source = source.replace("\\", "/")
files = shaders.get_file_node_files(node)
if len(files) == 0:
self.log.error("No valid files found from node `%s`" % node)
# Define the resource
return {"node": node,
"attribute": attribute,
"source": source, # required for resources
"files": files} # required for resources

View file

@ -0,0 +1,30 @@
import maya.cmds as cmds
import maya.mel as mel
import pyblish.api
class CollectMayaUnits(pyblish.api.ContextPlugin):
"""Collect Maya's scene units."""
label = "Maya Units"
order = pyblish.api.CollectorOrder
hosts = ["maya"]
def process(self, context):
# Get the current linear units
units = cmds.currentUnit(query=True, linear=True)
# Get the current angular units ('deg' or 'rad')
units_angle = cmds.currentUnit(query=True, angle=True)
# Get the current time units
# Using the mel command is simpler than using
# `cmds.currentUnit(q=1, time=1)`. Otherwise we
# have to parse the returned string value to FPS
fps = mel.eval('currentTimeUnitToFPS()')
context.data['linearUnits'] = units
context.data['angularUnits'] = units_angle
context.data['fps'] = fps

View file

@ -0,0 +1,27 @@
import os
import pyblish.api
from maya import cmds
class CollectMayaWorkspace(pyblish.api.ContextPlugin):
"""Inject the current workspace into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Maya Workspace"
hosts = ['maya']
version = (0, 1, 0)
def process(self, context):
workspace = cmds.workspace(rootDirectory=True, query=True)
if not workspace:
# Project has not been set. Files will
# instead end up next to the working file.
workspace = cmds.workspace(dir=True, query=True)
# Maya returns forward-slashes by default
normalised = os.path.normpath(workspace)
context.set_data('workspaceDir', value=normalised)

View file

@ -0,0 +1,26 @@
from maya import cmds
import pyblish.api
class CollectModelData(pyblish.api.InstancePlugin):
"""Collect model data
Ensures always only a single frame is extracted (current frame).
Note:
This is a workaround so that the `colorbleed.model` family can use the
same pointcache extractor implementation as animation and pointcaches.
This always enforces the "current" frame to be published.
"""
order = pyblish.api.CollectorOrder + 0.499
label = 'Collect Model Data'
families = ["colorbleed.model"]
def process(self, instance):
# Extract only current frame (override)
frame = cmds.currentTime(query=True)
instance.data['startFrame'] = frame
instance.data['endFrame'] = frame

View file

@ -0,0 +1,95 @@
from maya import cmds
import pyblish.api
import config.apps.maya.lib as lib
class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
"""Validate all render layer's AOVs / Render Elements are registered in
the database
This validator is important to be able to Extend Frames
Technical information:
Each renderer uses different logic to work with render passes.
VRay - RenderElement
Simple node connection to the actual renderLayer node
Arnold - AOV:
Uses its own render settings node and connects an aiOAV to it
Redshift - AOV:
Uses its own render settings node and RedshiftAOV node. It is not
connected but all AOVs are enabled for all render layers by default.
"""
order = pyblish.api.CollectorOrder + 0.01
label = "Render Elements / AOVs"
hosts = ["maya"]
families = ["colorbleed.renderlayer"]
def process(self, instance):
# Check if Extend Frames is toggled
if not instance.data("extendFrames", False):
return
# Get renderer
renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer")
self.log.info("Renderer found: {}".format(renderer))
rp_node_types = {"vray": "VRayRenderElement",
"arnold": "aiAOV",
"redshift": "RedshiftAOV"}
if renderer not in rp_node_types.keys():
self.log.error("Unsupported renderer found: '{}'".format(renderer))
return
result = []
# Collect all AOVs / Render Elements
with lib.renderlayer(instance.name):
node_type = rp_node_types[renderer]
render_elements = cmds.ls(type=node_type)
# Check if AOVs / Render Elements are enabled
for element in render_elements:
enabled = cmds.getAttr("{}.enabled".format(element))
if not enabled:
continue
pass_name = self.get_pass_name(renderer, element)
render_pass = "%s.%s" % (instance.name, pass_name)
result.append(render_pass)
self.log.info("Found {} render elements / AOVs for "
"'{}'".format(len(result), instance.name))
instance.data["renderPasses"] = result
def get_pass_name(self, renderer, node):
if renderer == "vray":
vray_node_attr = next(attr for attr in cmds.listAttr(node)
if attr.startswith("vray_name"))
pass_type = vray_node_attr.rsplit("_", 1)[-1]
if pass_type == "extratex":
vray_node_attr = "vray_explicit_name_extratex"
            # The pass type is in the attribute name, but we need to check
            # the value of the attribute as it can be changed
pass_name = cmds.getAttr("{}.{}".format(node, vray_node_attr))
elif renderer in ["arnold", "redshift"]:
pass_name = cmds.getAttr("{}.name".format(node))
else:
raise RuntimeError("Unsupported renderer: '{}'".format(renderer))
return pass_name
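# Illustration of get_pass_name's V-Ray branch with a hypothetical
# render element node:
#
#     node = "vrayRE_Diffuse"                     # hypothetical
#     # cmds.listAttr(node) contains "vray_name_diffuse"
#     # -> pass_type == "diffuse"
#     # -> pass_name == cmds.getAttr("vrayRE_Diffuse.vray_name_diffuse")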

View file

@ -0,0 +1,191 @@
from maya import cmds
import pyblish.api
from avalon import maya, api
import config.apps.maya.lib as lib
class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
"""Gather instances by active render layers"""
order = pyblish.api.CollectorOrder
hosts = ["maya"]
label = "Render Layers"
def process(self, context):
asset = api.Session["AVALON_ASSET"]
filepath = context.data["currentFile"].replace("\\", "/")
# Get render globals node
try:
render_globals = cmds.ls("renderglobalsDefault")[0]
except IndexError:
self.log.error("Cannot collect renderlayers without "
"renderGlobals node")
return
# Get start and end frame
start_frame = self.get_render_attribute("startFrame")
end_frame = self.get_render_attribute("endFrame")
context.data["startFrame"] = start_frame
context.data["endFrame"] = end_frame
# Get all valid renderlayers
# This is how Maya populates the renderlayer display
rlm_attribute = "renderLayerManager.renderLayerId"
connected_layers = cmds.listConnections(rlm_attribute) or []
valid_layers = set(connected_layers)
# Get all renderlayers and check their state
renderlayers = [i for i in cmds.ls(type="renderLayer") if
cmds.getAttr("{}.renderable".format(i)) and not
cmds.referenceQuery(i, isNodeReferenced=True)]
# Sort by displayOrder
def sort_by_display_order(layer):
return cmds.getAttr("%s.displayOrder" % layer)
renderlayers = sorted(renderlayers, key=sort_by_display_order)
for layer in renderlayers:
# Check if layer is in valid (linked) layers
if layer not in valid_layers:
self.log.warning("%s is invalid, skipping" % layer)
continue
if layer.endswith("defaultRenderLayer"):
layername = "masterLayer"
else:
layername = layer.split("rs_", 1)[-1]
# Get layer specific settings, might be overrides
with lib.renderlayer(layer):
data = {
"subset": layername,
"setMembers": layer,
"publish": True,
"startFrame": self.get_render_attribute("startFrame"),
"endFrame": self.get_render_attribute("endFrame"),
"byFrameStep": self.get_render_attribute("byFrameStep"),
"renderer": self.get_render_attribute("currentRenderer"),
# instance subset
"family": "Render Layers",
"families": ["colorbleed.renderlayer"],
"asset": asset,
"time": api.time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": filepath
}
# Apply each user defined attribute as data
for attr in cmds.listAttr(layer, userDefined=True) or list():
try:
value = cmds.getAttr("{}.{}".format(layer, attr))
except Exception:
# Some attributes cannot be read directly,
# such as mesh and color attributes. These
# are considered non-essential to this
# particular publishing pipeline.
value = None
data[attr] = value
# Include (optional) global settings
# TODO(marcus): Take into account layer overrides
# Get global overrides and translate to Deadline values
overrides = self.parse_options(render_globals)
data.update(**overrides)
instance = context.create_instance(layername)
instance.data.update(data)
def get_render_attribute(self, attr):
return cmds.getAttr("defaultRenderGlobals.{}".format(attr))
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without
Here's the kicker. These globals override defaults in the submission
integrator, but an empty value means no overriding is made.
Otherwise, Frames would override the default frames set under globals.
Args:
render_globals (str): collection of render globals
Returns:
dict: only overrides with values
"""
attributes = maya.read(render_globals)
options = {"renderGlobals": {}}
options["renderGlobals"]["Priority"] = attributes["priority"]
# Check for specific pools
pool_a, pool_b = self._discover_pools(attributes)
options["renderGlobals"].update({"Pool": pool_a})
if pool_b:
options["renderGlobals"].update({"SecondaryPool": pool_b})
legacy = attributes["useLegacyRenderLayers"]
options["renderGlobals"]["UseLegacyRenderLayers"] = legacy
# Machine list
machine_list = attributes["machineList"]
if machine_list:
key = "Whitelist" if attributes["whitelist"] else "Blacklist"
options['renderGlobals'][key] = machine_list
# Suspend publish job
state = "Suspended" if attributes["suspendPublishJob"] else "Active"
options["publishJobState"] = state
chunksize = attributes.get("framesPerTask", 1)
options["renderGlobals"]["ChunkSize"] = chunksize
# Override frames should be False if extendFrames is False. This is
# to ensure it doesn't go off doing crazy unpredictable things
override_frames = False
extend_frames = attributes.get("extendFrames", False)
if extend_frames:
override_frames = attributes.get("overrideExistingFrame", False)
options["extendFrames"] = extend_frames
options["overrideExistingFrame"] = override_frames
maya_render_plugin = "MayaBatch"
if not attributes.get("useMayaBatch", True):
maya_render_plugin = "MayaCmd"
options["mayaRenderPlugin"] = maya_render_plugin
return options
def _discover_pools(self, attributes):
pool_a = None
pool_b = None
# Check for specific pools
if "primaryPool" in attributes:
pool_a = attributes["primaryPool"]
pool_b = attributes["secondaryPool"]
else:
# Backwards compatibility
pool_str = attributes.get("pools", None)
if pool_str:
pool_a, pool_b = pool_str.split(";")
# Ensure empty entry token is caught
if pool_b == "-":
pool_b = None
return pool_a, pool_b
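# A sketch (hypothetical values) of what parse_options() returns, to show
# the shape of the overrides consumed by the submission integrator:
#
# {
#     "renderGlobals": {"Priority": 50,
#                       "Pool": "farm",
#                       "UseLegacyRenderLayers": True,
#                       "Whitelist": "node01;node02",
#                       "ChunkSize": 1},
#     "publishJobState": "Active",
#     "extendFrames": False,
#     "overrideExistingFrame": False,
#     "mayaRenderPlugin": "MayaBatch",
# }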

View file

@ -0,0 +1,91 @@
from collections import defaultdict
import pyblish.api
from maya import cmds, mel
from avalon import maya as avalon
from config.apps.maya import lib
# TODO : Publish of setdress: -unique namespace for all assets, VALIDATOR!
class CollectSetDress(pyblish.api.InstancePlugin):
"""Collect all relevant setdress items
Collected data:
* File name
* Compatible loader
* Matrix per instance
* Namespace
Note: GPU caches are currently not supported in the pipeline. There is no
logic yet which supports the swapping of GPU cache to renderable objects.
"""
order = pyblish.api.CollectorOrder + 0.49
label = "Set Dress"
families = ["colorbleed.setdress"]
def process(self, instance):
# Find containers
containers = avalon.ls()
# Get all content from the instance
instance_lookup = set(cmds.ls(instance, type="transform", long=True))
data = defaultdict(list)
hierarchy_nodes = []
for container in containers:
root = lib.get_container_transforms(container, root=True)
if not root or root not in instance_lookup:
continue
# Retrieve the hierarchy
parent = cmds.listRelatives(root, parent=True, fullPath=True)[0]
hierarchy_nodes.append(parent)
# Temporary warning for GPU cache which are not supported yet
loader = container["loader"]
if loader == "GpuCacheLoader":
self.log.warning("GPU Cache Loader is currently not supported"
"in the pipeline, we will export it tho")
# Gather info for new data entry
representation_id = container["representation"]
instance_data = {"loader": loader,
"parent": parent,
"namespace": container["namespace"]}
# Check if matrix differs from default and store changes
matrix_data = self.get_matrix_data(root)
if matrix_data:
instance_data["matrix"] = matrix_data
data[representation_id].append(instance_data)
instance.data["scenedata"] = dict(data)
instance.data["hierarchy"] = list(set(hierarchy_nodes))
def get_file_rule(self, rule):
return mel.eval('workspace -query -fileRuleEntry "{}"'.format(rule))
def get_matrix_data(self, node):
"""Get the matrix of all members when they are not default
Each matrix which differs from the default will be stored in a
dictionary
Args:
members (list): list of transform nmodes
Returns:
dict
"""
matrix = cmds.xform(node, query=True, matrix=True)
if matrix == lib.DEFAULT_MATRIX:
return
return matrix

View file

@ -0,0 +1,64 @@
from maya import cmds
import pyblish.api
from config.apps.maya import lib
SETTINGS = {"renderDensity",
"renderWidth",
"renderLength",
"increaseRenderBounds",
"imageSearchPath",
"cbId"}
class CollectYetiCache(pyblish.api.InstancePlugin):
"""Collect all information of the Yeti caches
The information contains the following attributes per Yeti node
- "renderDensity"
- "renderWidth"
- "renderLength"
- "increaseRenderBounds"
- "imageSearchPath"
    Other information is the name of the transform and its Colorbleed ID
"""
order = pyblish.api.CollectorOrder + 0.45
label = "Collect Yeti Cache"
families = ["colorbleed.yetiRig", "colorbleed.yeticache"]
hosts = ["maya"]
tasks = ["animation", "fx"]
def process(self, instance):
# Collect fur settings
settings = {"nodes": []}
# Get yeti nodes and their transforms
yeti_shapes = cmds.ls(instance, type="pgYetiMaya")
for shape in yeti_shapes:
shape_data = {"transform": None,
"name": shape,
"cbId": lib.get_id(shape),
"attrs": None}
# Get specific node attributes
attr_data = {}
for attr in SETTINGS:
current = cmds.getAttr("%s.%s" % (shape, attr))
attr_data[attr] = current
# Get transform data
parent = cmds.listRelatives(shape, parent=True)[0]
transform_data = {"name": parent, "cbId": lib.get_id(parent)}
# Store collected data
shape_data["attrs"] = attr_data
shape_data["transform"] = transform_data
settings["nodes"].append(shape_data)
instance.data["fursettings"] = settings

View file

@ -0,0 +1,156 @@
import os
import re
from maya import cmds
import pyblish.api
from config.apps.maya import lib
SETTINGS = {"renderDensity",
"renderWidth",
"renderLength",
"increaseRenderBounds",
"imageSearchPath",
"cbId"}
class CollectYetiRig(pyblish.api.InstancePlugin):
"""Collect all information of the Yeti Rig"""
order = pyblish.api.CollectorOrder + 0.4
label = "Collect Yeti Rig"
families = ["colorbleed.yetiRig"]
hosts = ["maya"]
def process(self, instance):
assert "input_SET" in cmds.sets(instance.name, query=True), (
"Yeti Rig must have an input_SET")
# Get the input meshes information
input_content = cmds.sets("input_SET", query=True)
input_nodes = cmds.listRelatives(input_content,
allDescendents=True,
fullPath=True) or input_content
# Get all the shapes
input_shapes = cmds.ls(input_nodes, long=True, noIntermediate=True)
# Store all connections
connections = cmds.listConnections(input_shapes,
source=True,
destination=False,
connections=True,
plugs=True) or []
        # Group per (source, destination) pair. The connection list comes in
        # as [dest1, src1, dest2, src2, ...] because the queried shapes are
        # the destinations, so each pair is reversed when grouping, e.g.
        # ["shp.inMesh", "src.outMesh"] -> ("src.outMesh", "shp.inMesh")
        grouped = [(connections[i + 1], item) for i, item in
                   enumerate(connections) if i % 2 == 0]
inputs = []
for src, dest in grouped:
source_node, source_attr = src.split(".", 1)
dest_node, dest_attr = dest.split(".", 1)
inputs.append({"connections": [source_attr, dest_attr],
"sourceID": lib.get_id(source_node),
"destinationID": lib.get_id(dest_node)})
# Collect any textures if used
yeti_resources = []
yeti_nodes = cmds.ls(instance[:], type="pgYetiMaya")
for node in yeti_nodes:
# Get Yeti resources (textures)
# TODO: referenced files in Yeti Graph
resources = self.get_yeti_resources(node)
yeti_resources.extend(resources)
instance.data["rigsettings"] = {"inputs": inputs}
instance.data["resources"] = yeti_resources
# Force frame range for export
instance.data["startFrame"] = 1
instance.data["endFrame"] = 1
def get_yeti_resources(self, node):
"""Get all texture file paths
If a texture is a sequence it gathers all sibling files to ensure
the texture sequence is complete.
Args:
node (str): node name of the pgYetiMaya node
Returns:
list
"""
resources = []
image_search_path = cmds.getAttr("{}.imageSearchPath".format(node))
texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
if texture_filenames and not image_search_path:
raise ValueError("pgYetiMaya node '%s' is missing the path to the "
"files in the 'imageSearchPath "
"atttribute'" % node)
for texture in texture_filenames:
node_resources = {"files": [], "source": texture, "node": node}
texture_filepath = os.path.join(image_search_path, texture)
if len(texture.split(".")) > 2:
# For UDIM based textures (tiles)
if "<UDIM>" in texture:
sequences = self.get_sequence(texture_filepath,
pattern="<UDIM>")
node_resources["files"].extend(sequences)
                # Frame-based textures (e.g. animated masks)
elif "%04d" in texture:
sequences = self.get_sequence(texture_filepath,
pattern="%04d")
node_resources["files"].extend(sequences)
# Assuming it is a fixed name
else:
node_resources["files"].append(texture_filepath)
else:
node_resources["files"].append(texture_filepath)
resources.append(node_resources)
return resources
def get_sequence(self, filename, pattern="%04d"):
"""Get sequence from filename
Supports negative frame ranges like -001, 0000, 0001 and -0001,
0000, 0001.
Arguments:
filename (str): The full path to filename containing the given
pattern.
pattern (str): The pattern to swap with the variable frame number.
Returns:
list: file sequence.
"""
from avalon.vendor import clique
escaped = re.escape(filename)
re_pattern = escaped.replace(pattern, "-?[0-9]+")
source_dir = os.path.dirname(filename)
files = [f for f in os.listdir(source_dir)
if re.match(re_pattern, f)]
pattern = [clique.PATTERNS["frames"]]
collection, remainder = clique.assemble(files,
patterns=pattern)
return collection
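    # A standalone sketch of the assembly above with hypothetical file
    # names:
    #
    #     from avalon.vendor import clique
    #     files = ["mask.0001.fur", "mask.0002.fur", "mask.0003.fur"]
    #     collections, remainder = clique.assemble(
    #         files, patterns=[clique.PATTERNS["frames"]])
    #     # collections[0].format() -> "mask.%04d.fur [1-3]"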

View file

@ -0,0 +1,78 @@
import os
from maya import cmds
import avalon.maya
import colorbleed.api
from config.apps.maya.lib import extract_alembic
class ExtractColorbleedAnimation(colorbleed.api.Extractor):
"""Produce an alembic of just point positions and normals.
Positions and normals, uvs, creases are preserved, but nothing more,
for plain and predictable point caches.
"""
label = "Extract Animation"
hosts = ["maya"]
families = ["colorbleed.animation"]
def process(self, instance):
# Collect the out set nodes
out_sets = [node for node in instance if node.endswith("out_SET")]
if len(out_sets) != 1:
raise RuntimeError("Couldn't find exactly one out_SET: "
"{0}".format(out_sets))
out_set = out_sets[0]
nodes = cmds.sets(out_set, query=True)
# Include all descendants
nodes += cmds.listRelatives(nodes,
allDescendents=True,
fullPath=True) or []
# Collect the start and end including handles
start = instance.data["startFrame"]
end = instance.data["endFrame"]
handles = instance.data.get("handles", 0)
if handles:
start -= handles
end += handles
self.log.info("Extracting animation..")
        dirname = self.staging_dir(instance)
        filename = "{name}.abc".format(**instance.data)
        path = os.path.join(dirname, filename)
options = {
"step": instance.data.get("step", 1.0),
"attr": ["cbId"],
"writeVisibility": True,
"writeCreases": True,
"uvWrite": True,
"selection": True
}
if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
with avalon.maya.suspended_refresh():
with avalon.maya.maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(file=path,
startFrame=start,
endFrame=end,
**options)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted {} to {}".format(instance, dirname))

View file

@ -0,0 +1,79 @@
import os
from maya import cmds
import avalon.maya
import colorbleed.api
import cb.utils.maya.context as context
class ExtractCameraAlembic(colorbleed.api.Extractor):
"""Extract a Camera as Alembic.
The cameras gets baked to world space by default. Only when the instance's
`bakeToWorldSpace` is set to False it will include its full hierarchy.
"""
label = "Camera (Alembic)"
hosts = ["maya"]
families = ["colorbleed.camera"]
def process(self, instance):
# get settings
framerange = [instance.data.get("startFrame", 1),
instance.data.get("endFrame", 1)]
handles = instance.data.get("handles", 0)
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)
# get cameras
members = instance.data['setMembers']
cameras = cmds.ls(members, leaf=True, shapes=True, long=True,
dag=True, type="camera")
# validate required settings
        assert len(cameras) == 1, "Expected exactly one camera in extraction"
assert isinstance(step, float), "Step must be a float value"
camera = cameras[0]
# Define extract output file path
dir_path = self.staging_dir(instance)
filename = "{0}.abc".format(instance.name)
path = os.path.join(dir_path, filename)
# Perform alembic extraction
with avalon.maya.maintained_selection():
cmds.select(camera, replace=True, noExpand=True)
# Enforce forward slashes for AbcExport because we're
# embedding it into a job string
path = path.replace("\\", "/")
job_str = ' -selection -dataFormat "ogawa" '
job_str += ' -attrPrefix cb'
job_str += ' -frameRange {0} {1} '.format(framerange[0] - handles,
framerange[1] + handles)
job_str += ' -step {0} '.format(step)
if bake_to_worldspace:
transform = cmds.listRelatives(camera,
parent=True,
fullPath=True)[0]
job_str += ' -worldSpace -root {0}'.format(transform)
job_str += ' -file "{0}"'.format(path)
with context.evaluation("off"):
with context.no_refresh():
cmds.AbcExport(j=job_str, verbose=False)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name, path))
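# For reference, an assembled AbcExport job string from the code above
# would look roughly like this (hypothetical frame range and path):
#
#      -selection -dataFormat "ogawa"  -attrPrefix cb -frameRange 990 1010
#      -step 1.0  -worldSpace -root |camera1 -file "C:/stage/cameraMain.abc"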

View file

@ -0,0 +1,134 @@
import os
from maya import cmds
import avalon.maya
import colorbleed.api
import cb.utils.maya.context as context
from cb.utils.maya.animation import bakeToWorldSpace
def massage_ma_file(path):
"""Clean up .ma file for backwards compatibility.
Massage the .ma of baked camera to stay
backwards compatible with older versions
of Fusion (6.4)
"""
    # Rewrite the file in place, skipping all 'rename -uid' lines
    with open(path, "r+") as f:
        lines = f.readlines()
        f.seek(0)  # reset to start of file
        for line in lines:
            stripped = line.strip()
            if stripped.startswith("rename -uid "):
                continue
            f.write(line)
        f.truncate()  # remove remainder
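# Example of a line massage_ma_file() strips (hypothetical uid), as Maya
# writes it into .ma files:
#
#     rename -uid "B0F6E3A4-4D2A-9E1C-6E3F-D8A2B4C6E8F0";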
class ExtractCameraMayaAscii(colorbleed.api.Extractor):
"""Extract a Camera as Maya Ascii.
This will create a duplicate of the camera that will be baked *with*
substeps and handles for the required frames. This temporary duplicate
will be published.
    The camera gets baked to world space by default. Only when the instance's
    `bakeToWorldSpace` is set to False will it include its full hierarchy.
Note:
The extracted Maya ascii file gets "massaged" removing the uuid values
so they are valid for older versions of Fusion (e.g. 6.4)
"""
label = "Camera (Maya Ascii)"
hosts = ["maya"]
families = ["colorbleed.camera"]
def process(self, instance):
# get settings
framerange = [instance.data.get("startFrame", 1),
instance.data.get("endFrame", 1)]
handles = instance.data.get("handles", 0)
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)
        # TODO: Implement a bake to non-world space
        # Currently it always bakes the resulting camera to world space and
        # does not allow including the parent hierarchy, even though with
        # `bakeToWorldSpace` set to False it should include its hierarchy to
        # be correct with the family implementation.
        if not bake_to_worldspace:
            self.log.warning("Camera (Maya Ascii) export only supports "
                             "world-space baked camera extractions. The "
                             "disabled bake to world space is ignored...")
# get cameras
members = instance.data['setMembers']
cameras = cmds.ls(members, leaf=True, shapes=True, long=True,
dag=True, type="camera")
range_with_handles = [framerange[0] - handles,
framerange[1] + handles]
        # validate required settings
        assert len(cameras) == 1, (
            "Expected a single camera, found: %s" % len(cameras))
        assert isinstance(step, float), "Step must be a float value"
camera = cameras[0]
transform = cmds.listRelatives(camera, parent=True, fullPath=True)
# Define extract output file path
dir_path = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name)
path = os.path.join(dir_path, filename)
# Perform extraction
self.log.info("Performing camera bakes for: {0}".format(transform))
with avalon.maya.maintained_selection():
with context.evaluation("off"):
with context.no_refresh():
baked = bakeToWorldSpace(transform,
frameRange=range_with_handles,
step=step)
baked_shapes = cmds.ls(baked,
type="camera",
dag=True,
shapes=True,
long=True)
self.log.info("Performing extraction..")
cmds.select(baked_shapes, noExpand=True)
cmds.file(path,
force=True,
typ="mayaAscii",
exportSelected=True,
preserveReferences=False,
constructionHistory=False,
channels=True, # allow animation
constraints=False,
shader=False,
expressions=False)
# Delete the baked hierarchy
cmds.delete(baked)
massage_ma_file(path)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name, path))
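# Aside: a self-contained check of massage_ma_file() above; the .ma content
# below is a hypothetical minimal example of the "rename -uid" lines it
# strips.
import tempfile

sample = ('createNode camera -n "cameraShape1";\n'
          '\trename -uid "8B5C9F3A-4D42-1A7E-36B2-C4A2E8D11F02";\n'
          '\tsetAttr ".fl" 35;\n')
with tempfile.NamedTemporaryFile("w", suffix=".ma", delete=False) as tmp:
    tmp.write(sample)
massage_ma_file(tmp.name)
with open(tmp.name) as f:
    print(f.read())  # the "rename -uid" line is gone, the rest remains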

View file

@@ -0,0 +1,97 @@
import os
import json
from collections import OrderedDict
from maya import cmds
import pyblish.api
import avalon.maya
import colorbleed.api
import config.apps.maya.lib as maya
from cb.utils.maya import context
class ExtractLook(colorbleed.api.Extractor):
"""Extract Look (Maya Ascii + JSON)
Only extracts the sets (shadingEngines and alike) alongside a .json file
that stores it relationships for the sets and "attribute" data for the
instance members.
"""
label = "Extract Look (Maya ASCII + JSON)"
hosts = ["maya"]
families = ["colorbleed.look"]
order = pyblish.api.ExtractorOrder + 0.2
def process(self, instance):
# Define extract output file path
dir_path = self.staging_dir(instance)
maya_fname = "{0}.ma".format(instance.name)
json_fname = "{0}.json".format(instance.name)
# Make texture dump folder
maya_path = os.path.join(dir_path, maya_fname)
json_path = os.path.join(dir_path, json_fname)
self.log.info("Performing extraction..")
# Remove all members of the sets so they are not included in the
# exported file by accident
self.log.info("Extract sets (Maya ASCII) ...")
lookdata = instance.data["lookData"]
relationships = lookdata["relationships"]
        sets = list(relationships.keys())
resources = instance.data["resources"]
remap = OrderedDict() # needs to be ordered, see color space values
for resource in resources:
attr = resource['attribute']
remap[attr] = resource['destination']
            # Preserve color space values (force the value after the file
            # path change). Because the remap is ordered, restoration at the
            # end of the context happens in the same order, so the original
            # color space value survives.
            color_space_attr = resource['node'] + ".colorSpace"
            remap[color_space_attr] = cmds.getAttr(color_space_attr)
self.log.info("Finished remapping destinations ...")
# Extract in correct render layer
layer = instance.data.get("renderlayer", "defaultRenderLayer")
with context.renderlayer(layer):
# TODO: Ensure membership edits don't become renderlayer overrides
with context.empty_sets(sets, force=True):
with maya.attribute_values(remap):
with avalon.maya.maintained_selection():
cmds.select(sets, noExpand=True)
cmds.file(maya_path,
force=True,
typ="mayaAscii",
exportSelected=True,
preserveReferences=False,
channels=True,
constraints=True,
expressions=True,
constructionHistory=True)
# Write the JSON data
self.log.info("Extract json..")
data = {"attributes": lookdata["attributes"],
"relationships": relationships}
with open(json_path, "w") as f:
json.dump(data, f)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(maya_fname)
instance.data["files"].append(json_fname)
self.log.info("Extracted instance '%s' to: %s" % (instance.name,
maya_path))
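# Aside: why the remap above is an OrderedDict, sketched with hypothetical
# attribute names. The file path must be remapped *before* the color space
# value is re-applied, because changing a file node's path can reset its
# color space; insertion order is what guarantees this.
from collections import OrderedDict

remap = OrderedDict()
remap["file1.fileTextureName"] = "/publish/textures/diffuse.exr"  # first
remap["file1.colorSpace"] = "sRGB"                                # then force
for attr, value in remap.items():
    print(attr, "->", value)  # applied (and later restored) in this order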

View file

@@ -0,0 +1,54 @@
import os
from maya import cmds
import avalon.maya
import colorbleed.api
class ExtractMayaAsciiRaw(colorbleed.api.Extractor):
"""Extract as Maya Ascii (raw)
This will preserve all references, construction history, etc.
"""
label = "Maya ASCII (Raw)"
hosts = ["maya"]
families = ["config.apps.mayaAscii"]
def process(self, instance):
# Define extract output file path
dir_path = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name)
path = os.path.join(dir_path, filename)
# Whether to include all nodes in the instance (including those from
# history) or only use the exact set members
members_only = instance.data.get("exactSetMembersOnly", False)
if members_only:
members = instance.data.get("setMembers", list())
if not members:
raise RuntimeError("Can't export 'exact set members only' "
"when set is empty.")
else:
members = instance[:]
# Perform extraction
self.log.info("Performing extraction..")
with avalon.maya.maintained_selection():
cmds.select(members, noExpand=True)
cmds.file(path,
force=True,
typ="mayaAscii",
exportSelected=True,
preserveReferences=True,
constructionHistory=True)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@@ -0,0 +1,78 @@
import os
from maya import cmds
import avalon.maya
import colorbleed.api
from cb.utils.maya import context
class ExtractModel(colorbleed.api.Extractor):
"""Extract as Model (Maya Ascii)
Only extracts contents based on the original "setMembers" data to ensure
publishing the least amount of required shapes. From that it only takes
the shapes that are not intermediateObjects
During export it sets a temporary context to perform a clean extraction.
The context ensures:
- Smooth preview is turned off for the geometry
- Default shader is assigned (no materials are exported)
- Remove display layers
"""
label = "Model (Maya ASCII)"
hosts = ["maya"]
families = ["colorbleed.model"]
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name)
path = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
# Get only the shape contents we need in such a way that we avoid
# taking along intermediateObjects
members = instance.data("setMembers")
members = cmds.ls(members,
dag=True,
shapes=True,
type=("mesh", "nurbsCurve"),
noIntermediate=True,
long=True)
with context.no_display_layers(instance):
with context.displaySmoothness(members,
divisionsU=0,
divisionsV=0,
pointsWire=4,
pointsShaded=1,
polygonObject=1):
with context.shader(members,
shadingEngine="initialShadingGroup"):
with avalon.maya.maintained_selection():
cmds.select(members, noExpand=True)
cmds.file(path,
force=True,
typ="mayaAscii",
exportSelected=True,
preserveReferences=False,
channels=False,
constraints=False,
expressions=False,
constructionHistory=False)
# Store reference for integration
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))
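# Aside: the nested with-blocks above guarantee that scene state is restored
# in reverse order even when the export raises. A tiny contextlib sketch
# (the names are stand-ins for the cb.utils.maya.context helpers):
from contextlib import contextmanager

@contextmanager
def scene_state(name):
    print("set", name)
    try:
        yield
    finally:
        print("restore", name)  # runs even if the export fails

with scene_state("no_display_layers"):
    with scene_state("displaySmoothness"):
        with scene_state("shader"):
            print("export happens here")
# restore order: shader, displaySmoothness, no_display_layers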

View file

@@ -0,0 +1,74 @@
import os
from maya import cmds
import avalon.maya
import colorbleed.api
from config.apps.maya.lib import extract_alembic
class ExtractColorbleedAlembic(colorbleed.api.Extractor):
"""Produce an alembic of just point positions and normals.
Positions and normals, uvs, creases are preserved, but nothing more,
for plain and predictable point caches.
"""
label = "Extract Pointcache (Alembic)"
hosts = ["maya"]
families = ["colorbleed.pointcache",
"colorbleed.model"]
def process(self, instance):
nodes = instance[:]
# Collect the start and end including handles
start = instance.data.get("startFrame", 1)
end = instance.data.get("endFrame", 1)
handles = instance.data.get("handles", 0)
if handles:
start -= handles
end += handles
# Get extra export arguments
writeColorSets = instance.data.get("writeColorSets", False)
self.log.info("Extracting animation..")
        dirname = self.staging_dir(instance)
        self.log.info("nodes: %s" % str(nodes))
        filename = "{name}.abc".format(**instance.data)
        path = os.path.join(dirname, filename)
options = {
"step": instance.data.get("step", 1.0),
"attr": ["cbId"],
"writeVisibility": True,
"writeCreases": True,
"writeColorSets": writeColorSets,
"uvWrite": True,
"selection": True
}
if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
with avalon.maya.suspended_refresh():
with avalon.maya.maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(file=path,
startFrame=start,
endFrame=end,
**options)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted {} to {}".format(instance, dirname))

View file

@@ -0,0 +1,42 @@
import os
from maya import cmds
import avalon.maya
import colorbleed.api
class ExtractColorbleedRig(colorbleed.api.Extractor):
"""Extract rig as Maya Ascii"""
label = "Extract Rig (Maya ASCII)"
hosts = ["maya"]
families = ["colorbleed.rig"]
def process(self, instance):
# Define extract output file path
dir_path = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name)
path = os.path.join(dir_path, filename)
# Perform extraction
self.log.info("Performing extraction..")
with avalon.maya.maintained_selection():
cmds.select(instance, noExpand=True)
cmds.file(path,
force=True,
typ="mayaAscii",
exportSelected=True,
preserveReferences=False,
channels=True,
constraints=True,
expressions=True,
constructionHistory=True)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@@ -0,0 +1,54 @@
import json
import os
import colorbleed.api
from config.apps.maya.lib import extract_alembic
from maya import cmds
class ExtractSetDress(colorbleed.api.Extractor):
"""Produce an alembic of just point positions and normals.
Positions and normals are preserved, but nothing more,
for plain and predictable point caches.
"""
label = "Extract Set Dress"
hosts = ["maya"]
families = ["colorbleed.setdress"]
def process(self, instance):
parent_dir = self.staging_dir(instance)
hierarchy_filename = "{}.abc".format(instance.name)
hierarchy_path = os.path.join(parent_dir, hierarchy_filename)
json_filename = "{}.json".format(instance.name)
json_path = os.path.join(parent_dir, json_filename)
self.log.info("Dumping scene data for debugging ..")
with open(json_path, "w") as filepath:
json.dump(instance.data["scenedata"], filepath, ensure_ascii=False)
self.log.info("Extracting point cache ..")
cmds.select(instance.data["hierarchy"])
# Run basic alembic exporter
extract_alembic(file=hierarchy_path,
startFrame=1.0,
endFrame=1.0,
**{"step": 1.0,
"attr": ["cbId"],
"writeVisibility": True,
"writeCreases": True,
"uvWrite": True,
"selection": True})
instance.data["files"] = [json_filename, hierarchy_filename]
# Remove data
instance.data.pop("scenedata", None)
cmds.select(clear=True)
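# Aside: every extractor above hands its output to integration through the
# same instance.data["files"] contract. A minimal sketch of that pattern
# (the dict and filenames are hypothetical):
instance_data = {}
for filename in ("setdressMain.json", "setdressMain.abc"):
    if "files" not in instance_data:
        instance_data["files"] = list()
    instance_data["files"].append(filename)
print(instance_data)  # {'files': ['setdressMain.json', 'setdressMain.abc']}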

Some files were not shown because too many files have changed in this diff.