Merge pull request #204 from pypeclub/feature/143-publishing_of_rendered_vrscenes

Refactor Maya deadline submission to support multiple job types
Milan Kolar 2020-06-11 18:12:06 +02:00 committed by GitHub
commit 6b0cb54ae5
31 changed files with 2091 additions and 1417 deletions

View file

@ -0,0 +1,869 @@
# -*- coding: utf-8 -*-
"""Module handling expected render output from Maya.
This module is used in :mod:`collect_render` and :mod:`collect_vray_scene`.
Note:
To implement a new renderer, just create a new class inheriting from
:class:`AExpectedFiles` and add it to :func:`ExpectedFiles.get()` (a
minimal sketch follows the :class:`AExpectedFiles` base class below).
Attributes:
R_SINGLE_FRAME (:class:`re.Pattern`): Find single frame number.
R_FRAME_RANGE (:class:`re.Pattern`): Find frame range.
R_FRAME_NUMBER (:class:`re.Pattern`): Find frame number in string.
R_LAYER_TOKEN (:class:`re.Pattern`): Find layer token in image prefixes.
R_AOV_TOKEN (:class:`re.Pattern`): Find AOV token in image prefixes.
R_SUBSTITUTE_AOV_TOKEN (:class:`re.Pattern`): Find and substitute AOV token
in image prefixes.
R_REMOVE_AOV_TOKEN (:class:`re.Pattern`): Find and remove AOV token in
image prefixes.
R_CLEAN_FRAME_TOKEN (:class:`re.Pattern`): Find and remove unfilled
Renderman frame token in image prefix.
R_CLEAN_EXT_TOKEN (:class:`re.Pattern`): Find and remove unfilled Renderman
extension token in image prefix.
R_SUBSTITUTE_LAYER_TOKEN (:class:`re.Pattern`): Find and substitute render
layer token in image prefixes.
R_SUBSTITUTE_SCENE_TOKEN (:class:`re.Pattern`): Find and substitute scene
token in image prefixes.
R_SUBSTITUTE_CAMERA_TOKEN (:class:`re.Pattern`): Find and substitute camera
token in image prefixes.
RENDERER_NAMES (dict): Renderer names mapping between reported name and
*human readable* name.
ImagePrefixes (dict): Mapping between renderers and their respective
image prefix attribute names.
"""
import types
import re
import os
from abc import ABCMeta, abstractmethod
import six
import pype.hosts.maya.lib as lib
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
R_SINGLE_FRAME = re.compile(r"^(-?)\d+$")
R_FRAME_RANGE = re.compile(r"^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$")
R_FRAME_NUMBER = re.compile(r".+\.(?P<frame>[0-9]+)\..+")
R_LAYER_TOKEN = re.compile(
r".*((?:%l)|(?:<layer>)|(?:<renderlayer>)).*", re.IGNORECASE
)
R_AOV_TOKEN = re.compile(r".*%a.*|.*<aov>.*|.*<renderpass>.*", re.IGNORECASE)
R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)
R_REMOVE_AOV_TOKEN = re.compile(r"_%a|_<aov>|_<renderpass>", re.IGNORECASE)
# to remove unused renderman tokens
R_CLEAN_FRAME_TOKEN = re.compile(r"\.?<f\d>\.?", re.IGNORECASE)
R_CLEAN_EXT_TOKEN = re.compile(r"\.?<ext>\.?", re.IGNORECASE)
R_SUBSTITUTE_LAYER_TOKEN = re.compile(
r"%l|<layer>|<renderlayer>", re.IGNORECASE
)
R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r"%c|<camera>", re.IGNORECASE)
R_SUBSTITUTE_SCENE_TOKEN = re.compile(r"%s|<scene>", re.IGNORECASE)
RENDERER_NAMES = {
"mentalray": "MentalRay",
"vray": "V-Ray",
"arnold": "Arnold",
"renderman": "Renderman",
"redshift": "Redshift",
}
# not sure about the renderman image prefix
ImagePrefixes = {
"mentalray": "defaultRenderGlobals.imageFilePrefix",
"vray": "vraySettings.fileNamePrefix",
"arnold": "defaultRenderGlobals.imageFilePrefix",
"renderman": "rmanGlobals.imageFileFormat",
"redshift": "defaultRenderGlobals.imageFilePrefix",
}
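# e.g. ``cmds.getAttr("vraySettings.fileNamePrefix")`` returns the raw
# prefix string with tokens such as <Scene> or <Layer> still unexpanded.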
class ExpectedFiles:
"""Class grouping functionality for all supported renderers.
Attributes:
multipart (bool): Flag if multipart exrs are used.
"""
multipart = False
def get(self, renderer, layer):
"""Get expected files for given renderer and render layer.
Args:
renderer (str): Name of renderer
layer (str): Name of render layer
Returns:
dict: Expected rendered files by AOV
Raises:
:exc:`UnsupportedRendererException`: If requested renderer
is not supported. It needs to be implemented by extending
:class:`AExpectedFiles` and adding it to this method's ``if``
statement.
"""
renderSetup.instance().switchToLayerUsingLegacyName(layer)
if renderer.lower() == "arnold":
return self._get_files(ExpectedFilesArnold(layer))
elif renderer.lower() == "vray":
return self._get_files(ExpectedFilesVray(layer))
elif renderer.lower() == "redshift":
return self._get_files(ExpectedFilesRedshift(layer))
elif renderer.lower() == "mentalray":
return self._get_files(ExpectedFilesMentalray(layer))
elif renderer.lower() == "renderman":
return self._get_files(ExpectedFilesRenderman(layer))
else:
raise UnsupportedRendererException(
"unsupported {}".format(renderer)
)
def _get_files(self, renderer):
files = renderer.get_files()
self.multipart = renderer.multipart
return files
@six.add_metaclass(ABCMeta)
class AExpectedFiles:
"""Abstract class with common code for all renderers.
Attributes:
renderer (str): name of renderer.
layer (str): name of render layer.
multipart (bool): flag for multipart exrs.
"""
renderer = None
layer = None
multipart = False
def __init__(self, layer):
"""Constructor."""
self.layer = layer
@abstractmethod
def get_aovs(self):
"""To be implemented by renderer class."""
pass
def get_renderer_prefix(self):
"""Return prefix for specific renderer.
This is the same for most renderers and can be overridden if needed.
Returns:
str: String with image prefix containing tokens
Raises:
:exc:`UnsupportedRendererException`: If the image prefix is
requested for a renderer we know nothing about.
See :data:`ImagePrefixes` for mapping of renderers and
image prefixes.
"""
try:
file_prefix = cmds.getAttr(ImagePrefixes[self.renderer])
except KeyError:
raise UnsupportedRendererException(
"Unsupported renderer {}".format(self.renderer)
)
return file_prefix
def _get_layer_data(self):
# ______________________________________________
# ____________________/ ____________________________________________/
# 1 - get scene name /__________________/
# ____________________/
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
# ______________________________________________
# ____________________/ ____________________________________________/
# 2 - detect renderer /__________________/
# ____________________/
renderer = self.renderer
# ________________________________________________
# __________________/ ______________________________________________/
# 3 - image prefix /__________________/
# __________________/
file_prefix = self.get_renderer_prefix()
if not file_prefix:
raise RuntimeError("Image prefix not set")
default_ext = cmds.getAttr("defaultRenderGlobals.imfPluginKey")
# ________________________________________________
# __________________/ ______________________________________________/
# 4 - get renderable cameras_____________/
# __________________/
# if we have <camera> token in prefix path we'll expect output for
# every renderable camera in layer.
renderable_cameras = self.get_renderable_cameras()
# ________________________________________________
# __________________/ ______________________________________________/
# 5 - get AOVs /____________________/
# __________________/
enabled_aovs = self.get_aovs()
layer_name = self.layer
if self.layer.startswith("rs_"):
layer_name = self.layer[3:]
start_frame = int(self.get_render_attribute("startFrame"))
end_frame = int(self.get_render_attribute("endFrame"))
frame_step = int(self.get_render_attribute("byFrameStep"))
padding = int(self.get_render_attribute("extensionPadding"))
scene_data = {
"frameStart": start_frame,
"frameEnd": end_frame,
"frameStep": frame_step,
"padding": padding,
"cameras": renderable_cameras,
"sceneName": scene_name,
"layerName": layer_name,
"renderer": renderer,
"defaultExt": default_ext,
"filePrefix": file_prefix,
"enabledAOVs": enabled_aovs,
}
return scene_data
def _generate_single_file_sequence(self, layer_data, aov_name=None):
expected_files = []
for cam in layer_data["cameras"]:
# reset to the raw prefix for every camera, otherwise the next
# camera would reuse the already substituted prefix of the first
file_prefix = layer_data["filePrefix"]
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
# this is required to remove unfilled aov token, for example
# in Redshift. If `aov_name` is given (see the Redshift
# Cryptomatte case below), fill the token with it instead.
(R_REMOVE_AOV_TOKEN, "") if not aov_name
else (R_SUBSTITUTE_AOV_TOKEN, aov_name),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, ""),
)
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"]),
):
expected_files.append(
"{}.{}.{}".format(
file_prefix,
str(frame).rjust(layer_data["padding"], "0"),
layer_data["defaultExt"],
)
)
return expected_files
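# For illustration (hypothetical values): with filePrefix
# "<Scene>/<RenderLayer>/<Scene>_<RenderLayer>", sceneName "shot010",
# layerName "chars", padding 4 and defaultExt "exr" this yields
# "shot010/chars/shot010_chars.0001.exr" and so on for every frame.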
def _generate_aov_file_sequences(self, layer_data):
expected_files = []
aov_file_list = {}
file_prefix = layer_data["filePrefix"]
for aov in layer_data["enabledAOVs"]:
for cam in layer_data["cameras"]:
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
(R_SUBSTITUTE_AOV_TOKEN, aov[0]),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, ""),
)
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
aov_files = []
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"]),
):
aov_files.append(
"{}.{}.{}".format(
file_prefix,
str(frame).rjust(layer_data["padding"], "0"),
aov[1],
)
)
# if we have more than one renderable camera, append
# camera name to AOV to allow per camera AOVs.
aov_name = aov[0]
if len(layer_data["cameras"]) > 1:
aov_name = "{}_{}".format(aov[0], cam)
aov_file_list[aov_name] = aov_files
file_prefix = layer_data["filePrefix"]
expected_files.append(aov_file_list)
return expected_files
def get_files(self):
"""Return list of expected files.
It will translate render token strings ('<RenderPass>', etc.) to
their values. This task is tricky as every renderer deals with this
differently. It relies on the `get_aovs()` abstract method implemented
by every supported renderer.
"""
layer_data = self._get_layer_data()
expected_files = []
if layer_data.get("enabledAOVs"):
expected_files = self._generate_aov_file_sequences(layer_data)
else:
expected_files = self._generate_single_file_sequence(layer_data)
return expected_files
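# The result is either a flat list of frame paths (no AOVs) or a
# single-item list holding a dict of AOV name to frame paths, e.g.
# (hypothetical names): [{"beauty": [...], "diffuse": [...]}]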
def get_renderable_cameras(self):
"""Get all renderable cameras.
Returns:
list: list of renderable cameras.
"""
cam_parents = [
cmds.listRelatives(x, ap=True)[-1] for x in cmds.ls(cameras=True)
]
renderable_cameras = []
for cam in cam_parents:
renderable = False
if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam))):
renderable = True
for override in self.get_layer_overrides(
"{}.renderable".format(cam), self.layer
):
renderable = self.maya_is_true(override)
if renderable:
renderable_cameras.append(cam)
return renderable_cameras
def maya_is_true(self, attr_val):
"""Whether a Maya attr evaluates to True.
When querying an attribute value from an ambiguous object the
Maya API will return a list of values, which needs to be handled
properly to evaluate the attribute.
Args:
attr_val (mixed): Maya attribute to be evaluated as bool.
Returns:
bool: cast Maya attribute to Pythons boolean value.
"""
if isinstance(attr_val, types.BooleanType):
return attr_val
elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)
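# e.g. when the attribute name matches more than one node, getAttr can
# return a list like [False, True]; any() above then evaluates it to
# True because at least one value is truthy.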
def get_layer_overrides(self, attr, layer):
"""Get overrides for attribute on given render layer.
Args:
attr (str): Maya attribute name.
layer (str): Maya render layer name.
Returns:
Value of attribute override.
"""
connections = cmds.listConnections(attr, plugs=True)
if connections:
for connection in connections:
if connection:
node_name = connection.split(".")[0]
if cmds.nodeType(node_name) == "renderLayer":
attr_name = "%s.value" % ".".join(
connection.split(".")[:-1]
)
if node_name == layer:
yield cmds.getAttr(attr_name)
def get_render_attribute(self, attr):
"""Get attribute from render options.
Args:
attr (str): name of attribute to be looked up.
Returns:
Attribute value
"""
return lib.get_attr_in_layer(
"defaultRenderGlobals.{}".format(attr), layer=self.layer
)
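# A minimal sketch of the module Note: adding support for a new renderer.
# ``ExpectedFilesCustom`` and the "custom" renderer name are hypothetical
# placeholders for illustration only; ``get_aovs()`` has to return a list
# of (aov_name, extension) tuples.
class ExpectedFilesCustom(AExpectedFiles):
"""Hypothetical expected files for a custom renderer."""
def __init__(self, layer):
"""Constructor."""
super(ExpectedFilesCustom, self).__init__(layer)
self.renderer = "custom"
def get_aovs(self):
"""Get all AOVs - plain beauty only in this sketch."""
return [("beauty", cmds.getAttr("defaultRenderGlobals.imfPluginKey"))]
# Wiring it up would also need a branch in :func:`ExpectedFiles.get()`:
#     elif renderer.lower() == "custom":
#         return self._get_files(ExpectedFilesCustom(layer))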
class ExpectedFilesArnold(AExpectedFiles):
"""Expected files for Arnold renderer.
Attributes:
aiDriverExtension (dict): Arnold AOV driver extension mapping.
Is there a better way?
renderer (str): name of renderer.
"""
aiDriverExtension = {
"jpeg": "jpg",
"exr": "exr",
"deepexr": "exr",
"png": "png",
"tiff": "tif",
"mtoa_shaders": "ass", # TODO: research what those last two should be
"maya": "",
}
def __init__(self, layer):
"""Constructor."""
super(ExpectedFilesArnold, self).__init__(layer)
self.renderer = "arnold"
def get_aovs(self):
"""Get all AOVs.
See Also:
:func:`AExpectedFiles.get_aovs()`
Raises:
:class:`AOVError`: If AOV cannot be determined.
"""
enabled_aovs = []
try:
if not (
cmds.getAttr("defaultArnoldRenderOptions.aovMode")
and not cmds.getAttr("defaultArnoldDriver.mergeAOVs") # noqa: W503, E501
):
# AOVs are merged into a multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
# this occurs when the Render Settings window was not opened yet. In
# that case there are no Arnold options created, so the query for AOVs
# will fail. We terminate here as there are no AOVs specified then.
# This state will most probably fail later on some Validator
# anyway.
return enabled_aovs
# AOVs are set to be rendered separately. We should expect
# <RenderPass> token in path.
ai_aovs = [n for n in cmds.ls(type="aiAOV")]
for aov in ai_aovs:
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
ai_driver = cmds.listConnections("{}.outputs".format(aov))[0]
ai_translator = cmds.getAttr("{}.aiTranslator".format(ai_driver))
try:
aov_ext = self.aiDriverExtension[ai_translator]
except KeyError:
msg = (
"Unrecognized arnold " "driver format for AOV - {}"
).format(cmds.getAttr("{}.name".format(aov)))
raise AOVError(msg)
for override in self.get_layer_overrides(
"{}.enabled".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
# If aov RGBA is selected, arnold will translate it to `beauty`
aov_name = cmds.getAttr("%s.name" % aov)
if aov_name == "RGBA":
aov_name = "beauty"
enabled_aovs.append((aov_name, aov_ext))
# Append 'beauty' as this is Arnold's
# default. If <RenderPass> token is specified and no AOVs are
# defined, this will be used.
enabled_aovs.append(
(u"beauty", cmds.getAttr("defaultRenderGlobals.imfPluginKey"))
)
return enabled_aovs
class ExpectedFilesVray(AExpectedFiles):
"""Expected files for V-Ray renderer."""
def __init__(self, layer):
"""Constructor."""
super(ExpectedFilesVray, self).__init__(layer)
self.renderer = "vray"
def get_renderer_prefix(self):
"""Get image prefix for V-Ray.
This overrides :func:`AExpectedFiles.get_renderer_prefix()` as
we must add `<aov>` token manually.
See also:
:func:`AExpectedFiles.get_renderer_prefix()`
"""
prefix = super(ExpectedFilesVray, self).get_renderer_prefix()
prefix = "{}_<aov>".format(prefix)
return prefix
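# e.g. a prefix of "<Scene>/<Layer>/<Layer>" (hypothetical) becomes
# "<Scene>/<Layer>/<Layer>_<aov>", giving every AOV its own file name.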
def get_files(self):
"""Get expected files.
This overrides :func:`AExpectedFiles.get_files()` as we
need to add one sequence for plain beauty if AOVs are enabled,
because V-Ray outputs beauty without 'beauty' in the filename.
"""
expected_files = super(ExpectedFilesVray, self).get_files()
layer_data = self._get_layer_data()
if layer_data.get("enabledAOVs"):
expected_files[0][u"beauty"] = self._generate_single_file_sequence(
layer_data
) # noqa: E501
return expected_files
def get_aovs(self):
"""Get all AOVs.
See Also:
:func:`AExpectedFiles.get_aovs()`
"""
enabled_aovs = []
try:
# really? do we set it in vray just by selecting multichannel exr?
if (
cmds.getAttr("vraySettings.imageFormatStr")
== "exr (multichannel)" # noqa: W503
):
# AOVs are merged into a multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
# this occurs when the Render Settings window was not opened yet. In
# that case there are no V-Ray options created, so the query for AOVs
# will fail. We terminate here as there are no AOVs specified then.
# This state will most probably fail later on some Validator
# anyway.
return enabled_aovs
default_ext = cmds.getAttr("vraySettings.imageFormatStr")
if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
default_ext = "exr"
vr_aovs = [
n
for n in cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"]
)
]
for aov in vr_aovs:
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
for override in self.get_layer_overrides(
"{}.enabled".format(aov), "rs_{}".format(self.layer)
):
enabled = self.maya_is_true(override)
if enabled:
# todo: find out how V-Ray sets the format for AOVs
enabled_aovs.append(
(self._get_vray_aov_name(aov), default_ext))
return enabled_aovs
def _get_vray_aov_name(self, node):
# Get render element pass type
vray_node_attr = next(
attr
for attr in cmds.listAttr(node)
if attr.startswith("vray_name")
)
pass_type = vray_node_attr.rsplit("_", 1)[-1]
# Support V-Ray extratex explicit name (if set by user)
if pass_type == "extratex":
explicit_attr = "{}.vray_explicit_name_extratex".format(node)
explicit_name = cmds.getAttr(explicit_attr)
if explicit_name:
return explicit_name
# Node type is in the attribute name but we need to check the value
# of the attribute as it can be changed
return cmds.getAttr("{}.{}".format(node, vray_node_attr))
class ExpectedFilesRedshift(AExpectedFiles):
"""Expected files for Redshift renderer.
Attributes:
ext_mapping (list): Mapping redshift extension dropdown values
to strings.
unmerged_aovs (list): Names of AOVs that are not merged into the
resulting exr and need to be specified in the expectedFiles output.
"""
unmerged_aovs = ["Cryptomatte"]
ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]
def __init__(self, layer):
"""Construtor."""
super(ExpectedFilesRedshift, self).__init__(layer)
self.renderer = "redshift"
def get_renderer_prefix(self):
"""Get image prefix for Redshift.
This overrides :func:`AExpectedFiles.get_renderer_prefix()` as
we must add `<aov>` token manually.
See also:
:func:`AExpectedFiles.get_renderer_prefix()`
"""
prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix()
prefix = "{}_<aov>".format(prefix)
return prefix
def get_files(self):
"""Get expected files.
This overrides :func:`AExpectedFiles.get_files()` as we
need to add one sequence for plain beauty if AOVs are enabled,
because Redshift outputs beauty without 'beauty' in the filename.
"""
expected_files = super(ExpectedFilesRedshift, self).get_files()
# we need to add one sequence for plain beauty if AOVs are enabled.
# as redshift output beauty without 'beauty' in filename.
layer_data = self._get_layer_data()
if layer_data.get("enabledAOVs"):
expected_files[0][u"beauty"] = self._generate_single_file_sequence(
layer_data
)
# Redshift doesn't merge Cryptomatte AOV to final exr. We need to check
# for such condition and add it to list of expected files.
for aov in layer_data.get("enabledAOVs"):
if aov[0].lower() == "cryptomatte":
aov_name = aov[0]
expected_files.append(
{aov_name: self._generate_single_file_sequence(
layer_data, aov_name=aov_name)})
return expected_files
def get_aovs(self):
"""Get all AOVs.
See Also:
:func:`AExpectedFiles.get_aovs()`
"""
enabled_aovs = []
try:
if self.maya_is_true(
cmds.getAttr("redshiftOptions.exrForceMultilayer")
):
# AOVs are merged into a multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
# this occurs when the Render Settings window was not opened yet. In
# that case there are no Redshift options created, so the query for AOVs
# will fail. We terminate here as there are no AOVs specified then.
# This state will most probably fail later on some Validator
# anyway.
return enabled_aovs
default_ext = self.ext_mapping[
cmds.getAttr("redshiftOptions.imageFormat")
]
rs_aovs = [n for n in cmds.ls(type="RedshiftAOV")]
# todo: find out how to detect multichannel exr for redshift
for aov in rs_aovs:
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
for override in self.get_layer_overrides(
"{}.enabled".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
# If AOVs are merged into multipart exr, append AOV only if it
# is in the list of AOVs that renderer cannot (or will not)
# merge into final exr.
if self.maya_is_true(
cmds.getAttr("redshiftOptions.exrForceMultilayer")
):
if cmds.getAttr("%s.name" % aov) in self.unmerged_aovs:
enabled_aovs.append(
(cmds.getAttr("%s.name" % aov), default_ext)
)
else:
enabled_aovs.append(
(cmds.getAttr("%s.name" % aov), default_ext)
)
if self.maya_is_true(
cmds.getAttr("redshiftOptions.exrForceMultilayer")
):
# AOVs are merged into a multi-channel file
self.multipart = True
return enabled_aovs
class ExpectedFilesRenderman(AExpectedFiles):
"""Expected files for Renderman renderer.
Warning:
This is very rudimentary and needs more love and testing.
"""
def __init__(self, layer):
"""Constructor."""
super(ExpectedFilesRenderman, self).__init__(layer)
self.renderer = "renderman"
def get_aovs(self):
"""Get all AOVs.
See Also:
:func:`AExpectedFiles.get_aovs()`
"""
enabled_aovs = []
default_ext = "exr"
displays = cmds.listConnections("rmanGlobals.displays")
for aov in displays:
aov_name = str(aov)
if aov_name == "rmanDefaultDisplay":
aov_name = "beauty"
enabled = self.maya_is_true(cmds.getAttr("{}.enable".format(aov)))
for override in self.get_layer_overrides(
"{}.enable".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
enabled_aovs.append((aov_name, default_ext))
return enabled_aovs
def get_files(self):
"""Get expected files.
This overrides :func:`AExpectedFiles.get_files()` because in
Renderman we hack it by prepending a path. This path would
normally be translated from `rmanGlobals.imageOutputDir`. We skip
this and hardcode the prepended path we expect. There is no place for
the user to mess around with these settings anyway and they are
enforced by the render settings validator.
"""
layer_data = self._get_layer_data()
new_aovs = {}
expected_files = super(ExpectedFilesRenderman, self).get_files()
# we always get beauty
for aov, files in expected_files[0].items():
new_files = []
for file in files:
new_file = "{}/{}/{}".format(
layer_data["sceneName"], layer_data["layerName"], file
)
new_files.append(new_file)
new_aovs[aov] = new_files
return [new_aovs]
class ExpectedFilesMentalray(AExpectedFiles):
"""Skeleton unimplemented class for Mentalray renderer."""
def __init__(self, layer):
"""Constructor.
Raises:
:exc:`UnimplementedRendererException`: as it is not implemented.
"""
raise UnimplementedRendererException("Mentalray not implemented")
def get_aovs(self):
"""Get all AOVs.
See Also:
:func:`AExpectedFiles.get_aovs()`
"""
return []
class AOVError(Exception):
"""Custom exception for determining AOVs."""
pass
class UnsupportedRendererException(Exception):
"""Custom exception.
Raised when requesting data from unsupported renderer.
"""
pass
class UnimplementedRendererException(Exception):
"""Custom exception.
Raised when requesting data from renderer that is not implemented yet.
"""
pass

View file

@ -0,0 +1,33 @@
import os
from avalon import api
import pyblish.api
def install():
print("Installing Pype config...")
plugins_directory = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
"plugins",
"photoshop"
)
pyblish.api.register_plugin_path(
os.path.join(plugins_directory, "publish")
)
api.register_plugin_path(
api.Loader, os.path.join(plugins_directory, "load")
)
api.register_plugin_path(
api.Creator, os.path.join(plugins_directory, "create")
)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle layer visibility on instance toggles."""
instance[0].Visible = new_value

View file

@ -1,5 +1,5 @@
from Qt import QtWidgets, QtCore
from .widgets import LogsWidget, LogDetailWidget
from .widgets import LogsWidget, OutputWidget
from avalon import style
@ -10,7 +10,7 @@ class LogsWindow(QtWidgets.QWidget):
self.setStyleSheet(style.load_stylesheet())
self.resize(1200, 800)
logs_widget = LogsWidget(parent=self)
log_detail = LogDetailWidget(parent=self)
log_detail = OutputWidget(parent=self)
main_layout = QtWidgets.QHBoxLayout()
@ -33,7 +33,5 @@ class LogsWindow(QtWidgets.QWidget):
def on_selection_changed(self):
index = self.logs_widget.selected_log()
if not index or not index.isValid():
return
node = index.data(self.logs_widget.model.NodeRole)
self.log_detail.set_detail(node)

View file

@ -1,4 +1,5 @@
import os
import collections
from Qt import QtCore
from pype.api import Logger
from pypeapp.lib.log import _bootstrap_mongo_log
@ -8,31 +9,32 @@ log = Logger().get_logger("LogModel", "LoggingModule")
class LogModel(QtCore.QAbstractItemModel):
COLUMNS = [
"user",
"host",
"lineNumber",
"method",
"module",
"fileName",
"loggerName",
"message",
"level",
"timestamp",
"process_name",
"hostname",
"hostip",
"username",
"system_name",
"started"
]
colums_mapping = {
"user": "User",
"host": "Host",
"lineNumber": "Line n.",
"method": "Method",
"module": "Module",
"fileName": "File name",
"loggerName": "Logger name",
"message": "Message",
"level": "Level",
"timestamp": "Timestamp",
"process_name": "Process Name",
"process_id": "Process Id",
"hostname": "Hostname",
"hostip": "Host IP",
"username": "Username",
"system_name": "System name",
"started": "Started at"
}
process_keys = [
"process_id", "hostname", "hostip",
"username", "system_name", "process_name"
]
log_keys = [
"timestamp", "level", "thread", "threadName", "message", "loggerName",
"fileName", "module", "method", "lineNumber"
]
default_value = "- Not set -"
NodeRole = QtCore.Qt.UserRole + 1
def __init__(self, parent=None):
@ -50,14 +52,47 @@ class LogModel(QtCore.QAbstractItemModel):
self._root_node.add_child(node)
def refresh(self):
self.log_by_process = collections.defaultdict(list)
self.process_info = {}
self.clear()
self.beginResetModel()
if self.dbcon:
result = self.dbcon.find({})
for item in result:
self.add_log(item)
self.endResetModel()
process_id = item.get("process_id")
# backwards (in)compatibility
if not process_id:
continue
if process_id not in self.process_info:
proc_dict = {}
for key in self.process_keys:
proc_dict[key] = (
item.get(key) or self.default_value
)
self.process_info[process_id] = proc_dict
if "_logs" not in self.process_info[process_id]:
self.process_info[process_id]["_logs"] = []
log_item = {}
for key in self.log_keys:
log_item[key] = item.get(key) or self.default_value
if "exception" in item:
log_item["exception"] = item["exception"]
self.process_info[process_id]["_logs"].append(log_item)
for item in self.process_info.values():
item["_logs"] = sorted(
item["_logs"], key=lambda item: item["timestamp"]
)
item["started"] = item["_logs"][0]["timestamp"]
self.add_log(item)
self.endResetModel()
def data(self, index, role):
if not index.isValid():
@ -68,7 +103,7 @@ class LogModel(QtCore.QAbstractItemModel):
column = index.column()
key = self.COLUMNS[column]
if key == "timestamp":
if key == "started":
return str(node.get(key, None))
return node.get(key, None)
@ -86,8 +121,7 @@ class LogModel(QtCore.QAbstractItemModel):
child_item = parent_node.child(row)
if child_item:
return self.createIndex(row, column, child_item)
else:
return QtCore.QModelIndex()
return QtCore.QModelIndex()
def rowCount(self, parent):
node = self._root_node

View file

@ -1,5 +1,5 @@
import getpass
from Qt import QtCore, QtWidgets, QtGui
from PyQt5.QtCore import QVariant
from .models import LogModel
@ -97,7 +97,6 @@ class SelectableMenu(QtWidgets.QMenu):
class CustomCombo(QtWidgets.QWidget):
selection_changed = QtCore.Signal()
checked_changed = QtCore.Signal(bool)
def __init__(self, title, parent=None):
super(CustomCombo, self).__init__(parent)
@ -126,27 +125,12 @@ class CustomCombo(QtWidgets.QWidget):
self.toolmenu.clear()
self.addItems(items)
def select_items(self, items, ignore_input=False):
if not isinstance(items, list):
items = [items]
for action in self.toolmenu.actions():
check = True
if (
action.text() in items and ignore_input or
action.text() not in items and not ignore_input
):
check = False
action.setChecked(check)
def addItems(self, items):
for item in items:
action = self.toolmenu.addAction(item)
action.setCheckable(True)
self.toolmenu.addAction(action)
action.setChecked(True)
action.triggered.connect(self.checked_changed)
self.toolmenu.addAction(action)
def items(self):
for action in self.toolmenu.actions():
@ -200,42 +184,15 @@ class CheckableComboBox(QtWidgets.QComboBox):
for text, checked in items:
text_item = QtGui.QStandardItem(text)
checked_item = QtGui.QStandardItem()
checked_item.setData(
QtCore.QVariant(checked), QtCore.Qt.CheckStateRole
)
checked_item.setData(QVariant(checked), QtCore.Qt.CheckStateRole)
self.model.appendRow([text_item, checked_item])
class FilterLogModel(QtCore.QSortFilterProxyModel):
sub_dict = ["$gt", "$lt", "$not"]
def __init__(self, key_values, parent=None):
super(FilterLogModel, self).__init__(parent)
self.allowed_key_values = key_values
def filterAcceptsRow(self, row, parent):
"""
Reimplemented from base class.
"""
model = self.sourceModel()
for key, values in self.allowed_key_values.items():
col_indx = model.COLUMNS.index(key)
value = model.index(row, col_indx, parent).data(
QtCore.Qt.DisplayRole
)
if value not in values:
return False
return True
class LogsWidget(QtWidgets.QWidget):
"""A widget that lists the published subsets for an asset"""
active_changed = QtCore.Signal()
_level_order = [
"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
]
def __init__(self, parent=None):
super(LogsWidget, self).__init__(parent=parent)
@ -243,41 +200,47 @@ class LogsWidget(QtWidgets.QWidget):
filter_layout = QtWidgets.QHBoxLayout()
# user_filter = SearchComboBox(self, "Users")
user_filter = CustomCombo("Users", self)
users = model.dbcon.distinct("user")
user_filter.populate(users)
user_filter.checked_changed.connect(self.user_changed)
user_filter.select_items(getpass.getuser())
user_filter.selection_changed.connect(self.user_changed)
level_filter = CustomCombo("Levels", self)
# levels = [(level, True) for level in model.dbcon.distinct("level")]
levels = model.dbcon.distinct("level")
_levels = []
for level in self._level_order:
if level in levels:
_levels.append(level)
level_filter.populate(_levels)
level_filter.checked_changed.connect(self.level_changed)
level_filter.addItems(levels)
# date_from_label = QtWidgets.QLabel("From:")
# date_filter_from = QtWidgets.QDateTimeEdit()
#
# date_from_layout = QtWidgets.QVBoxLayout()
# date_from_layout.addWidget(date_from_label)
# date_from_layout.addWidget(date_filter_from)
#
# date_to_label = QtWidgets.QLabel("To:")
# date_filter_to = QtWidgets.QDateTimeEdit()
#
# date_to_layout = QtWidgets.QVBoxLayout()
# date_to_layout.addWidget(date_to_label)
# date_to_layout.addWidget(date_filter_to)
date_from_label = QtWidgets.QLabel("From:")
date_filter_from = QtWidgets.QDateTimeEdit()
date_from_layout = QtWidgets.QVBoxLayout()
date_from_layout.addWidget(date_from_label)
date_from_layout.addWidget(date_filter_from)
# now = datetime.datetime.now()
# QtCore.QDateTime(
# now.year,
# now.month,
# now.day,
# now.hour,
# now.minute,
# second=0,
# msec=0,
# timeSpec=0
# )
date_to_label = QtWidgets.QLabel("To:")
date_filter_to = QtWidgets.QDateTimeEdit()
date_to_layout = QtWidgets.QVBoxLayout()
date_to_layout.addWidget(date_to_label)
date_to_layout.addWidget(date_filter_to)
filter_layout.addWidget(user_filter)
filter_layout.addWidget(level_filter)
filter_layout.setAlignment(QtCore.Qt.AlignLeft)
# filter_layout.addLayout(date_from_layout)
# filter_layout.addLayout(date_to_layout)
filter_layout.addLayout(date_from_layout)
filter_layout.addLayout(date_to_layout)
view = QtWidgets.QTreeView(self)
view.setAllColumnsShowFocus(True)
@ -290,58 +253,28 @@ class LogsWidget(QtWidgets.QWidget):
view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
view.setSortingEnabled(True)
view.sortByColumn(
model.COLUMNS.index("timestamp"),
model.COLUMNS.index("started"),
QtCore.Qt.AscendingOrder
)
key_val = {
"user": users,
"level": levels
}
proxy_model = FilterLogModel(key_val, view)
proxy_model.setSourceModel(model)
view.setModel(proxy_model)
view.customContextMenuRequested.connect(self.on_context_menu)
view.selectionModel().selectionChanged.connect(self.active_changed)
# WARNING this is cool but slows down widget a lot
# header = view.header()
# # Enforce the columns to fit the data (purely cosmetic)
# if Qt.__binding__ in ("PySide2", "PyQt5"):
# header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
# else:
# header.setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
view.setModel(model)
view.pressed.connect(self._on_activated)
# prepare
model.refresh()
# Store to memory
self.model = model
self.proxy_model = proxy_model
self.view = view
self.user_filter = user_filter
self.level_filter = level_filter
def _on_activated(self, *args, **kwargs):
self.active_changed.emit()
def user_changed(self):
valid_actions = []
for action in self.user_filter.items():
if action.isChecked():
valid_actions.append(action.text())
self.proxy_model.allowed_key_values["user"] = valid_actions
self.proxy_model.invalidate()
def level_changed(self):
valid_actions = []
for action in self.level_filter.items():
if action.isChecked():
valid_actions.append(action.text())
self.proxy_model.allowed_key_values["level"] = valid_actions
self.proxy_model.invalidate()
print(action)
def on_context_menu(self, point):
# TODO: will there be any actions? it's ready
@ -360,10 +293,74 @@ class LogsWidget(QtWidgets.QWidget):
rows = selection.selectedRows(column=0)
if len(rows) == 1:
return rows[0]
return None
class OutputWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(OutputWidget, self).__init__(parent=parent)
layout = QtWidgets.QVBoxLayout(self)
output_text = QtWidgets.QTextEdit()
output_text.setReadOnly(True)
# output_text.setLineWrapMode(QtWidgets.QTextEdit.FixedPixelWidth)
layout.addWidget(output_text)
self.setLayout(layout)
self.output_text = output_text
def add_line(self, line):
self.output_text.append(line)
def set_detail(self, node):
self.output_text.clear()
for log in node["_logs"]:
level = log["level"].lower()
line_f = "<font color=\"White\">{message}"
if level == "debug":
line_f = (
"<font color=\"Yellow\"> -"
" <font color=\"Lime\">{{ {loggerName} }}: ["
" <font color=\"White\">{message}"
" <font color=\"Lime\">]"
)
elif level == "info":
line_f = (
"<font color=\"Lime\">>>> ["
" <font color=\"White\">{message}"
" <font color=\"Lime\">]"
)
elif level == "warning":
line_f = (
"<font color=\"Yellow\">*** WRN:"
" <font color=\"Lime\"> >>> {{ {loggerName} }}: ["
" <font color=\"White\">{message}"
" <font color=\"Lime\">]"
)
elif level == "error":
line_f = (
"<font color=\"Red\">!!! ERR:"
" <font color=\"White\">{timestamp}"
" <font color=\"Lime\">>>> {{ {loggerName} }}: ["
" <font color=\"White\">{message}"
" <font color=\"Lime\">]"
)
exc = log.get("exception")
if exc:
log["message"] = exc["message"]
line = line_f.format(**log)
self.add_line(line)
if not exc:
continue
for _line in exc["stackTrace"].split("\n"):
self.add_line(_line)
class LogDetailWidget(QtWidgets.QWidget):
"""A Widget that display information about a specific version"""
data_rows = [
@ -418,5 +415,4 @@ class LogDetailWidget(QtWidgets.QWidget):
value = detail_data.get(row) or "< Not set >"
data[row] = value
self.detail_widget.setHtml(self.html_text.format(**data))

View file

@ -1,4 +1,3 @@
import os
from Qt import QtWidgets
from pype.api import Logger

View file

@ -34,19 +34,7 @@ class CreateLayout(Creator):
objects_to_link = set()
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
objects_to_link.add(obj)
if obj.type == 'ARMATURE':
for subobj in obj.children:
objects_to_link.add(subobj)
for obj in objects_to_link:
collection.objects.link(obj)
collection.children.link(obj.users_collection[0])
return collection

View file

@ -31,22 +31,11 @@ class CreateRig(Creator):
# This automatically links the children meshes if they were not
# selected, and doesn't link them twice if they were instead
# manually selected by the user.
objects_to_link = set()
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
objects_to_link.add(obj)
if obj.type == 'ARMATURE':
for subobj in obj.children:
objects_to_link.add(subobj)
for obj in objects_to_link:
collection.objects.link(obj)
for child in obj.users_collection[0].children:
collection.children.link(child)
collection.objects.link(obj)
return collection

View file

@ -29,7 +29,6 @@ class BlendAnimationLoader(pype.hosts.blender.plugin.AssetLoader):
icon = "code-fork"
color = "orange"
@staticmethod
def _remove(self, objects, lib_container):
for obj in objects:
@ -41,7 +40,6 @@ class BlendAnimationLoader(pype.hosts.blender.plugin.AssetLoader):
bpy.data.collections.remove(bpy.data.collections[lib_container])
@staticmethod
def _process(self, libpath, lib_container, container_name):
relative = bpy.context.preferences.filepaths.use_relative_paths
@ -131,7 +129,7 @@ class BlendAnimationLoader(pype.hosts.blender.plugin.AssetLoader):
container_metadata["lib_container"] = lib_container
objects_list = self._process(
self, libpath, lib_container, container_name)
libpath, lib_container, container_name)
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
@ -205,14 +203,10 @@ class BlendAnimationLoader(pype.hosts.blender.plugin.AssetLoader):
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
# Get the armature of the rig
armatures = [obj for obj in objects if obj.type == 'ARMATURE']
assert(len(armatures) == 1)
self._remove(self, objects, lib_container)
self._remove(objects, lib_container)
objects_list = self._process(
self, str(libpath), lib_container, collection.name)
str(libpath), lib_container, collection.name)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
@ -249,7 +243,7 @@ class BlendAnimationLoader(pype.hosts.blender.plugin.AssetLoader):
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
self._remove(self, objects, lib_container)
self._remove(objects, lib_container)
bpy.data.collections.remove(collection)

View file

@ -29,7 +29,6 @@ class BlendLayoutLoader(pype.hosts.blender.plugin.AssetLoader):
icon = "code-fork"
color = "orange"
@staticmethod
def _remove(self, objects, lib_container):
for obj in objects:
@ -39,9 +38,13 @@ class BlendLayoutLoader(pype.hosts.blender.plugin.AssetLoader):
elif obj.type == 'MESH':
bpy.data.meshes.remove(obj.data)
for element_container in bpy.data.collections[lib_container].children:
for child in element_container.children:
bpy.data.collections.remove(child)
bpy.data.collections.remove(element_container)
bpy.data.collections.remove(bpy.data.collections[lib_container])
@staticmethod
def _process(self, libpath, lib_container, container_name, actions):
relative = bpy.context.preferences.filepaths.use_relative_paths
@ -56,24 +59,27 @@ class BlendLayoutLoader(pype.hosts.blender.plugin.AssetLoader):
layout_container = scene.collection.children[lib_container].make_local()
meshes = [
obj for obj in layout_container.objects if obj.type == 'MESH']
armatures = [
obj for obj in layout_container.objects if obj.type == 'ARMATURE']
meshes = []
armatures = []
objects_list = []
for element_container in layout_container.children:
element_container.make_local()
meshes.extend([obj for obj in element_container.objects if obj.type == 'MESH'])
armatures.extend([obj for obj in element_container.objects if obj.type == 'ARMATURE'])
for child in element_container.children:
child.make_local()
meshes.extend(child.objects)
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in meshes + armatures:
obj = obj.make_local()
obj.data.make_local()
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
@ -82,7 +88,6 @@ class BlendLayoutLoader(pype.hosts.blender.plugin.AssetLoader):
action = actions.get( obj.name, None )
if obj.type == 'ARMATURE' and action is not None:
obj.animation_data.action = action
objects_list.append(obj)
@ -130,7 +135,7 @@ class BlendLayoutLoader(pype.hosts.blender.plugin.AssetLoader):
container_metadata["lib_container"] = lib_container
objects_list = self._process(
self, libpath, lib_container, container_name, {})
libpath, lib_container, container_name, {})
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
@ -212,10 +217,10 @@ class BlendLayoutLoader(pype.hosts.blender.plugin.AssetLoader):
actions[obj.name] = obj.animation_data.action
self._remove(self, objects, lib_container)
self._remove(objects, lib_container)
objects_list = self._process(
self, str(libpath), lib_container, collection.name, actions)
str(libpath), lib_container, collection.name, actions)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
@ -252,7 +257,7 @@ class BlendLayoutLoader(pype.hosts.blender.plugin.AssetLoader):
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
self._remove(self, objects, lib_container)
self._remove(objects, lib_container)
bpy.data.collections.remove(collection)

View file

@ -30,7 +30,6 @@ class BlendModelLoader(pype.hosts.blender.plugin.AssetLoader):
icon = "code-fork"
color = "orange"
@staticmethod
def _remove(self, objects, lib_container):
for obj in objects:
@ -39,7 +38,6 @@ class BlendModelLoader(pype.hosts.blender.plugin.AssetLoader):
bpy.data.collections.remove(bpy.data.collections[lib_container])
@staticmethod
def _process(self, libpath, lib_container, container_name):
relative = bpy.context.preferences.filepaths.use_relative_paths
@ -118,7 +116,7 @@ class BlendModelLoader(pype.hosts.blender.plugin.AssetLoader):
container_metadata["lib_container"] = lib_container
objects_list = self._process(
self, libpath, lib_container, container_name)
libpath, lib_container, container_name)
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
@ -189,10 +187,10 @@ class BlendModelLoader(pype.hosts.blender.plugin.AssetLoader):
logger.info("Library already loaded, not updating...")
return
self._remove(self, objects, lib_container)
self._remove(objects, lib_container)
objects_list = self._process(
self, str(libpath), lib_container, collection.name)
str(libpath), lib_container, collection.name)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
@ -226,7 +224,7 @@ class BlendModelLoader(pype.hosts.blender.plugin.AssetLoader):
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
self._remove(self, objects, lib_container)
self._remove(objects, lib_container)
bpy.data.collections.remove(collection)

View file

@ -30,7 +30,6 @@ class BlendRigLoader(pype.hosts.blender.plugin.AssetLoader):
icon = "code-fork"
color = "orange"
@staticmethod
def _remove(self, objects, lib_container):
for obj in objects:
@ -40,9 +39,11 @@ class BlendRigLoader(pype.hosts.blender.plugin.AssetLoader):
elif obj.type == 'MESH':
bpy.data.meshes.remove(obj.data)
for child in bpy.data.collections[lib_container].children:
bpy.data.collections.remove(child)
bpy.data.collections.remove(bpy.data.collections[lib_container])
@staticmethod
def _process(self, libpath, lib_container, container_name, action):
relative = bpy.context.preferences.filepaths.use_relative_paths
@ -57,32 +58,30 @@ class BlendRigLoader(pype.hosts.blender.plugin.AssetLoader):
rig_container = scene.collection.children[lib_container].make_local()
meshes = [obj for obj in rig_container.objects if obj.type == 'MESH']
meshes = []
armatures = [
obj for obj in rig_container.objects if obj.type == 'ARMATURE']
objects_list = []
assert(len(armatures) == 1)
for child in rig_container.children:
child.make_local()
meshes.extend( child.objects )
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in meshes + armatures:
obj = obj.make_local()
obj.data.make_local()
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
if obj.type == 'ARMATURE' and action is not None:
obj.animation_data.action = action
objects_list.append(obj)
@ -130,7 +129,7 @@ class BlendRigLoader(pype.hosts.blender.plugin.AssetLoader):
container_metadata["lib_container"] = lib_container
objects_list = self._process(
self, libpath, lib_container, container_name, None)
libpath, lib_container, container_name, None)
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
@ -209,10 +208,10 @@ class BlendRigLoader(pype.hosts.blender.plugin.AssetLoader):
action = armatures[0].animation_data.action
self._remove(self, objects, lib_container)
self._remove(objects, lib_container)
objects_list = self._process(
self, str(libpath), lib_container, collection.name, action)
str(libpath), lib_container, collection.name, action)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
@ -249,7 +248,7 @@ class BlendRigLoader(pype.hosts.blender.plugin.AssetLoader):
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
self._remove(self, objects, lib_container)
self._remove(objects, lib_container)
bpy.data.collections.remove(collection)

View file

@ -1,22 +0,0 @@
"""
Requires:
config_data -> ftrack.output_representation
Provides:
context -> output_repre_config (str)
"""
import pyblish.api
from pype.api import config
class CollectOutputRepreConfig(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder
label = "Collect Config for representation"
hosts = ["shell", "standalonepublisher"]
def process(self, context):
config_data = config.get_presets()["ftrack"]["output_representation"]
context.data['output_repre_config'] = config_data

View file

@ -77,7 +77,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"gizmo",
"source",
"matchmove",
"image"
"image",
"source",
"assembly",
"fbx",

View file

@ -147,7 +147,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
hosts = ["fusion", "maya", "nuke"]
families = ["render.farm", "prerener", "renderlayer", "imagesequence"]
families = ["render.farm", "prerener",
"renderlayer", "imagesequence", "vrayscene"]
aov_filter = {"maya": ["beauty"]}

View file

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Create ``Render`` instance in Maya."""
import os
import json
import appdirs
@ -11,7 +13,38 @@ import avalon.maya
class CreateRender(avalon.maya.Creator):
"""Create render layer for export"""
"""Create *render* instance.
Render instances are not actually published; they hold options for
collecting of render data. If a render instance is present, it will
trigger collection of render layers, AOVs and cameras for either direct
submission to the render farm or export to various standalone formats
(like V-Ray's ``vrscene`` or Arnold's ``ass`` files) that are then
submitted to the render farm.
The instance has the following attributes::
primaryPool (list of str): Primary list of slave machine pool to use.
secondaryPool (list of str): Optional secondary list of slave pools.
suspendPublishJob (bool): Suspend the job after it is submitted.
extendFrames (bool): Use already existing frames from previous version
to extend current render.
overrideExistingFrame (bool): Overwrite already existing frames.
priority (int): Submitted job priority
framesPerTask (int): How many frames per task to render. This is
basically job division on render farm.
whitelist (list of str): White list of slave machines
machineList (list of str): Specific list of slave machines to use
useMayaBatch (bool): Use Maya batch mode to render, as opposed to
Maya interactive mode. This consumes different licenses.
vrscene (bool): Submit as ``vrscene`` file for standalone V-Ray
renderer.
ass (bool): Submit as ``ass`` file for standalone Arnold renderer.
See Also:
https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup
"""
label = "Render"
family = "rendering"
@ -42,9 +75,11 @@ class CreateRender(avalon.maya.Creator):
}
def __init__(self, *args, **kwargs):
"""Constructor."""
super(CreateRender, self).__init__(*args, **kwargs)
def process(self):
"""Entry point."""
exists = cmds.ls(self.name)
if exists:
return cmds.warning("%s already exists." % exists[0])
@ -145,17 +180,22 @@ class CreateRender(avalon.maya.Creator):
self.data["whitelist"] = False
self.data["machineList"] = ""
self.data["useMayaBatch"] = True
self.data["vrayScene"] = False
# Disable for now as this feature is not working yet
# self.data["assScene"] = False
self.options = {"useSelection": False} # Force no content
def _load_credentials(self):
"""
Load Muster credentials from file and set `MUSTER_USER`,
`MUSTER_PASSWORD`, `MUSTER_REST_URL` is loaded from presets.
"""Load Muster credentials.
.. todo::
Load Muster credentials from file and set ``MUSTER_USER`` and
``MUSTER_PASSWORD``. ``MUSTER_REST_URL`` is loaded from presets.
Raises:
RuntimeError: If loaded credentials are invalid.
AttributeError: If ``MUSTER_REST_URL`` is not set.
Show login dialog if access token is invalid or missing.
"""
app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
file_name = "muster_cred.json"
@ -172,8 +212,11 @@ class CreateRender(avalon.maya.Creator):
raise AttributeError("Muster REST API url not set")
def _get_muster_pools(self):
"""
Get render pools from muster
"""Get render pools from Muster.
Raises:
Exception: If pool list cannot be obtained from Muster.
"""
params = {"authToken": self._token}
api_entry = "/api/pools/list"
@ -209,14 +252,17 @@ class CreateRender(avalon.maya.Creator):
raise Exception("Cannot show login form to Muster")
def _requests_post(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
"""Wrap request post method.
WARNING: disabling SSL certificate validation is defeating one line
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline or Muster server are
running with self-signed certificates and their certificate is not
added to trusted certificates on client machines.
Warning:
Disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if "verify" not in kwargs:
kwargs["verify"] = (
@ -225,14 +271,17 @@ class CreateRender(avalon.maya.Creator):
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
"""Wrap request get method.
WARNING: disabling SSL certificate validation is defeating one line
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline or Muster server are
running with self-signed certificates and their certificate is not
added to trusted certificates on client machines.
Warning:
Disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if "verify" not in kwargs:
kwargs["verify"] = (

View file

@ -1,4 +1,6 @@
"""
# -*- coding: utf-8 -*-
"""Collect render data.
This collector will go through render layers in Maya and prepare all data
needed to create instances and their representations for submission and
publishing on farm.
@ -39,10 +41,7 @@ Provides:
import re
import os
import types
import six
import json
from abc import ABCMeta, abstractmethod
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
@ -50,55 +49,19 @@ import maya.app.renderSetup.model.renderSetup as renderSetup
import pyblish.api
from avalon import maya, api
from pype.hosts.maya.expected_files import ExpectedFiles
from pype.hosts.maya import lib
R_SINGLE_FRAME = re.compile(r"^(-?)\d+$")
R_FRAME_RANGE = re.compile(r"^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$")
R_FRAME_NUMBER = re.compile(r".+\.(?P<frame>[0-9]+)\..+")
R_LAYER_TOKEN = re.compile(
r".*((?:%l)|(?:<layer>)|(?:<renderlayer>)).*", re.IGNORECASE
)
R_AOV_TOKEN = re.compile(r".*%a.*|.*<aov>.*|.*<renderpass>.*", re.IGNORECASE)
R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)
R_REMOVE_AOV_TOKEN = re.compile(r"(?:_|\.)((?:%a)|(?:<aov>)|(?:<renderpass>))",
re.IGNORECASE)
# to remove unused renderman tokens
R_CLEAN_FRAME_TOKEN = re.compile(r"\.?<f\d>\.?", re.IGNORECASE)
R_CLEAN_EXT_TOKEN = re.compile(r"\.?<ext>\.?", re.IGNORECASE)
R_SUBSTITUTE_LAYER_TOKEN = re.compile(
r"%l|<layer>|<renderlayer>", re.IGNORECASE
)
R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r"%c|<camera>", re.IGNORECASE)
R_SUBSTITUTE_SCENE_TOKEN = re.compile(r"%s|<scene>", re.IGNORECASE)
RENDERER_NAMES = {
"mentalray": "MentalRay",
"vray": "V-Ray",
"arnold": "Arnold",
"renderman": "Renderman",
"redshift": "Redshift",
}
# not sure about the renderman image prefix
ImagePrefixes = {
"mentalray": "defaultRenderGlobals.imageFilePrefix",
"vray": "vraySettings.fileNamePrefix",
"arnold": "defaultRenderGlobals.imageFilePrefix",
"renderman": "rmanGlobals.imageFileFormat",
"redshift": "defaultRenderGlobals.imageFilePrefix",
}
class CollectMayaRender(pyblish.api.ContextPlugin):
"""Gather all publishable render layers from renderSetup"""
"""Gather all publishable render layers from renderSetup."""
order = pyblish.api.CollectorOrder + 0.01
hosts = ["maya"]
label = "Collect Render Layers"
def process(self, context):
"""Entry point to collector."""
render_instance = None
for instance in context:
if "rendering" in instance.data["families"]:
@ -124,7 +87,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
self._rs = renderSetup.instance()
current_layer = self._rs.getVisibleRenderLayer()
maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()}
maya_render_layers = {
layer.name(): layer for layer in self._rs.getRenderLayers()
}
self.maya_layers = maya_render_layers
@ -156,13 +121,13 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
# test if there are sets (subsets) to attach render to
sets = cmds.sets(layer, query=True) or []
attachTo = []
attach_to = []
if sets:
for s in sets:
if "family" not in cmds.listAttr(s):
continue
attachTo.append(
attach_to.append(
{
"version": None, # we need integrator for that
"subset": s,
@ -183,15 +148,15 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
# return all expected files for all cameras and aovs in given
# frame range
exf = ExpectedFiles()
exp_files = exf.get(renderer, layer_name)
self.log.info("multipart: {}".format(exf.multipart))
ef = ExpectedFiles()
exp_files = ef.get(renderer, layer_name)
self.log.info("multipart: {}".format(ef.multipart))
assert exp_files, "no file names were generated, this is a bug"
# if we want to attach render to subset, check if we have AOV's
# in expectedFiles. If so, raise error as we cannot attach AOV
# (considered to be subset on its own) to another subset
if attachTo:
if attach_to:
assert len(exp_files[0].keys()) == 1, (
"attaching multiple AOVs or renderable cameras to "
"subset is not supported"
@ -207,15 +172,15 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
if isinstance(exp_files[0], dict):
for aov, files in exp_files[0].items():
full_paths = []
for ef in files:
full_path = os.path.join(workspace, "renders", ef)
for e in files:
full_path = os.path.join(workspace, "renders", e)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
aov_dict[aov] = full_paths
else:
full_paths = []
for ef in exp_files:
full_path = os.path.join(workspace, "renders", ef)
for e in exp_files:
full_path = os.path.join(workspace, "renders", e)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
aov_dict["beauty"] = full_paths
@ -248,9 +213,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
# Get layer specific settings, might be overrides
data = {
"subset": expected_layer_name,
"attachTo": attachTo,
"attachTo": attach_to,
"setMembers": layer_name,
"multipartExr": exf.multipart,
"multipartExr": ef.multipart,
"publish": True,
"handleStart": handle_start,
@ -292,6 +257,13 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
data[attr] = value
# handle standalone renderers
if render_instance.data.get("vrayScene") is True:
data["families"].append("vrayscene")
if render_instance.data.get("assScene") is True:
data["families"].append("assscene")
# Include (optional) global settings
# Get global overrides and translate to Deadline values
overrides = self.parse_options(str(render_globals))
@ -313,7 +285,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
self._rs.switchToLayer(current_layer)
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without
"""Get all overrides with a value, skip those without.
Here's the kicker. These globals override defaults in the submission
integrator, but an empty value means no overriding is made.
@ -324,8 +296,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
Returns:
dict: only overrides with values
"""
"""
attributes = maya.read(render_globals)
options = {"renderGlobals": {}}
@ -397,601 +369,15 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
return rset.getOverrides()
def get_render_attribute(self, attr, layer):
"""Get attribute from render options.
Args:
attr (str): name of attribute to be looked up.
Returns:
Attribute value
"""
return lib.get_attr_in_layer(
"defaultRenderGlobals.{}".format(attr), layer=layer
)
class ExpectedFiles:
multipart = False
def get(self, renderer, layer):
renderSetup.instance().switchToLayerUsingLegacyName(layer)
if renderer.lower() == "arnold":
return self._get_files(ExpectedFilesArnold(layer))
elif renderer.lower() == "vray":
return self._get_files(ExpectedFilesVray(layer))
elif renderer.lower() == "redshift":
return self._get_files(ExpectedFilesRedshift(layer))
elif renderer.lower() == "mentalray":
return self._get_files(ExpectedFilesMentalray(layer))
elif renderer.lower() == "renderman":
return self._get_files(ExpectedFilesRenderman(layer))
else:
raise UnsupportedRendererException(
"unsupported {}".format(renderer)
)
def _get_files(self, renderer):
files = renderer.get_files()
self.multipart = renderer.multipart
return files
@six.add_metaclass(ABCMeta)
class AExpectedFiles:
renderer = None
layer = None
multipart = False
def __init__(self, layer):
self.layer = layer
@abstractmethod
def get_aovs(self):
pass
def get_renderer_prefix(self):
try:
file_prefix = cmds.getAttr(ImagePrefixes[self.renderer])
except KeyError:
raise UnsupportedRendererException(
"Unsupported renderer {}".format(self.renderer)
)
return file_prefix
def _get_layer_data(self):
# 1 - get scene name
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
# 2 - detect renderer
renderer = self.renderer
# 3 - image prefix
file_prefix = self.get_renderer_prefix()
if not file_prefix:
raise RuntimeError("Image prefix not set")
default_ext = cmds.getAttr("defaultRenderGlobals.imfPluginKey")
# 4 - get renderable cameras
# if we have <camera> token in prefix path we'll expect output for
# every renderable camera in layer.
renderable_cameras = self.get_renderable_cameras()
# 5 - get AOVs
enabled_aovs = self.get_aovs()
layer_name = self.layer
if self.layer.startswith("rs_"):
layer_name = self.layer[3:]
start_frame = int(self.get_render_attribute("startFrame"))
end_frame = int(self.get_render_attribute("endFrame"))
frame_step = int(self.get_render_attribute("byFrameStep"))
padding = int(self.get_render_attribute("extensionPadding"))
scene_data = {
"frameStart": start_frame,
"frameEnd": end_frame,
"frameStep": frame_step,
"padding": padding,
"cameras": renderable_cameras,
"sceneName": scene_name,
"layerName": layer_name,
"renderer": renderer,
"defaultExt": default_ext,
"filePrefix": file_prefix,
"enabledAOVs": enabled_aovs,
}
return scene_data
def _generate_single_file_sequence(self, layer_data, aov_name=None):
expected_files = []
file_prefix = layer_data["filePrefix"]
for cam in layer_data["cameras"]:
mappings = [
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, ""),
]
# this is required to remove unfilled aov token, for example
# in Redshift
if aov_name:
mappings.append((R_SUBSTITUTE_AOV_TOKEN, aov_name))
else:
mappings.append((R_REMOVE_AOV_TOKEN, ""))
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"]),
):
expected_files.append(
"{}.{}.{}".format(
file_prefix,
str(frame).rjust(layer_data["padding"], "0"),
layer_data["defaultExt"],
)
)
return expected_files
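# Hedged example of the substitution above (names illustrative): with
# prefix "maya/<scene>/<layer>/<layer>", scene "shot010_v001", layer
# "CHARS", frames 1-2, padding 4 and extension "exr", the generated
# sequence would be:
#
#   maya/shot010_v001/CHARS/CHARS.0001.exr
#   maya/shot010_v001/CHARS/CHARS.0002.exr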
def _generate_aov_file_sequences(self, layer_data):
expected_files = []
aov_file_list = {}
file_prefix = layer_data["filePrefix"]
for aov in layer_data["enabledAOVs"]:
for cam in layer_data["cameras"]:
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
(R_SUBSTITUTE_AOV_TOKEN, aov[0]),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, ""),
)
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
aov_files = []
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"]),
):
aov_files.append(
"{}.{}.{}".format(
file_prefix,
str(frame).rjust(layer_data["padding"], "0"),
aov[1],
)
)
# if we have more than one renderable camera, append
# camera name to AOV to allow per camera AOVs.
aov_name = aov[0]
if len(layer_data["cameras"]) > 1:
aov_name = "{}_{}".format(aov[0], cam)
aov_file_list[aov_name] = aov_files
file_prefix = layer_data["filePrefix"]
expected_files.append(aov_file_list)
return expected_files
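# Sketch of the structure returned above (values illustrative): a
# single-item list mapping AOV name to its frame sequence, with the
# camera name appended when more than one camera is renderable:
#
#   [{"diffuse": ["maya/scene/layer/layer_diffuse.0001.exr", ...],
#     "specular_camMain": [...]}]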
def get_files(self):
"""
This method will return list of expected files.
It will translate render token strings ('<RenderPass>', etc.) to
their values. This task is tricky as every renderer deals with this
differently. It depends on `get_aovs()` abstract method implemented
for every supported renderer.
"""
layer_data = self._get_layer_data()
expected_files = []
if layer_data.get("enabledAOVs"):
expected_files = self._generate_aov_file_sequences(layer_data)
else:
expected_files = self._generate_single_file_sequence(layer_data)
return expected_files
def get_renderable_cameras(self):
cam_parents = [
cmds.listRelatives(x, ap=True)[-1] for x in cmds.ls(cameras=True)
]
renderable_cameras = []
for cam in cam_parents:
renderable = False
if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam))):
renderable = True
for override in self.get_layer_overrides(
"{}.renderable".format(cam), self.layer
):
renderable = self.maya_is_true(override)
if renderable:
renderable_cameras.append(cam)
return renderable_cameras
def maya_is_true(self, attr_val):
"""
Whether a Maya attr evaluates to True.
When querying an attribute value from an ambiguous object the
Maya API will return a list of values, which need to be properly
handled to evaluate properly.
"""
if isinstance(attr_val, bool):
    return attr_val
elif isinstance(attr_val, (list, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)
def get_layer_overrides(self, attr, layer):
connections = cmds.listConnections(attr, plugs=True)
if connections:
for connection in connections:
if connection:
node_name = connection.split(".")[0]
if cmds.nodeType(node_name) == "renderLayer":
attr_name = "%s.value" % ".".join(
connection.split(".")[:-1]
)
if node_name == layer:
yield cmds.getAttr(attr_name)
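# Hypothetical example: if render layer "rs_CHARS" overrides
# "persp.renderable", the generator above yields the value stored on the
# renderLayer node's corresponding ".value" plug for that connection.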
def get_render_attribute(self, attr):
return lib.get_attr_in_layer(
"defaultRenderGlobals.{}".format(attr), layer=self.layer
)
class ExpectedFilesArnold(AExpectedFiles):
# Arnold AOV driver extension mapping
# Is there a better way?
aiDriverExtension = {
"jpeg": "jpg",
"exr": "exr",
"deepexr": "exr",
"png": "png",
"tiff": "tif",
"mtoa_shaders": "ass", # TODO: research what those last two should be
"maya": "",
}
def __init__(self, layer):
super(ExpectedFilesArnold, self).__init__(layer)
self.renderer = "arnold"
def get_aovs(self):
enabled_aovs = []
try:
if not (
cmds.getAttr("defaultArnoldRenderOptions.aovMode")
and not cmds.getAttr("defaultArnoldDriver.mergeAOVs") # noqa: W503, E501
):
# AOVs are merged in multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
# this occurs when the Render Settings window was not opened yet.
# In that case there are no Arnold options created, so the query
# for AOVs will fail. We terminate here as no AOVs are specified
# then. This state will most probably fail later on some Validator
# anyway.
return enabled_aovs
# AOVs are set to be rendered separately. We should expect
# <RenderPass> token in path.
ai_aovs = [n for n in cmds.ls(type="aiAOV")]
for aov in ai_aovs:
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
ai_driver = cmds.listConnections("{}.outputs".format(aov))[0]
ai_translator = cmds.getAttr("{}.aiTranslator".format(ai_driver))
try:
aov_ext = self.aiDriverExtension[ai_translator]
except KeyError:
msg = "Unrecognized Arnold driver format for AOV - {}".format(
    cmds.getAttr("{}.name".format(aov))
)
raise AOVError(msg)
for override in self.get_layer_overrides(
"{}.enabled".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
# If aov RGBA is selected, arnold will translate it to `beauty`
aov_name = cmds.getAttr("%s.name" % aov)
if aov_name == "RGBA":
aov_name = "beauty"
enabled_aovs.append((aov_name, aov_ext))
# Append 'beauty' as this is Arnold's default. If the <RenderPass>
# token is specified and no AOVs are defined, this will be used.
enabled_aovs.append(
(u"beauty", cmds.getAttr("defaultRenderGlobals.imfPluginKey"))
)
return enabled_aovs
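# Illustrative result (assuming one enabled custom AOV "diffuse" with an
# exr driver and scene image format "exr"): get_aovs() would return
# [("diffuse", "exr"), ("beauty", "exr")] - the trailing "beauty" is the
# unconditional default appended above.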
class ExpectedFilesVray(AExpectedFiles):
# V-ray file extension mapping
# 5 - exr
# 6 - multichannel exr
# 13 - deep exr
def __init__(self, layer):
super(ExpectedFilesVray, self).__init__(layer)
self.renderer = "vray"
def get_renderer_prefix(self):
prefix = super(ExpectedFilesVray, self).get_renderer_prefix()
prefix = "{}_<aov>".format(prefix)
return prefix
def get_files(self):
expected_files = super(ExpectedFilesVray, self).get_files()
# we need to add one sequence for plain beauty if AOVs are enabled,
# as V-Ray outputs beauty without 'beauty' in the filename.
layer_data = self._get_layer_data()
if layer_data.get("enabledAOVs"):
expected_files[0][u"beauty"] = self._generate_single_file_sequence(
layer_data
) # noqa: E501
return expected_files
def get_aovs(self):
enabled_aovs = []
try:
# really? do we set it in vray just by selecting multichannel exr?
if (
cmds.getAttr("vraySettings.imageFormatStr")
== "exr (multichannel)" # noqa: W503
):
# AOVs are merged in multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
# this occurs when the Render Settings window was not opened yet.
# In that case there are no V-Ray settings created, so the query
# will fail. We terminate here as no AOVs are specified then.
# This state will most probably fail later on some Validator
# anyway.
return enabled_aovs
default_ext = cmds.getAttr("vraySettings.imageFormatStr")
if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
default_ext = "exr"
vr_aovs = [
n
for n in cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"]
)
]
# todo: find out how to detect multichannel exr for vray
for aov in vr_aovs:
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
for override in self.get_layer_overrides(
"{}.enabled".format(aov), "rs_{}".format(self.layer)
):
enabled = self.maya_is_true(override)
if enabled:
# todo: find how vray set format for AOVs
enabled_aovs.append(
(self._get_vray_aov_name(aov), default_ext))
return enabled_aovs
def _get_vray_aov_name(self, node):
# Get render element pass type
vray_node_attr = next(
attr
for attr in cmds.listAttr(node)
if attr.startswith("vray_name")
)
pass_type = vray_node_attr.rsplit("_", 1)[-1]
# Support V-Ray extratex explicit name (if set by user)
if pass_type == "extratex":
explicit_attr = "{}.vray_explicit_name_extratex".format(node)
explicit_name = cmds.getAttr(explicit_attr)
if explicit_name:
return explicit_name
# Node type is in the attribute name but we need to check if value
# of the attribute as it can be changed
return cmds.getAttr("{}.{}".format(node, vray_node_attr))
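# Example (hypothetical nodes): a VRayRenderElement with attribute
# "vray_name_diffuse" resolves to the value of that attribute (usually
# "diffuse"); an extratex element with "vray_explicit_name_extratex" set
# to "dirt" resolves to "dirt" instead.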
class ExpectedFilesRedshift(AExpectedFiles):
# mapping redshift extension dropdown values to strings
ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]
# name of aovs that are not merged into resulting exr and we need
# them specified in expectedFiles output.
unmerged_aovs = ["Cryptomatte"]
def __init__(self, layer):
super(ExpectedFilesRedshift, self).__init__(layer)
self.renderer = "redshift"
def get_renderer_prefix(self):
prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix()
prefix = "{}.<aov>".format(prefix)
return prefix
def get_files(self):
expected_files = super(ExpectedFilesRedshift, self).get_files()
# we need to add one sequence for plain beauty if AOVs are enabled,
# as Redshift outputs beauty without 'beauty' in the filename.
layer_data = self._get_layer_data()
if layer_data.get("enabledAOVs"):
expected_files[0][u"beauty"] = self._generate_single_file_sequence(
layer_data
)
# Redshift doesn't merge Cryptomatte AOV into the final exr. We need
# to check for that condition and add it to the list of expected files.
for aov in layer_data.get("enabledAOVs"):
if aov[0].lower() == "cryptomatte":
aov_name = aov[0]
expected_files.append(
{aov_name: self._generate_single_file_sequence(
layer_data, aov_name=aov_name)})
return expected_files
def get_aovs(self):
enabled_aovs = []
try:
default_ext = self.ext_mapping[
cmds.getAttr("redshiftOptions.imageFormat")
]
except ValueError:
# this occurs when the Render Settings window was not opened yet.
# In that case there are no Redshift options created, so the query
# will fail.
raise ValueError("Render settings are not initialized")
rs_aovs = [n for n in cmds.ls(type="RedshiftAOV")]
# todo: find out how to detect multichannel exr for redshift
for aov in rs_aovs:
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
for override in self.get_layer_overrides(
"{}.enabled".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
# If AOVs are merged into multipart exr, append AOV only if it
# is in the list of AOVs that renderer cannot (or will not)
# merge into final exr.
if self.maya_is_true(
cmds.getAttr("redshiftOptions.exrForceMultilayer")
):
if cmds.getAttr("%s.name" % aov) in self.unmerged_aovs:
enabled_aovs.append(
(cmds.getAttr("%s.name" % aov), default_ext)
)
else:
enabled_aovs.append(
(cmds.getAttr("%s.name" % aov), default_ext)
)
if self.maya_is_true(
cmds.getAttr("redshiftOptions.exrForceMultilayer")
):
# AOVs are merged in multi-channel file
self.multipart = True
return enabled_aovs
class ExpectedFilesRenderman(AExpectedFiles):
def __init__(self, layer):
super(ExpectedFilesRenderman, self).__init__(layer)
self.renderer = "renderman"
def get_aovs(self):
enabled_aovs = []
default_ext = "exr"
displays = cmds.listConnections("rmanGlobals.displays")
for aov in displays:
aov_name = str(aov)
if aov_name == "rmanDefaultDisplay":
aov_name = "beauty"
enabled = self.maya_is_true(cmds.getAttr("{}.enable".format(aov)))
for override in self.get_layer_overrides(
"{}.enable".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
enabled_aovs.append((aov_name, default_ext))
return enabled_aovs
def get_files(self):
"""
In renderman we hack it with prepending path. This path would
normally be translated from `rmanGlobals.imageOutputDir`. We skip
this and harcode prepend path we expect. There is no place for user
to mess around with this settings anyway and it is enforced in
render settings validator.
"""
layer_data = self._get_layer_data()
new_aovs = {}
expected_files = super(ExpectedFilesRenderman, self).get_files()
# we always get beauty
for aov, files in expected_files[0].items():
new_files = []
for file in files:
new_file = "{}/{}/{}".format(
layer_data["sceneName"], layer_data["layerName"], file
)
new_files.append(new_file)
new_aovs[aov] = new_files
return [new_aovs]
class ExpectedFilesMentalray(AExpectedFiles):
def __init__(self, layer):
raise UnimplementedRendererException("Mentalray not implemented")
def get_aovs(self):
return []
class AOVError(Exception):
pass
class UnsupportedRendererException(Exception):
pass
class UnimplementedRendererException(Exception):
pass


@ -1,110 +0,0 @@
import os
import pyblish.api
from maya import cmds
from avalon import api
class CollectVRayScene(pyblish.api.ContextPlugin):
"""Collect all information prior for exporting vrscenes
"""
order = pyblish.api.CollectorOrder
label = "Collect VRay Scene"
hosts = ["maya"]
def process(self, context):
# Sort by displayOrder
def sort_by_display_order(layer):
return cmds.getAttr("%s.displayOrder" % layer)
host = api.registered_host()
asset = api.Session["AVALON_ASSET"]
work_dir = context.data["workspaceDir"]
# Get VRay Scene instance
vray_scenes = host.lsattr("family", "vrayscene")
if not vray_scenes:
self.log.info("Skipping vrayScene collection, no "
"vrayscene instance found..")
return
assert len(vray_scenes) == 1, "Multiple vrayscene instances found!"
vray_scene = vray_scenes[0]
vrscene_data = host.read(vray_scene)
assert cmds.ls("vraySettings", type="VRaySettingsNode"), (
"VRay Settings node does not exists. "
"Please ensure V-Ray is the current renderer."
)
# Output data
start_frame = int(cmds.getAttr("defaultRenderGlobals.startFrame"))
end_frame = int(cmds.getAttr("defaultRenderGlobals.endFrame"))
# Create output file path with template
file_name = context.data["currentFile"].replace("\\", "/")
vrscene = ("vrayscene", "<Scene>", "<Scene>_<Layer>", "<Layer>")
vrscene_output = os.path.join(work_dir, *vrscene)
# Check and create render output template for render job
# outputDir is required for submit_publish_job
if not vrscene_data.get("suspendRenderJob", False):
renders = ("renders", "<Scene>", "<Scene>_<Layer>", "<Layer>")
output_renderpath = os.path.join(work_dir, *renders)
vrscene_data["outputDir"] = output_renderpath
# Get resolution
resolution = (cmds.getAttr("defaultResolution.width"),
cmds.getAttr("defaultResolution.height"))
# Get format extension
extension = cmds.getAttr("vraySettings.imageFormatStr")
# Get render layers
render_layers = [i for i in cmds.ls(type="renderLayer") if
cmds.getAttr("{}.renderable".format(i)) and not
cmds.referenceQuery(i, isNodeReferenced=True)]
render_layers = sorted(render_layers, key=sort_by_display_order)
for layer in render_layers:
subset = layer
if subset == "defaultRenderLayer":
subset = "masterLayer"
data = {
"subset": subset,
"setMembers": layer,
"frameStart": start_frame,
"frameEnd": end_frame,
"renderer": "vray",
"resolution": resolution,
"ext": ".{}".format(extension),
# instance subset
"family": "VRay Scene",
"families": ["vrayscene"],
"asset": asset,
"time": api.time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": file_name,
# Store VRay Scene additional data
"vrsceneOutput": vrscene_output
}
data.update(vrscene_data)
instance = context.create_instance(subset)
self.log.info("Created: %s" % instance.name)
instance.data.update(data)


@ -3,26 +3,61 @@
This module is taking care of submitting job from Maya to Deadline. It
creates a job and sets correct environments. Its behavior is controlled by
``DEADLINE_REST_URL`` environment variable - pointing to Deadline Web Service
and :data:`MayaSubmitDeadline.use_published` property telling Deadline to
use published scene workfile or not.
If ``vrscene`` or ``assscene`` are detected in families, it will first
submit a job to export these files and then a dependent job to render them.
Attributes:
payload_skeleton (dict): Skeleton payload data sent as job to Deadline.
Default values are for ``MayaBatch`` plugin.
"""
import os
import json
import getpass
import re
import copy
import clique
import requests
from maya import cmds
from avalon import api
import pyblish.api
from pype.hosts.maya import lib
# Documentation for keys available at:
# https://docs.thinkboxsoftware.com
# /products/deadline/8.0/1_User%20Manual/manual
# /manual-submission.html#job-info-file-options
payload_skeleton = {
"JobInfo": {
"BatchName": None, # Top-level group name
"Name": None, # Job name, as seen in Monitor
"UserName": None,
"Plugin": "MayaBatch",
"Frames": "{start}-{end}x{step}",
"Comment": None,
},
"PluginInfo": {
"SceneFile": None, # Input
"OutputFilePath": None, # Output directory and filename
"OutputFilePrefix": None,
"Version": cmds.about(version=True), # Mandatory for Deadline
"UsingRenderLayers": True,
"RenderLayer": None, # Render only this layer
"Renderer": None,
"ProjectPath": None, # Resolve relative references
},
"AuxFiles": [] # Mandatory for Deadline, may be empty
}
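# A minimal sketch (values illustrative) of how the skeleton is
# specialized per job further below:
#
#   payload = copy.deepcopy(payload_skeleton)
#   payload["JobInfo"]["Name"] = "shot010_v001 - CHARS"
#   payload["JobInfo"]["Frames"] = "1001-1100x1"
#   payload["PluginInfo"]["SceneFile"] = "/path/to/shot010_v001.ma"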
def get_renderer_variables(renderlayer=None):
"""Retrieve the extension which has been set in the VRay settings.
@ -91,7 +126,15 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"""Submit available render layers to Deadline.
Renders are submitted to a Deadline Web Service as
supplied via the environment variable ``DEADLINE_REST_URL``.
Note:
If Deadline configuration is not detected, this plugin will
be disabled.
Attributes:
use_published (bool): Use published scene to render instead of the
one in work area.
"""
@ -108,10 +151,11 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
use_published = True
def process(self, instance):
"""Plugin entry point."""
self._instance = instance
self._deadline_url = os.environ.get(
    "DEADLINE_REST_URL", "http://localhost:8082")
assert self._deadline_url, "Requires DEADLINE_REST_URL"
context = instance.context
workspace = context.data["workspaceDir"]
@ -119,6 +163,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
filepath = None
# Handle render/export from published scene or not ------------------
if self.use_published:
for i in context:
if "workfile" in i.data["families"]:
@ -135,6 +180,9 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
self.log.info("Using published scene for render {}".format(
filepath))
if not os.path.exists(filepath):
self.log.error("published scene does not exist!")
raise
# now we need to switch scene in expected files
# because <scene> token will now point to published
# scene file and that might differ from current one
@ -166,11 +214,11 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
orig_scene, new_scene
))
all_instances = []
for result in context.data["results"]:
    if (result["instance"] is not None and
            result["instance"] not in all_instances):  # noqa: E128
        all_instances.append(result["instance"])
# fallback if nothing was set
if not filepath:
@ -179,6 +227,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
self.log.debug(filepath)
# Gather needed data ------------------------------------------------
filename = os.path.basename(filepath)
comment = context.data.get("comment", "")
dirname = os.path.join(workspace, "renders")
@ -198,68 +247,49 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
output_filename_0 = filename_0
# Create render folder ----------------------------------------------
try:
# Ensure render folder exists
os.makedirs(dirname)
except OSError:
pass
# Fill in common data to payload ------------------------------------
payload_data = {}
payload_data["filename"] = filename
payload_data["filepath"] = filepath
payload_data["jobname"] = jobname
payload_data["deadline_user"] = deadline_user
payload_data["comment"] = comment
payload_data["output_filename_0"] = output_filename_0
payload_data["render_variables"] = render_variables
payload_data["renderlayer"] = renderlayer
payload_data["workspace"] = workspace
payload_data["dirname"] = dirname

frame_pattern = payload_skeleton["JobInfo"]["Frames"]
payload_skeleton["JobInfo"]["Frames"] = frame_pattern.format(
    start=int(self._instance.data["frameStartHandle"]),
    end=int(self._instance.data["frameEndHandle"]),
    step=int(self._instance.data["byFrameStep"]))

payload_skeleton["JobInfo"]["Plugin"] = self._instance.data.get(
    "mayaRenderPlugin", "MayaBatch")

payload_skeleton["JobInfo"]["BatchName"] = filename
# Job name, as seen in Monitor
payload_skeleton["JobInfo"]["Name"] = jobname
# Arbitrary username, for visualisation in Monitor
payload_skeleton["JobInfo"]["UserName"] = deadline_user
# Optional, enable double-click to preview rendered
# frames from Deadline Monitor
payload_skeleton["JobInfo"]["OutputDirectory0"] = \
    os.path.dirname(output_filename_0)
payload_skeleton["JobInfo"]["OutputFilename0"] = \
    output_filename_0.replace("\\", "/")
payload_skeleton["JobInfo"]["Comment"] = comment
payload_skeleton["PluginInfo"]["RenderLayer"] = renderlayer
# Adding file dependencies.
dependencies = instance.context.data["fileDependencies"]
@ -268,28 +298,9 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
self.log.info(dependency)
key = "AssetDependency" + str(dependencies.index(dependency))
self.log.info(key)
payload["JobInfo"][key] = dependency
# Expected files.
exp = instance.data.get("expectedFiles")
OutputFilenames = {}
expIndex = 0
if isinstance(exp[0], dict):
# we have aovs and we need to iterate over them
for aov, files in exp[0].items():
col = clique.assemble(files)[0][0]
outputFile = col.format('{head}{padding}{tail}')
payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile # noqa: E501
OutputFilenames[expIndex] = outputFile
expIndex += 1
else:
col = clique.assemble(files)[0][0]
outputFile = col.format('{head}{padding}{tail}')
payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile
# OutputFilenames[expIndex] = outputFile
payload_skeleton["JobInfo"][key] = dependency
# Handle environments -----------------------------------------------
# We need those to pass them to pype for it to set correct context
keys = [
"FTRACK_API_KEY",
@ -299,33 +310,77 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"AVALON_ASSET",
"AVALON_TASK",
"PYPE_USERNAME",
"PYPE_DEV"
"PYPE_DEV",
"PYPE_LOG_NO_COLORS"
]
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
payload["JobInfo"].update({
environment["PYPE_LOG_NO_COLORS"] = "1"
payload_skeleton["JobInfo"].update({
"EnvironmentKeyValue%d" % index: "{key}={value}".format(
key=key,
value=environment[key]
) for index, key in enumerate(environment)
})
# Add options from RenderGlobals -------------------------------------
render_globals = instance.data.get("renderGlobals", {})
payload_skeleton["JobInfo"].update(render_globals)
# Submit preceding export jobs ---------------------------------------
export_job = None
assert not all(x in instance.data["families"]
for x in ['vrayscene', 'assscene']), (
"Vray Scene and Ass Scene options are mutually exclusive")
if "vrayscene" in instance.data["families"]:
export_job = self._submit_export(payload_data, "vray")
if "assscene" in instance.data["families"]:
export_job = self._submit_export(payload_data, "arnold")
# Prepare main render job -------------------------------------------
if "vrayscene" in instance.data["families"]:
payload = self._get_vray_render_payload(payload_data)
elif "assscene" in instance.data["families"]:
payload = self._get_arnold_render_payload(payload_data)
else:
payload = self._get_maya_payload(payload_data)
# Add export job as dependency --------------------------------------
if export_job:
payload["JobInfo"]["JobDependency0"] = export_job
# Add list of expected files to job ---------------------------------
exp = instance.data.get("expectedFiles")
output_filenames = {}
exp_index = 0
if isinstance(exp[0], dict):
# we have aovs and we need to iterate over them
for _aov, files in exp[0].items():
col = clique.assemble(files)[0][0]
output_file = col.format('{head}{padding}{tail}')
payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501
output_filenames[exp_index] = output_file
exp_index += 1
else:
    col = clique.assemble(exp)[0][0]
    output_file = col.format('{head}{padding}{tail}')
    payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file
plugin = payload["JobInfo"]["Plugin"]
self.log.info("using render plugin : {}".format(plugin))
self.preflight_check(instance)
# Submit job to farm ------------------------------------------------
self.log.info("Submitting ...")
self.log.debug(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(DEADLINE_REST_URL)
url = "{}/api/jobs".format(self._deadline_url)
response = self._requests_post(url, json=payload)
if not response.ok:
raise Exception(response.text)
@ -334,9 +389,209 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
instance.data["outputDir"] = os.path.dirname(filename_0)
instance.data["deadlineSubmissionJob"] = response.json()
def _get_maya_payload(self, data):
payload = copy.deepcopy(payload_skeleton)
job_info_ext = {
# Asset dependency to wait for at least the scene file to sync.
"AssetDependency0": data["filepath"],
}
plugin_info = {
"SceneFile": data["filepath"],
# Output directory and filename
"OutputFilePath": data["dirname"].replace("\\", "/"),
"OutputFilePrefix": data["render_variables"]["filename_prefix"], # noqa: E501
# Only render layers are considered renderable in this pipeline
"UsingRenderLayers": True,
# Render only this layer
"RenderLayer": data["renderlayer"],
# Determine which renderer to use from the file itself
"Renderer": self._instance.data["renderer"],
# Resolve relative references
"ProjectPath": data["workspace"],
}
payload["JobInfo"].update(job_info_ext)
payload["PluginInfo"].update(plugin_info)
return payload
def _get_vray_export_payload(self, data):
payload = copy.deepcopy(payload_skeleton)
job_info_ext = {
# Job name, as seen in Monitor
"Name": "Export {} [{}-{}]".format(
data["jobname"],
int(self._instance.data["frameStartHandle"]),
int(self._instance.data["frameEndHandle"])),
"Plugin": "MayaBatch",
"FramesPerTask": self._instance.data.get("framesPerTask", 1)
}
plugin_info_ext = {
# Renderer
"Renderer": "vray",
# Input
"SceneFile": data["filepath"],
"SkipExistingFrames": True,
"UsingRenderLayers": True,
"UseLegacyRenderLayers": True,
"RenderLayer": data["renderlayer"],
"ProjectPath": data["workspace"]
}
payload["JobInfo"].update(job_info_ext)
payload["PluginInfo"].update(plugin_info_ext)
return payload
def _get_arnold_export_payload(self, data):
try:
from pype.scripts import export_maya_ass_job
except Exception:
raise AssertionError(
"Expected module 'export_maya_ass_job' to be available")
module_path = export_maya_ass_job.__file__
if module_path.endswith(".pyc"):
module_path = module_path[: -len(".pyc")] + ".py"
script = os.path.normpath(module_path)
payload = copy.deepcopy(payload_skeleton)
job_info_ext = {
# Job name, as seen in Monitor
"Name": "Export {} [{}-{}]".format(
data["jobname"],
int(self._instance.data["frameStartHandle"]),
int(self._instance.data["frameEndHandle"])),
"Plugin": "Python",
"FramesPerTask": self._instance.data.get("framesPerTask", 1),
"Frames": 1
}
plugin_info_ext = {
"Version": "3.6",
"ScriptFile": script,
"Arguments": "",
"SingleFrameOnly": "True",
}
payload["JobInfo"].update(job_info_ext)
payload["PluginInfo"].update(plugin_info_ext)
envs = []
for k, v in payload["JobInfo"].items():
if k.startswith("EnvironmentKeyValue"):
envs.append(v)
# add app name to environment
envs.append(
"AVALON_APP_NAME={}".format(os.environ.get("AVALON_APP_NAME")))
envs.append(
"PYPE_ASS_EXPORT_RENDER_LAYER={}".format(data["renderlayer"]))
envs.append(
"PYPE_ASS_EXPORT_SCENE_FILE={}".format(data["filepath"]))
envs.append(
"PYPE_ASS_EXPORT_OUTPUT={}".format(
payload['JobInfo']['OutputFilename0']))
envs.append(
"PYPE_ASS_EXPORT_START={}".format(
int(self._instance.data["frameStartHandle"])))
envs.append(
"PYPE_ASS_EXPORT_END={}".format(
int(self._instance.data["frameEndHandle"])))
envs.append(
"PYPE_ASS_EXPORT_STEP={}".format(1))
for i, env_value in enumerate(envs):
    payload["JobInfo"]["EnvironmentKeyValue{}".format(i)] = env_value
return payload
def _get_vray_render_payload(self, data):
payload = copy.deepcopy(payload_skeleton)
vray_settings = cmds.ls(type="VRaySettingsNode")
node = vray_settings[0]
template = cmds.getAttr("{}.vrscene_filename".format(node))
# "vrayscene/<Scene>/<Scene>_<Layer>/<Layer>"
scene, _ = os.path.splitext(data["filename"])
first_file = self.format_vray_output_filename(scene, template)
first_file = "{}/{}".format(data["workspace"], first_file)
job_info_ext = {
"Name": "Render {} [{}-{}]".format(
data["jobname"],
int(self._instance.data["frameStartHandle"]),
int(self._instance.data["frameEndHandle"])),
"Plugin": "Vray",
"OverrideTaskExtraInfoNames": False,
}
plugin_info = {
"InputFilename": first_file,
"SeparateFilesPerFrame": True,
"VRayEngine": "V-Ray",
"Width": self._instance.data["resolutionWidth"],
"Height": self._instance.data["resolutionHeight"],
}
payload["JobInfo"].update(job_info_ext)
payload["PluginInfo"].update(plugin_info)
return payload
def _get_arnold_render_payload(self, data):
payload = copy.deepcopy(payload_skeleton)
ass_file, _ = os.path.splitext(data["output_filename_0"])
first_file = ass_file + ".ass"
job_info_ext = {
"Name": "Render {} [{}-{}]".format(
data["jobname"],
int(self._instance.data["frameStartHandle"]),
int(self._instance.data["frameEndHandle"])),
"Plugin": "Arnold",
"OverrideTaskExtraInfoNames": False,
}
plugin_info = {
"ArnoldFile": first_file,
}
payload["JobInfo"].update(job_info_ext)
payload["PluginInfo"].update(plugin_info)
return payload
def _submit_export(self, data, format):
if format == "vray":
payload = self._get_vray_export_payload(data)
self.log.info("Submitting vrscene export job.")
elif format == "arnold":
payload = self._get_arnold_export_payload(data)
self.log.info("Submitting ass export job.")
url = "{}/api/jobs".format(self._deadline_url)
response = self._requests_post(url, json=payload)
if not response.ok:
self.log.error("Submition failed!")
self.log.error(response.status_code)
self.log.error(response.content)
self.log.debug(payload)
raise RuntimeError(response.text)
dependency = response.json()
return dependency["_id"]
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers."""
for key in ("frameStartHandle", "frameEndHandle", "byFrameStep"):
value = instance.data[key]
@ -349,14 +604,17 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
)
def _requests_post(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
"""Wrap request post method.
WARNING: disabling SSL certificate validation is defeating one line
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline or Muster server are
running with self-signed certificates and their certificate is not
added to trusted certificates on client machines.
Warning:
Disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
@ -365,17 +623,61 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
"""Wrap request get method.
WARNING: disabling SSL certificate validation is defeating one line
Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
variable is found. This is useful when Deadline or Muster server are
running with self-signed certificates and their certificate is not
added to trusted certificates on client machines.
Warning:
Disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
# add 10sec timeout before bailing out
kwargs['timeout'] = 10
return requests.get(*args, **kwargs)
def format_vray_output_filename(self, filename, template, dir=False):
"""Format the expected output file of the Export job.
Example:
<Scene>/<Scene>_<Layer>/<Layer>
"shot010_v006/shot010_v006_CHARS/CHARS"
Args:
    filename (str): Scene file name (extension is stripped).
    template (str): Output path template with tokens.
    dir (bool): Return only the directory path when True.

Returns:
    str: Formatted output path.
"""
def smart_replace(string, key_values):
new_string = string
for key, value in key_values.items():
new_string = new_string.replace(key, value)
return new_string
# Ensure filename has no extension
file_name, _ = os.path.splitext(filename)
# Reformat without tokens
output_path = smart_replace(
template,
{"<Scene>": file_name,
"<Layer>": self._instance.data['setMembers']})
if dir:
return output_path.replace("\\", "/")
start_frame = int(self._instance.data["frameStartHandle"])
filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame)
result = filename_zero.replace("\\", "/")
return result
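# Worked example (values illustrative): filename "shot010_v006.ma",
# template "vrayscene/<Scene>/<Scene>_<Layer>/<Layer>", setMembers
# "CHARS" and frameStartHandle 1001 yield:
#
#   "vrayscene/shot010_v006/shot010_v006_CHARS/CHARS_1001.vrscene"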

View file

@ -1,293 +0,0 @@
import getpass
import json
import os
from copy import deepcopy
import pyblish.api
from avalon import api
from avalon.vendor import requests
from maya import cmds
class VraySubmitDeadline(pyblish.api.InstancePlugin):
"""Export the scene to `.vrscene` files per frame per render layer
vrscene files will be written out based on the following template:
<project>/vrayscene/<Scene>/<Scene>_<Layer>/<Layer>
A dependency job will be added for each layer to render the frames
through VRay Standalone.
"""
label = "Submit to Deadline ( vrscene )"
order = pyblish.api.IntegratorOrder
hosts = ["maya"]
families = ["vrayscene"]
if not os.environ.get("DEADLINE_REST_URL"):
optional = False
active = False
else:
optional = True
def process(self, instance):
DEADLINE_REST_URL = api.Session.get("DEADLINE_REST_URL",
"http://localhost:8082")
assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
context = instance.context
deadline_url = "{}/api/jobs".format(DEADLINE_REST_URL)
deadline_user = context.data.get("deadlineUser", getpass.getuser())
filepath = context.data["currentFile"]
filename = os.path.basename(filepath)
task_name = "{} - {}".format(filename, instance.name)
batch_name = "{} - (vrscene)".format(filename)
# Get the output template for vrscenes
vrscene_output = instance.data["vrsceneOutput"]
# This is also the input file for the render job
first_file = self.format_output_filename(instance,
filename,
vrscene_output)
start_frame = int(instance.data["frameStart"])
end_frame = int(instance.data["frameEnd"])
# Primary job
self.log.info("Submitting export job ..")
payload = {
"JobInfo": {
# Top-level group name
"BatchName": batch_name,
# Job name, as seen in Monitor
"Name": "Export {} [{}-{}]".format(task_name,
start_frame,
end_frame),
# Arbitrary username, for visualisation in Monitor
"UserName": deadline_user,
"Plugin": "MayaBatch",
"Frames": "{}-{}".format(start_frame, end_frame),
"FramesPerTask": instance.data.get("framesPerTask", 1),
"Comment": context.data.get("comment", ""),
"OutputFilename0": os.path.dirname(first_file),
},
"PluginInfo": {
# Renderer
"Renderer": "vray",
# Mandatory for Deadline
"Version": cmds.about(version=True),
# Input
"SceneFile": filepath,
"SkipExistingFrames": True,
"UsingRenderLayers": True,
"UseLegacyRenderLayers": True
},
# Mandatory for Deadline, may be empty
"AuxFiles": []
}
environment = dict(AVALON_TOOLS="global;python36;maya2018")
environment.update(api.Session.copy())
jobinfo_environment = self.build_jobinfo_environment(environment)
payload["JobInfo"].update(jobinfo_environment)
self.log.info("Job Data:\n{}".format(json.dumps(payload)))
response = self._requests_post(url=deadline_url, json=payload)
if not response.ok:
raise RuntimeError(response.text)
# Secondary job
# Store job to create dependency chain
dependency = response.json()
if instance.data["suspendRenderJob"]:
self.log.info("Skipping render job and publish job")
return
self.log.info("Submitting render job ..")
start_frame = int(instance.data["frameStart"])
end_frame = int(instance.data["frameEnd"])
ext = instance.data.get("ext", "exr")
# Create output directory for renders
render_output = self.format_output_filename(instance,
                                            filename,
                                            instance.data["outputDir"],
                                            dir=True)
self.log.info("Render output: %s" % render_output)
# Update output dir
instance.data["outputDir"] = render_output
# Format output file name
sequence_filename = ".".join([instance.name, ext])
output_filename = os.path.join(render_output, sequence_filename)
# Ensure folder exists:
if not os.path.exists(render_output):
    os.makedirs(render_output)
payload_b = {
"JobInfo": {
"JobDependency0": dependency["_id"],
"BatchName": batch_name,
"Name": "Render {} [{}-{}]".format(task_name,
start_frame,
end_frame),
"UserName": deadline_user,
"Frames": "{}-{}".format(start_frame, end_frame),
"Plugin": "Vray",
"OverrideTaskExtraInfoNames": False,
"OutputFilename0": render_ouput,
},
"PluginInfo": {
"InputFilename": first_file,
"OutputFilename": output_filename,
"SeparateFilesPerFrame": True,
"VRayEngine": "V-Ray",
"Width": instance.data["resolution"][0],
"Height": instance.data["resolution"][1],
},
"AuxFiles": [],
}
# Add vray renderslave to environment
tools = environment["AVALON_TOOLS"] + ";vrayrenderslave"
environment_b = deepcopy(environment)
environment_b["AVALON_TOOLS"] = tools
jobinfo_environment_b = self.build_jobinfo_environment(environment_b)
payload_b["JobInfo"].update(jobinfo_environment_b)
self.log.info(json.dumps(payload_b))
# Post job to deadline
response_b = self._requests_post(url=deadline_url, json=payload_b)
if not response_b.ok:
raise RuntimeError(response_b.text)
# Add job for publish job
if not instance.data.get("suspendPublishJob", False):
instance.data["deadlineSubmissionJob"] = response_b.json()
def build_command(self, instance):
"""Create command for Render.exe to export vray scene
Args:
instance
Returns:
str
"""
cmd = ('-r vray -proj {project} -cam {cam} -noRender -s {startFrame} '
'-e {endFrame} -rl {layer} -exportFramesSeparate')
# Get the camera
camera = instance.data["cameras"][0]
return cmd.format(project=instance.context.data["workspaceDir"],
                  cam=camera,
startFrame=instance.data["frameStart"],
endFrame=instance.data["frameEnd"],
layer=instance.name)
def build_jobinfo_environment(self, env):
"""Format environment keys and values to match Deadline rquirements
Args:
env(dict): environment dictionary
Returns:
dict
"""
return {"EnvironmentKeyValue%d" % index: "%s=%s" % (k, env[k])
for index, k in enumerate(env)}
def format_output_filename(self, instance, filename, template, dir=False):
"""Format the expected output file of the Export job
Example:
<Scene>/<Scene>_<Layer>/<Layer>
"shot010_v006/shot010_v006_CHARS/CHARS"
Args:
    instance: Publish instance.
    filename (str): Scene file name.
    template (str): Output path template with tokens.
    dir (bool): Return only the directory path when True.

Returns:
    str: Formatted output path.
"""
def smart_replace(string, key_values):
new_string = string
for key, value in key_values.items():
new_string = new_string.replace(key, value)
return new_string
# Ensure filename has no extension
file_name, _ = os.path.splitext(filename)
# Reformat without tokens
output_path = smart_replace(template,
{"<Scene>": file_name,
"<Layer>": instance.name})
if dir:
return output_path.replace("\\", "/")
start_frame = int(instance.data["frameStart"])
filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame)
result = filename_zero.replace("\\", "/")
return result
def _requests_post(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
return requests.post(*args, **kwargs)


@ -0,0 +1,12 @@
from avalon import photoshop
class CreateImage(photoshop.Creator):
"""Image folder for publish."""
name = "imageDefault"
label = "Image"
family = "image"
def __init__(self, *args, **kwargs):
super(CreateImage, self).__init__(*args, **kwargs)


@ -0,0 +1,43 @@
from avalon import api, photoshop
class ImageLoader(api.Loader):
"""Load images
Stores the imported asset in a container named after the asset.
"""
families = ["image"]
representations = ["*"]
def load(self, context, name=None, namespace=None, data=None):
with photoshop.maintained_selection():
layer = photoshop.import_smart_object(self.fname)
self[:] = [layer]
return photoshop.containerise(
name,
namespace,
layer,
context,
self.__class__.__name__
)
def update(self, container, representation):
layer = container.pop("layer")
with photoshop.maintained_selection():
photoshop.replace_smart_object(
layer, api.get_representation_path(representation)
)
photoshop.imprint(
layer, {"representation": str(representation["_id"])}
)
def remove(self, container):
container["layer"].Delete()
def switch(self, container, representation):
self.update(container, representation)


@ -0,0 +1,17 @@
import os
import pyblish.api
from avalon import photoshop
class CollectCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Current File"
hosts = ["photoshop"]
def process(self, context):
context.data["currentFile"] = os.path.normpath(
photoshop.app().ActiveDocument.FullName
).replace("\\", "/")


@ -0,0 +1,56 @@
import pythoncom
from avalon import photoshop
import pyblish.api
class CollectInstances(pyblish.api.ContextPlugin):
"""Gather instances by LayerSet and file metadata
This collector takes into account assets that are associated with
an LayerSet and marked with a unique identifier;
Identifier:
id (str): "pyblish.avalon.instance"
"""
label = "Instances"
order = pyblish.api.CollectorOrder
hosts = ["photoshop"]
families_mapping = {
"image": []
}
def process(self, context):
# Necessary call when running in a different thread which pyblish-qml
# can be.
pythoncom.CoInitialize()
for layer in photoshop.get_layers_in_document():
layer_data = photoshop.read(layer)
# Skip layers without metadata.
if layer_data is None:
continue
# Skip containers.
if "container" in layer_data["id"]:
continue
child_layers = [*layer.Layers]
if not child_layers:
self.log.info("%s skipped, it was empty." % layer.Name)
continue
instance = context.create_instance(layer.Name)
instance.append(layer)
instance.data.update(layer_data)
instance.data["families"] = self.families_mapping[
layer_data["family"]
]
instance.data["publish"] = layer.Visible
# Produce diagnostic message for any graphical
# user interface interested in visualising it.
self.log.info("Found: \"%s\" " % instance.data["name"])


@ -0,0 +1,39 @@
import pyblish.api
import os
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Collect current script for publish."""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Workfile"
hosts = ["photoshop"]
def process(self, context):
family = "workfile"
task = os.getenv("AVALON_TASK", None)
subset = family + task.capitalize()
file_path = context.data["currentFile"]
staging_dir = os.path.dirname(file_path)
base_name = os.path.basename(file_path)
# Create instance
instance = context.create_instance(subset)
instance.data.update({
"subset": subset,
"label": base_name,
"name": base_name,
"family": family,
"families": [],
"representations": [],
"asset": os.environ["AVALON_ASSET"]
})
# creating representation
instance.data["representations"].append({
"name": "psd",
"ext": "psd",
"files": base_name,
"stagingDir": staging_dir,
})


@ -0,0 +1,62 @@
import os
import pype.api
from avalon import photoshop
class ExtractImage(pype.api.Extractor):
"""Produce a flattened image file from instance
This plug-in takes into account only the layers in the group.
"""
label = "Extract Image"
hosts = ["photoshop"]
families = ["image"]
def process(self, instance):
staging_dir = self.staging_dir(instance)
self.log.info("Outputting image to {}".format(staging_dir))
# Perform extraction
files = {}
with photoshop.maintained_selection():
self.log.info("Extracting %s" % str(list(instance)))
with photoshop.maintained_visibility():
# Hide all other layers.
extract_ids = [
x.id for x in photoshop.get_layers_in_layers([instance[0]])
]
for layer in photoshop.get_layers_in_document():
if layer.id not in extract_ids:
layer.Visible = False
save_options = {
"png": photoshop.com_objects.PNGSaveOptions(),
"jpg": photoshop.com_objects.JPEGSaveOptions()
}
for extension, save_option in save_options.items():
photoshop.app().ActiveDocument.SaveAs(
staging_dir, save_option, True
)
files[extension] = "{} copy.{}".format(
os.path.splitext(
photoshop.app().ActiveDocument.Name
)[0],
extension
)
representations = []
for extension, filename in files.items():
representations.append({
"name": extension,
"ext": extension,
"files": filename,
"stagingDir": staging_dir
})
instance.data["representations"] = representations
instance.data["stagingDir"] = staging_dir
self.log.info(f"Extracted {instance} to {staging_dir}")


@ -0,0 +1,14 @@
import pype.api
from avalon import photoshop
class ExtractSaveScene(pype.api.Extractor):
"""Save scene before extraction."""
order = pype.api.Extractor.order - 0.49
label = "Extract Save Scene"
hosts = ["photoshop"]
families = ["workfile"]
def process(self, instance):
photoshop.app().ActiveDocument.Save()


@ -0,0 +1,48 @@
import os
import pyblish.api
import pype.api
from avalon import photoshop
class ValidateInstanceAssetRepair(pyblish.api.Action):
"""Repair the instance asset."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
for instance in instances:
data = photoshop.read(instance[0])
data["asset"] = os.environ["AVALON_ASSET"]
photoshop.imprint(instance[0], data)
class ValidateInstanceAsset(pyblish.api.InstancePlugin):
"""Validate the instance asset is the current asset."""
label = "Validate Instance Asset"
hosts = ["photoshop"]
actions = [ValidateInstanceAssetRepair]
order = pype.api.ValidateContentsOrder
def process(self, instance):
instance_asset = instance.data["asset"]
current_asset = os.environ["AVALON_ASSET"]
msg = (
"Instance asset is not the same as current asset:"
f"\nInstance: {instance_asset}\nCurrent: {current_asset}"
)
assert instance_asset == current_asset, msg


@ -0,0 +1,101 @@
"""This module is used for command line exporting of ASS files."""
import os
import argparse
import logging
import subprocess
import platform
try:
from shutil import which
except ImportError:
# we are in python < 3.3
def which(command):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, command)
if os.path.exists(p) and os.access(p, os.X_OK):
return p
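# e.g. which("pype.bat") might return "C:\\pype-setup\\pype.bat" when it
# is on PATH, or None otherwise (path illustrative).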
logging.basicConfig()
log = logging.getLogger("Export Maya ASS Job")
log.setLevel(logging.DEBUG)
error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"
def __main__():
parser = argparse.ArgumentParser()
parser.add_argument("--paths",
nargs="*",
default=[],
help="The filepaths to publish. This can be a "
"directory or a path to a .json publish "
"configuration.")
parser.add_argument("--gui",
default=False,
action="store_true",
help="Whether to run Pyblish in GUI mode.")
parser.add_argument("--pype", help="Pype root")
kwargs, args = parser.parse_known_args()
print("Running pype ...")
auto_pype_root = os.path.dirname(os.path.abspath(__file__))
auto_pype_root = os.path.abspath(
    os.path.join(auto_pype_root, "..", "..", "..", "..", ".."))
auto_pype_root = os.environ.get('PYPE_SETUP_PATH') or auto_pype_root
if os.environ.get('PYPE_SETUP_PATH'):
print("Got Pype location from environment: {}".format(
os.environ.get('PYPE_SETUP_PATH')))
pype_command = "pype.ps1"
if platform.system().lower() == "linux":
pype_command = "pype"
elif platform.system().lower() == "windows":
pype_command = "pype.bat"
if kwargs.pype:
pype_root = kwargs.pype
else:
# test if pype.bat / pype is in the PATH
# if it is, which() will return its path and we use that.
# if not, we use auto_pype_root path. Caveat of that one is
# that it can be UNC path and that will not work on windows.
pype_path = which(pype_command)
if pype_path:
pype_root = os.path.dirname(pype_path)
else:
pype_root = auto_pype_root
print("Set pype root to: {}".format(pype_root))
print("Paths: {}".format(kwargs.paths or [os.getcwd()]))
# paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()] # noqa
mayabatch = os.environ.get("AVALON_APP_NAME").replace("maya", "mayabatch")
args = [
os.path.join(pype_root, pype_command),
"launch",
"--app",
mayabatch,
"-script",
os.path.join(pype_root, "repos", "pype",
"pype", "scripts", "export_maya_ass_sequence.mel")
]
print("Pype command: {}".format(" ".join(args)))
# Forcing forwarding of the environment because environment inheritance
# does not always work.
# Cast all values in environment to str to be safe
env = {k: str(v) for k, v in os.environ.items()}
exit_code = subprocess.call(args, env=env)
if exit_code != 0:
raise RuntimeError("Publishing failed.")
if __name__ == '__main__':
__main__()
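# Hypothetical invocation (paths illustrative):
#
#   python export_maya_ass_job.py --paths /work/publish/config.json \
#       --pype /opt/pype-setup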


@ -0,0 +1,67 @@
/*
Script to export specified layer as ass files.
Attributes:
scene_file (str): Name of the scene to load.
start (int): Start frame.
end (int): End frame.
step (int): Step size.
output_path (str): File output path.
render_layer (str): Name of render layer.
*/
$scene_file=`getenv "PYPE_ASS_EXPORT_SCENE_FILE"`;
$step=`getenv "PYPE_ASS_EXPORT_STEP"`;
$start=`getenv "PYPE_ASS_EXPORT_START"`;
$end=`getenv "PYPE_ASS_EXPORT_END"`;
$file_path=`getenv "PYPE_ASS_EXPORT_OUTPUT"`;
$render_layer = `getenv "PYPE_ASS_EXPORT_RENDER_LAYER"`;
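// Illustrative environment (values assumed), as set by the submitting
// Deadline job:
//   PYPE_ASS_EXPORT_SCENE_FILE=/work/shot010_v001.ma
//   PYPE_ASS_EXPORT_RENDER_LAYER=rs_CHARS
//   PYPE_ASS_EXPORT_OUTPUT=/work/renders/CHARS.ass
//   PYPE_ASS_EXPORT_START=1001  PYPE_ASS_EXPORT_END=1100  PYPE_ASS_EXPORT_STEP=1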
print("*** ASS Export Plugin\n");
if ($scene_file == "") {
print("!!! cannot determine scene file\n");
quit -a -ex -1;
}
if ($step == "") {
print("!!! cannot determine step size\n");
quit -a -ex -1;
}
if ($start == "") {
print("!!! cannot determine start frame\n");
quit -a -ex -1;
}
if ($end == "") {
print("!!! cannot determine end frame\n");
quit -a -ex -1;
}
if ($file_path == "") {
print("!!! cannot determine output file\n");
quit -a -ex -1;
}
if ($render_layer == "") {
print("!!! cannot determine render layer\n");
quit -a -ex -1;
}
print(">>> Opening Scene [ " + $scene_file + " ]\n");
// open scene
file -o -f $scene_file;
// switch to render layer
print(">>> Switching layer [ "+ $render_layer + " ]\n");
editRenderLayerGlobals -currentRenderLayer $render_layer;
// export
print(">>> Exporting to [ " + $file_path + " ]\n");
arnoldExportAss -mask 255 -sl 1 -ll 1 -bb 1 -sf $start -se $end -b -fs $step;
print("--- Done\n");

BIN res/app_icons/photoshop.png (new binary file, 10 KiB, not shown)