diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py
index 33a1e82ba6..b0293c94f9 100644
--- a/pype/plugins/global/publish/collect_filesequences.py
+++ b/pype/plugins/global/publish/collect_filesequences.py
@@ -95,6 +95,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder - 0.0001
targets = ["filesequence"]
label = "RenderedFrames"
+ active = False
def process(self, context):
pixel_aspect = 1
diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py
new file mode 100644
index 0000000000..010cf44c15
--- /dev/null
+++ b/pype/plugins/global/publish/collect_rendered_files.py
@@ -0,0 +1,94 @@
+import os
+import json
+
+import pyblish.api
+from avalon import api
+
+from pypeapp import PypeLauncher
+
+
+class CollectRenderedFiles(pyblish.api.ContextPlugin):
+ """
+    This collector tries to find json files in the paths provided by the
+    `PYPE_PUBLISH_DATA` environment variable. Those files _MUST_ share
+    the same context.
+
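+    Example of expected file content (a sketch only - key names are taken
+    from the checks below, values are purely illustrative):
+
+        {
+            "asset": "sh010",
+            "user": "john.doe",
+            "intent": "WIP",
+            "comment": "",
+            "version": 3,
+            "job": {},
+            "session": {},
+            "instances": [
+                {"subset": "renderMain", "representations": []}
+            ]
+        }
+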
+ """
+ order = pyblish.api.CollectorOrder - 0.0001
+ targets = ["filesequence"]
+ label = "Collect rendered frames"
+
+ _context = None
+
+ def _load_json(self, path):
+ assert os.path.isfile(path), ("path to json file doesn't exist")
+ data = None
+ with open(path, "r") as json_file:
+ try:
+ data = json.load(json_file)
+ except Exception as exc:
+ self.log.error(
+ "Error loading json: "
+ "{} - Exception: {}".format(path, exc)
+ )
+ return data
+
+ def _process_path(self, data):
+ # validate basic necessary data
+ data_err = "invalid json file - missing data"
+ required = ["asset", "user", "intent", "comment",
+ "job", "instances", "session", "version"]
+ assert all(elem in data.keys() for elem in required), data_err
+
+ # set context by first json file
+ ctx = self._context.data
+
+ ctx["asset"] = ctx.get("asset") or data.get("asset")
+ ctx["intent"] = ctx.get("intent") or data.get("intent")
+ ctx["comment"] = ctx.get("comment") or data.get("comment")
+ ctx["user"] = ctx.get("user") or data.get("user")
+ ctx["version"] = ctx.get("version") or data.get("version")
+
+ # basic sanity check to see if we are working in same context
+ # if some other json file has different context, bail out.
+ ctx_err = "inconsistent contexts in json files - %s"
+ assert ctx.get("asset") == data.get("asset"), ctx_err % "asset"
+ assert ctx.get("intent") == data.get("intent"), ctx_err % "intent"
+ assert ctx.get("comment") == data.get("comment"), ctx_err % "comment"
+ assert ctx.get("user") == data.get("user"), ctx_err % "user"
+ assert ctx.get("version") == data.get("version"), ctx_err % "version"
+
+ # ftrack credentials are passed as environment variables by Deadline
+ # to publish job, but Muster doesn't pass them.
+ if data.get("ftrack") and not os.environ.get("FTRACK_API_USER"):
+ ftrack = data.get("ftrack")
+ os.environ["FTRACK_API_USER"] = ftrack["FTRACK_API_USER"]
+ os.environ["FTRACK_API_KEY"] = ftrack["FTRACK_API_KEY"]
+ os.environ["FTRACK_SERVER"] = ftrack["FTRACK_SERVER"]
+
+ # now we can just add instances from json file and we are done
+ for instance in data.get("instances"):
+ self.log.info(" - processing instance for {}".format(
+ instance.get("subset")))
+ i = self._context.create_instance(instance.get("subset"))
+ self.log.info("remapping paths ...")
+ i.data["representations"] = [PypeLauncher().path_remapper(
+ data=r) for r in instance.get("representations")]
+ i.data.update(instance)
+
+ def process(self, context):
+ self._context = context
+
+ assert os.environ.get("PYPE_PUBLISH_DATA"), (
+ "Missing `PYPE_PUBLISH_DATA`")
+ paths = os.environ["PYPE_PUBLISH_DATA"].split(os.pathsep)
+
+ session_set = False
+        for path in paths:
+            data = self._load_json(path)
+            assert data, "failed to load json file"
+            if not session_set:
+                self.log.info("Setting session using data from file")
+                api.Session.update(data.get("session"))
+                os.environ.update(data.get("session"))
+                session_set = True
+            self._process_path(data)
diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py
index 4978649ba2..ce9c043c45 100644
--- a/pype/plugins/global/publish/extract_jpeg.py
+++ b/pype/plugins/global/publish/extract_jpeg.py
@@ -1,20 +1,11 @@
import os
import pyblish.api
-import clique
import pype.api
class ExtractJpegEXR(pyblish.api.InstancePlugin):
- """Resolve any dependency issues
-
- This plug-in resolves any paths which, if not updated might break
- the published file.
-
- The order of families is important, when working with lookdev you want to
- first publish the texture, update the texture paths in the nodes and then
- publish the shading network. Same goes for file dependent assets.
- """
+ """Create jpg thumbnail from sequence using ffmpeg"""
label = "Extract Jpeg EXR"
hosts = ["shell"]
@@ -23,11 +14,6 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
enabled = False
def process(self, instance):
- start = instance.data.get("frameStart")
- stagingdir = os.path.normpath(instance.data.get("stagingDir"))
-
- collected_frames = os.listdir(stagingdir)
- collections, remainder = clique.assemble(collected_frames)
self.log.info("subset {}".format(instance.data['subset']))
if 'crypto' in instance.data['subset']:
@@ -44,6 +30,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
if 'review' not in repre['tags']:
return
+ stagingdir = os.path.normpath(repre.get("stagingDir"))
input_file = repre['files'][0]
# input_file = (
diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index 35d6bf5c4a..7592423a08 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -1,16 +1,27 @@
import os
import json
import re
-import logging
+from copy import copy
from avalon import api, io
from avalon.vendor import requests, clique
import pyblish.api
-
+# regex for finding frame number in string
R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
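+# e.g. "renderMain.0001.exr" matches, with "0001" captured as the frame group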
+# mapping of instance properties to be transferred to the new instance for
+# every specified family
+instance_transfer = {
+    "slate": ["slateFrame"],
+    "review": ["lutPath"],
+    "render.farm": ["bakeScriptPath", "bakeRenderPath", "bakeWriteNodeName"]
+}
+
+# list of family names to transfer to new family if present
+families_transfer = ["render2d", "ftrack", "slate"]
+
def _get_script():
"""Get path to the image sequence script"""
@@ -217,9 +228,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
environment["PYPE_METADATA_FILE"] = metadata_path
i = 0
for index, key in enumerate(environment):
- self.log.info("KEY: {}".format(key))
- self.log.info("FILTER: {}".format(self.enviro_filter))
-
if key.upper() in self.enviro_filter:
payload["JobInfo"].update(
{
@@ -235,8 +243,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
payload["JobInfo"]["Pool"] = "none"
payload["JobInfo"].pop("SecondaryPool", None)
- self.log.info("Submitting..")
- self.log.info(json.dumps(payload, indent=4, sort_keys=True))
+ self.log.info("Submitting Deadline job ...")
+ # self.log.info(json.dumps(payload, indent=4, sort_keys=True))
url = "{}/api/jobs".format(self.DEADLINE_REST_URL)
response = requests.post(url, json=payload)
@@ -251,6 +259,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
:param instance: instance to get required data from
:type instance: pyblish.plugin.Instance
"""
+
import speedcopy
self.log.info("Preparing to copy ...")
@@ -311,13 +320,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
self.log.info(
"Finished copying %i files" % len(resource_files))
- def _create_instances_for_aov(self, context, instance_data, exp_files):
+ def _create_instances_for_aov(self, instance_data, exp_files):
"""
This will create new instance for every aov it can detect in expected
files list.
- :param context: context of orignal instance to get important data
- :type context: pyblish.plugin.Context
:param instance_data: skeleton data for instance (those needed) later
by collector
:type instance_data: pyblish.plugin.Instance
@@ -326,11 +333,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
:returns: list of instances
:rtype: list(publish.plugin.Instance)
"""
+
task = os.environ["AVALON_TASK"]
subset = instance_data["subset"]
instances = []
# go through aovs in expected files
- for aov, files in exp_files.items():
+ for aov, files in exp_files[0].items():
cols, rem = clique.assemble(files)
# we shouldn't have any reminders
if rem:
@@ -339,7 +347,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"in sequence: {}".format(rem))
# but we really expect only one collection, nothing else make sense
- self.log.error("got {} sequence type".format(len(cols)))
assert len(cols) == 1, "only one image sequence type is expected"
# create subset name `familyTaskSubset_AOV`
@@ -352,7 +359,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
start = int(instance_data.get("frameStart"))
end = int(instance_data.get("frameEnd"))
- new_instance = self.context.create_instance(subset_name)
+ self.log.info("Creating data for: {}".format(subset_name))
+
app = os.environ.get("AVALON_APP", "")
preview = False
@@ -360,13 +368,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
if aov in self.aov_filter[app]:
preview = True
- new_instance.data.update(instance_data)
- new_instance.data["subset"] = subset_name
+ new_instance = copy(instance_data)
+ new_instance["subset"] = subset_name
+
ext = cols[0].tail.lstrip(".")
# create represenation
rep = {
- "name": ext,
+ "name": aov,
"ext": ext,
"files": [os.path.basename(f) for f in list(cols[0])],
"frameStart": start,
@@ -374,26 +383,25 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# If expectedFile are absolute, we need only filenames
"stagingDir": staging,
"anatomy_template": "render",
- "fps": new_instance.data.get("fps"),
+ "fps": new_instance.get("fps"),
"tags": ["review", "preview"] if preview else []
}
# add tags
if preview:
- if "ftrack" not in new_instance.data["families"]:
+ if "ftrack" not in new_instance["families"]:
if os.environ.get("FTRACK_SERVER"):
- new_instance.data["families"].append("ftrack")
- if "review" not in new_instance.data["families"]:
- new_instance.data["families"].append("review")
+ new_instance["families"].append("ftrack")
+ if "review" not in new_instance["families"]:
+ new_instance["families"].append("review")
- new_instance.data["representations"] = [rep]
- instances.append(new_instance)
+ new_instance["representations"] = [rep]
# if extending frames from existing version, copy files from there
# into our destination directory
- if instance_data.get("extendFrames", False):
+ if new_instance.get("extendFrames", False):
self._copy_extend_frames(new_instance, rep)
-
+ instances.append(new_instance)
return instances
def _get_representations(self, instance, exp_files):
@@ -409,9 +417,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
:returns: list of representations
:rtype: list(dict)
"""
+
representations = []
- start = int(instance.data.get("frameStart"))
- end = int(instance.data.get("frameEnd"))
+ start = int(instance.get("frameStart"))
+ end = int(instance.get("frameEnd"))
cols, rem = clique.assemble(exp_files)
# create representation for every collected sequence
for c in cols:
@@ -438,15 +447,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# If expectedFile are absolute, we need only filenames
"stagingDir": os.path.dirname(list(c)[0]),
"anatomy_template": "render",
- "fps": instance.data.get("fps"),
+ "fps": instance.get("fps"),
"tags": ["review", "preview"] if preview else [],
}
representations.append(rep)
- # TODO: implement extendFrame
-
- families = instance.data.get("families")
+ families = instance.get("families")
# if we have one representation with preview tag
# flag whole instance for review and for ftrack
if preview:
@@ -455,7 +462,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
families.append("ftrack")
if "review" not in families:
families.append("review")
- instance.data["families"] = families
+ instance["families"] = families
# add reminders as representations
for r in rem:
@@ -536,7 +543,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
)
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
- regex = None
families = ["render"]
@@ -550,94 +556,138 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"fps": data.get("fps", 25),
"source": source,
"extendFrames": data.get("extendFrames"),
- "overrideExistingFrame": data.get("overrideExistingFrame")
+ "overrideExistingFrame": data.get("overrideExistingFrame"),
+ "pixelAspect": data.get("pixelAspect", 1),
+ "resolutionWidth": data.get("resolutionWidth", 1920),
+ "resolutionHeight": data.get("resolutionHeight", 1080),
}
+ # transfer specific families from original instance to new render
+ if "render2d" in instance.data.get("families", []):
+ instance_skeleton_data["families"] += ["render2d"]
+
+ if "ftrack" in instance.data.get("families", []):
+ instance_skeleton_data["families"] += ["ftrack"]
+
+ if "slate" in instance.data.get("families", []):
+ instance_skeleton_data["families"] += ["slate"]
+
+ # transfer specific properties from original instance based on
+ # mapping dictionary `instance_transfer`
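+        # e.g. for an instance with the "slate" family this copies its
+        # "slateFrame" value into the skeleton data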
+ for key, values in instance_transfer.items():
+ if key in instance.data.get("families", []):
+ for v in values:
+ instance_skeleton_data[v] = instance.data.get(v)
+
instances = None
- if data.get("expectedFiles"):
- """
- if content of `expectedFiles` are dictionaries, we will handle
- it as list of AOVs, creating instance from every one of them.
+ assert data.get("expectedFiles"), ("Submission from old Pype version"
+ " - missing expectedFiles")
- Example:
- --------
+ """
+        If the content of `expectedFiles` is a list of dictionaries, we will
+        handle it as a list of AOVs, creating an instance for each of them.
- expectedFiles = [
- {
- "beauty": [
- "foo_v01.0001.exr",
- "foo_v01.0002.exr"
- ],
- "Z": [
- "boo_v01.0001.exr",
- "boo_v01.0002.exr"
- ]
- }
- ]
+ Example:
+ --------
- This will create instances for `beauty` and `Z` subset
- adding those files to their respective representations.
+ expectedFiles = [
+ {
+ "beauty": [
+ "foo_v01.0001.exr",
+ "foo_v01.0002.exr"
+ ],
- If we've got only list of files, we collect all filesequences.
- More then one doesn't probably make sense, but we'll handle it
- like creating one instance with multiple representations.
+ "Z": [
+ "boo_v01.0001.exr",
+ "boo_v01.0002.exr"
+ ]
+ }
+ ]
- Example:
- --------
+ This will create instances for `beauty` and `Z` subset
+ adding those files to their respective representations.
- expectedFiles = [
- "foo_v01.0001.exr",
- "foo_v01.0002.exr",
- "xxx_v01.0001.exr",
- "xxx_v01.0002.exr"
- ]
+        If we've got only a list of files, we collect all file sequences.
+        More than one probably doesn't make sense, but we'll handle it
+        by creating one instance with multiple representations.
- This will result in one instance with two representations:
- `foo` and `xxx`
- """
- if isinstance(data.get("expectedFiles")[0], dict):
- instances = self._create_instances_for_aov(
- instance_skeleton_data,
- data.get("expectedFiles"))
- else:
- representations = self._get_representations(
- instance_skeleton_data,
- data.get("expectedFiles")
- )
+ Example:
+ --------
- if "representations" not in instance.data:
- data["representations"] = []
+ expectedFiles = [
+ "foo_v01.0001.exr",
+ "foo_v01.0002.exr",
+ "xxx_v01.0001.exr",
+ "xxx_v01.0002.exr"
+ ]
- # add representation
- data["representations"] += representations
+ This will result in one instance with two representations:
+ `foo` and `xxx`
+ """
+
+ if isinstance(data.get("expectedFiles")[0], dict):
+            # we cannot attach AOVs to other subsets as we consider every
+            # AOV a subset of its own.
+
+ if len(data.get("attachTo")) > 0:
+                assert len(data.get("expectedFiles")[0].keys()) == 1, (
+ "attaching multiple AOVs or renderable cameras to "
+ "subset is not supported")
+
+ # create instances for every AOV we found in expected files.
+            # note: this is done for every AOV and every render camera (if
+            # there are multiple renderable cameras in the scene)
+ instances = self._create_instances_for_aov(
+ instance_skeleton_data,
+ data.get("expectedFiles"))
+ self.log.info("got {} instance{}".format(
+ len(instances),
+ "s" if len(instances) > 1 else ""))
else:
- # deprecated: passing regex is depecated. Please use
- # `expectedFiles` and collect them.
- if "ext" in instance.data:
- ext = r"\." + re.escape(instance.data["ext"])
- else:
- ext = r"\.\D+"
+ representations = self._get_representations(
+ instance_skeleton_data,
+ data.get("expectedFiles")
+ )
- regex = r"^{subset}.*\d+{ext}$".format(
- subset=re.escape(subset), ext=ext)
+ if "representations" not in instance_skeleton_data:
+ instance_skeleton_data["representations"] = []
+
+ # add representation
+ instance_skeleton_data["representations"] += representations
+ instances = [instance_skeleton_data]
+
+        # if we are attaching to other subsets, create a copy of the existing
+        # instances, change their data to match that subset and replace the
+        # existing instances with the modified ones
+ if instance.data.get("attachTo"):
+ self.log.info("Attaching render to subset:")
+ new_instances = []
+ for at in instance.data.get("attachTo"):
+ for i in instances:
+ new_i = copy(i)
+ new_i["version"] = at.get("version")
+ new_i["subset"] = at.get("subset")
+ new_i["families"].append(at.get("family"))
+ new_instances.append(new_i)
+ self.log.info(" - {} / v{}".format(
+ at.get("subset"), at.get("version")))
+ instances = new_instances
- # Write metadata for publish job
# publish job file
publish_job = {
"asset": asset,
"frameStart": start,
"frameEnd": end,
"fps": context.data.get("fps", None),
- "families": families,
"source": source,
"user": context.data["user"],
- "version": context.data["version"],
+ "version": context.data["version"], # this is workfile version
"intent": context.data.get("intent"),
"comment": context.data.get("comment"),
"job": render_job,
"session": api.Session.copy(),
- "instances": instances or [data]
+ "instances": instances
}
# pass Ftrack credentials in case of Muster
@@ -649,14 +699,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
}
publish_job.update({"ftrack": ftrack})
- if regex:
- publish_job["regex"] = regex
-
# Ensure output dir exists
output_dir = instance.data["outputDir"]
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
+ metadata_filename = "{}_metadata.json".format(subset)
+
+ metadata_path = os.path.join(output_dir, metadata_filename)
+ self.log.info("Writing json file: {}".format(metadata_path))
+ with open(metadata_path, "w") as f:
+ json.dump(publish_job, f, indent=4, sort_keys=True)
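+        # the file written here (named e.g. "renderMain_metadata.json") is
+        # what CollectRenderedFiles is expected to pick up later through
+        # `PYPE_PUBLISH_DATA`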
+
def _extend_frames(self, asset, subset, start, end, override):
"""
This will get latest version of asset and update frame range based
diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py
index 37e1d0d7b1..1188669d00 100644
--- a/pype/plugins/maya/publish/collect_render.py
+++ b/pype/plugins/maya/publish/collect_render.py
@@ -1,7 +1,46 @@
+"""
+This collector will go through render layers in Maya and prepare all data
+needed to create instances and their representations for submission and
+publishing on the farm.
+
+Requires:
+ instance -> families
+ instance -> setMembers
+
+ context -> currentFile
+ context -> workspaceDir
+ context -> user
+
+ session -> AVALON_ASSET
+
+Optional:
+
+Provides:
+ instance -> label
+ instance -> subset
+ instance -> attachTo
+ instance -> setMembers
+ instance -> publish
+ instance -> frameStart
+ instance -> frameEnd
+ instance -> byFrameStep
+ instance -> renderer
+ instance -> family
+ instance -> families
+ instance -> asset
+ instance -> time
+ instance -> author
+ instance -> source
+ instance -> expectedFiles
+ instance -> resolutionWidth
+ instance -> resolutionHeight
+ instance -> pixelAspect
+"""
+
import re
import os
import types
-# TODO: pending python 3 upgrade
+import six
from abc import ABCMeta, abstractmethod
from maya import cmds
@@ -122,12 +161,27 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
# frame range
exp_files = ExpectedFiles().get(renderer, layer_name)
+            # if we want to attach the render to a subset, check how many
+            # AOVs are in expectedFiles. If there is more than one, raise an
+            # error as we cannot attach an AOV (considered to be a subset of
+            # its own) to another subset
+ if attachTo:
+ assert len(exp_files[0].keys()) == 1, (
+ "attaching multiple AOVs or renderable cameras to "
+ "subset is not supported")
+
# append full path
full_exp_files = []
- for ef in exp_files:
- full_path = os.path.join(workspace, "renders", ef)
- full_path = full_path.replace("\\", "/")
- full_exp_files.append(full_path)
+ aov_dict = {}
+
+ for aov, files in exp_files[0].items():
+ full_paths = []
+ for ef in files:
+ full_path = os.path.join(workspace, "renders", ef)
+ full_path = full_path.replace("\\", "/")
+ full_paths.append(full_path)
+ aov_dict[aov] = full_paths
+
+ full_exp_files.append(aov_dict)
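+            # full_exp_files now holds a single dict mapping AOV names to
+            # full paths, e.g. (illustrative):
+            # [{"beauty": [".../renders/scene_beauty.0001.exr", ...],
+            #   "Z": [".../renders/scene_Z.0001.exr", ...]}]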
self.log.info("collecting layer: {}".format(layer_name))
# Get layer specific settings, might be overrides
@@ -136,12 +190,13 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"attachTo": attachTo,
"setMembers": layer_name,
"publish": True,
- "frameStart": self.get_render_attribute("startFrame",
- layer=layer_name),
- "frameEnd": self.get_render_attribute("endFrame",
- layer=layer_name),
- "byFrameStep": self.get_render_attribute("byFrameStep",
- layer=layer_name),
+ "frameStart": int(self.get_render_attribute("startFrame",
+ layer=layer_name)),
+ "frameEnd": int(self.get_render_attribute("endFrame",
+ layer=layer_name)),
+ "byFrameStep": int(
+ self.get_render_attribute("byFrameStep",
+ layer=layer_name)),
"renderer": self.get_render_attribute("currentRenderer",
layer=layer_name),
@@ -155,7 +210,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": filepath,
- "expectedFiles": full_exp_files
+ "expectedFiles": full_exp_files,
+ "resolutionWidth": cmds.getAttr("defaultResolution.width"),
+ "resolutionHeight": cmds.getAttr("defaultResolution.height"),
+ "pixelAspect": cmds.getAttr("defaultResolution.height")
}
# Apply each user defined attribute as data
@@ -285,16 +343,16 @@ class ExpectedFiles:
elif renderer.lower() == 'redshift':
return ExpectedFilesRedshift(layer).get_files()
elif renderer.lower() == 'mentalray':
- renderer.ExpectedFilesMentalray(layer).get_files()
+ return ExpectedFilesMentalray(layer).get_files()
elif renderer.lower() == 'renderman':
- renderer.ExpectedFilesRenderman(layer).get_files()
+ return ExpectedFilesRenderman(layer).get_files()
else:
raise UnsupportedRendererException(
"unsupported {}".format(renderer))
+@six.add_metaclass(ABCMeta)
class AExpectedFiles:
- __metaclass__ = ABCMeta
renderer = None
layer = None
@@ -360,9 +418,10 @@ class AExpectedFiles:
padding = int(self.get_render_attribute('extensionPadding'))
resolved_path = file_prefix
- for cam in renderable_cameras:
- if enabled_aovs:
- for aov in enabled_aovs:
+ if enabled_aovs:
+ aov_file_list = {}
+ for aov in enabled_aovs:
+ for cam in renderable_cameras:
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, scene_name),
@@ -380,12 +439,23 @@ class AExpectedFiles:
int(end_frame) + 1,
int(frame_step)):
aov_files.append(
- '{}.{}.{}'.format(file_prefix,
- str(frame).rjust(padding, "0"),
- aov[1]))
- expected_files.append({aov[0]: aov_files})
+ '{}.{}.{}'.format(
+ file_prefix,
+ str(frame).rjust(padding, "0"),
+ aov[1]))
+
+                    # if we have more than one renderable camera, append
+                    # the camera name to the AOV to allow per-camera AOVs.
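+                    # e.g. AOV "Z" rendered from cameras "camA" and "camB"
+                    # ends up as AOV keys "Z_camA" and "Z_camB"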
+ aov_name = aov[0]
+ if len(renderable_cameras) > 1:
+ aov_name = "{}_{}".format(aov[0], cam)
+
+ aov_file_list[aov_name] = aov_files
file_prefix = resolved_path
- else:
+
+ expected_files.append(aov_file_list)
+ else:
+ for cam in renderable_cameras:
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, scene_name),
(R_SUBSTITUTE_LAYER_TOKEN, layer_name),
@@ -475,9 +545,17 @@ class ExpectedFilesArnold(AExpectedFiles):
def get_aovs(self):
enabled_aovs = []
- if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode')
- and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):
- # AOVs are merged in mutli-channel file
+ try:
+ if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode')
+ and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):
+                # AOVs are merged into a multi-channel file
+ return enabled_aovs
+ except ValueError:
+            # this occurs when the Render Settings window has not been opened
+            # yet. In that case no Arnold options have been created, so the
+            # query for AOVs will fail. We terminate here as there are no
+            # AOVs specified then. This state will most probably fail later
+            # on some validator anyway.
return enabled_aovs
# AOVs are set to be rendered separately. We should expect
@@ -515,16 +593,15 @@ class ExpectedFilesArnold(AExpectedFiles):
aov_ext
)
)
- if not enabled_aovs:
- # if there are no AOVs, append 'beauty' as this is arnolds
- # default. If token is specified and no AOVs are
- # defined, this will be used.
- enabled_aovs.append(
- (
- 'beauty',
- cmds.getAttr('defaultRenderGlobals.imfPluginKey')
- )
+        # Append 'beauty' as this is Arnold's default. If a token is
+        # specified and no AOVs are defined, this will be used.
+ enabled_aovs.append(
+ (
+ u'beauty',
+ cmds.getAttr('defaultRenderGlobals.imfPluginKey')
)
+ )
return enabled_aovs