commit 9416f23466 (parent 14e45f59f9)

    collecting AOVs, remapping for representations, family corrections

6 changed files with 393 additions and 317 deletions
@@ -16,6 +16,8 @@ import json
 import pyblish.api
 from avalon import api
 
+from pypeapp import PypeLauncher
+
 
 def collect(root,
             regex=None,
@@ -72,9 +74,9 @@ def collect(root,
 class CollectRenderedFrames(pyblish.api.ContextPlugin):
     """Gather file sequences from working directory
 
-    When "FILESEQUENCE" environment variable is set these paths (folders or
-    .json files) are parsed for image sequences. Otherwise the current
-    working directory is searched for file sequences.
+    When "PYPE_PUBLISH_PATHS" environment variable is set these paths
+    (folders or .json files) are parsed for image sequences.
+    Otherwise the current working directory is searched for file sequences.
 
     The json configuration may have the optional keys:
         asset (str): The asset to publish to. If not provided fall back to
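For context, this is roughly how such an environment variable is typically consumed. A minimal sketch only: the helper name and the JSON layout here are illustrative, not taken from this commit.

    import os
    import json

    def gather_publish_paths():
        # PYPE_PUBLISH_PATHS may hold several folders / .json files,
        # separated by the platform path separator (assumption).
        raw = os.environ.get("PYPE_PUBLISH_PATHS", "")
        configs = []
        for path in [p for p in raw.split(os.pathsep) if p]:
            if path.endswith(".json"):
                with open(path) as f:
                    # optional keys per the docstring: asset, subset, ...
                    configs.append(json.load(f))
            else:
                # bare folder: scan it for image sequences
                configs.append({"root": path})
        return configs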
@@ -101,6 +103,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
         lut_path = None
         slate_frame = None
+        families_data = None
         baked_mov_path = None
         subset = None
         version = None
         frame_start = 0
@@ -159,8 +162,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
         if instance:
             instance_family = instance.get("family")
             pixel_aspect = instance.get("pixelAspect", 1)
-            resolution_width = instance.get("resolutionWidth", 1920)
-            resolution_height = instance.get("resolutionHeight", 1080)
+            resolution_width = instance.get(
+                "resolutionWidth", 1920)
+            resolution_height = instance.get(
+                "resolutionHeight", 1080)
             lut_path = instance.get("lutPath", None)
             baked_mov_path = instance.get("bakeRenderPath")
             families_data = instance.get("families")
@@ -183,19 +188,24 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
             if "slate" in families_data:
                 frame_start -= 1
 
-        collections, remainder = collect(
-            root=root,
-            regex=regex,
-            exclude_regex=data.get("exclude_regex"),
-            frame_start=frame_start,
-            frame_end=frame_end,
-        )
-
-        self.log.info("Found collections: {}".format(collections))
-        self.log.info("Found remainder: {}".format(remainder))
+        if regex:
+            collections, remainder = collect(
+                root=root,
+                regex=regex,
+                exclude_regex=data.get("exclude_regex"),
+                frame_start=frame_start,
+                frame_end=frame_end,
+            )
+
+            self.log.info("Found collections: {}".format(collections))
+            self.log.info("Found remainder: {}".format(remainder))
 
         fps = data.get("fps", 25)
 
         # adding publish comment and intent to context
         context.data["comment"] = data.get("comment", "")
         context.data["intent"] = data.get("intent", "")
 
         if data.get("user"):
             context.data["user"] = data["user"]
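The collect() helper builds on clique-style sequence assembly; a small stand-alone illustration of what `collections` and `remainder` look like (using the clique library the repo vendors under avalon.vendor; the file names are made up):

    from avalon.vendor import clique  # or: import clique

    files = [
        "shot010_beauty.1001.exr",
        "shot010_beauty.1002.exr",
        "shot010_beauty.1003.exr",
        "preview.mov",
    ]
    collections, remainder = clique.assemble(files)
    # collections[0] groups the numbered frames into one sequence:
    #   head="shot010_beauty.", tail=".exr", indexes={1001, 1002, 1003}
    # remainder keeps the unmatched single files: ["preview.mov"]
    for collection in collections:
        ext = collection.tail.lstrip(".")  # "exr", as in the plugin above
        frames = list(collection)          # expanded file names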
@@ -221,9 +231,9 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
                 self.log.info(
                     "Attaching render {}:v{}".format(
                         attach["subset"], attach["version"]))
-                instance = context.create_instance(
+                new_instance = context.create_instance(
                     attach["subset"])
-                instance.data.update(
+                new_instance.data.update(
                     {
                         "name": attach["subset"],
                         "version": attach["version"],
@@ -241,29 +251,34 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
                         "resolutionHeight": resolution_height
                     })
 
-                if "representations" not in instance.data:
-                    instance.data["representations"] = []
-
-                for collection in collections:
-                    self.log.info(
-                        " - adding representation: {}".format(
-                            str(collection))
-                    )
-                    ext = collection.tail.lstrip(".")
-
-                    representation = {
-                        "name": ext,
-                        "ext": "{}".format(ext),
-                        "files": list(collection),
-                        "stagingDir": root,
-                        "anatomy_template": "render",
-                        "fps": fps,
-                        "tags": ["review"],
-                        "frameStart": frame_start,
-                        "frameEnd": frame_end
-                    }
-                    instance.data["representations"].append(
-                        representation)
+                if regex:
+                    if "representations" not in new_instance.data:
+                        new_instance.data["representations"] = []
+
+                    for collection in collections:
+                        self.log.info(
+                            " - adding representation: {}".format(
+                                str(collection))
+                        )
+                        ext = collection.tail.lstrip(".")
+
+                        representation = {
+                            "name": ext,
+                            "ext": "{}".format(ext),
+                            "files": list(collection),
+                            "stagingDir": root,
+                            "anatomy_template": "render",
+                            "fps": fps,
+                            "tags": ["review"],
+                        }
+                        new_instance.data["representations"].append(
+                            representation)
+                else:
+                    try:
+                        representations = data["metadata"]["instance"]["representations"]  # noqa: E501
+                    except KeyError as e:
+                        assert False, e
+                    new_instance.data["representations"] = representations
 
             elif subset:
                 # if we have subset - add all collections and known
@@ -285,10 +300,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
                     "Adding representations to subset {}".format(
                         subset))
 
-                instance = context.create_instance(subset)
+                new_instance = context.create_instance(subset)
                 data = copy.deepcopy(data)
 
-                instance.data.update(
+                new_instance.data.update(
                     {
                         "name": subset,
                         "family": families[0],
@@ -309,138 +324,158 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
                     }
                 )
 
-                if "representations" not in instance.data:
-                    instance.data["representations"] = []
-
-                for collection in collections:
-                    self.log.info(" - {}".format(str(collection)))
-
-                    ext = collection.tail.lstrip(".")
-
-                    if "slate" in instance.data["families"]:
-                        frame_start += 1
-
-                    representation = {
-                        "name": ext,
-                        "ext": "{}".format(ext),
-                        "files": list(collection),
-                        "frameStart": frame_start,
-                        "frameEnd": frame_end,
-                        "stagingDir": root,
-                        "anatomy_template": "render",
-                        "fps": fps,
-                        "tags": ["review"] if not baked_mov_path else [],
-                    }
-                    instance.data["representations"].append(
-                        representation)
-
-                # filter out only relevant mov in case baked available
-                self.log.debug("__ remainder {}".format(remainder))
-                if baked_mov_path:
-                    remainder = [r for r in remainder
-                                 if r in baked_mov_path]
-                    self.log.debug("__ remainder {}".format(remainder))
-
-                # process reminders
-                for rem in remainder:
-                    # add only known types to representation
-                    if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
-                        self.log.info(" . {}".format(rem))
-
-                        if "slate" in instance.data["families"]:
-                            frame_start += 1
-
-                        tags = ["review"]
-
-                        if baked_mov_path:
-                            tags.append("delete")
-
-                        representation = {
-                            "name": rem.split(".")[-1],
-                            "ext": "{}".format(rem.split(".")[-1]),
-                            "files": rem,
-                            "stagingDir": root,
-                            "anatomy_template": "render",
-                            "fps": fps,
-                            "tags": tags
-                        }
-                        instance.data["representations"].append(
-                            representation)
+                if "representations" not in new_instance.data:
+                    new_instance.data["representations"] = []
+
+                if regex:
+                    for collection in collections:
+                        self.log.info(" - {}".format(str(collection)))
+
+                        ext = collection.tail.lstrip(".")
+
+                        if "slate" in new_instance.data["families"]:
+                            frame_start += 1
+
+                        representation = {
+                            "name": ext,
+                            "ext": "{}".format(ext),
+                            "files": list(collection),
+                            "frameStart": frame_start,
+                            "frameEnd": frame_end,
+                            "stagingDir": root,
+                            "anatomy_template": "render",
+                            "fps": fps,
+                            "tags": ["review"] if not baked_mov_path else [],
+                        }
+                        new_instance.data["representations"].append(
+                            representation)
+
+                    # filter out only relevant mov in case baked available
+                    self.log.debug("__ remainder {}".format(remainder))
+                    if baked_mov_path:
+                        remainder = [r for r in remainder
+                                     if r in baked_mov_path]
+                        self.log.debug("__ remainder {}".format(remainder))
+
+                    # process reminders
+                    for rem in remainder:
+                        # add only known types to representation
+                        if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
+                            self.log.info(" . {}".format(rem))
+
+                            if "slate" in new_instance.data["families"]:
+                                frame_start += 1
+
+                            tags = ["review"]
+
+                            if baked_mov_path:
+                                tags.append("delete")
+
+                            representation = {
+                                "name": rem.split(".")[-1],
+                                "ext": "{}".format(rem.split(".")[-1]),
+                                "files": rem,
+                                "stagingDir": root,
+                                "frameStart": frame_start,
+                                "anatomy_template": "render",
+                                "fps": fps,
+                                "tags": tags
+                            }
+                            new_instance.data["representations"].append(
+                                representation)
+                else:
+                    try:
+                        representations = data["metadata"]["instance"]["representations"]  # noqa: E501
+                    except KeyError as e:
+                        assert False, e
+
+                    new_instance.data["representations"] = representations
 
             else:
                 # we have no subset so we take every collection and create one
                 # from it
-                for collection in collections:
-                    instance = context.create_instance(str(collection))
-                    self.log.info("Creating subset from: %s" % str(collection))
-
-                    # Ensure each instance gets a unique reference to the data
-                    data = copy.deepcopy(data)
-
-                    # If no subset provided, get it from collection's head
-                    subset = data.get("subset", collection.head.rstrip("_. "))
-
-                    # If no start or end frame provided, get it from collection
-                    indices = list(collection.indexes)
-                    start = data.get("frameStart", indices[0])
-                    end = data.get("frameEnd", indices[-1])
-
-                    ext = list(collection)[0].split(".")[-1]
-
-                    if "review" not in families:
-                        families.append("review")
-
-                    instance.data.update(
-                        {
-                            "name": str(collection),
-                            "family": families[0],  # backwards compatibility
-                            "families": list(families),
-                            "subset": subset,
-                            "asset": data.get(
-                                "asset", api.Session["AVALON_ASSET"]),
-                            "stagingDir": root,
-                            "frameStart": start,
-                            "frameEnd": end,
-                            "fps": fps,
-                            "source": data.get("source", ""),
-                            "pixelAspect": pixel_aspect,
-                            "resolutionWidth": resolution_width,
-                            "resolutionHeight": resolution_height,
-                            "version": version
-                        }
-                    )
-                    if lut_path:
-                        instance.data.update({"lutPath": lut_path})
-
-                    instance.append(collection)
-                    instance.context.data["fps"] = fps
-
-                    if "representations" not in instance.data:
-                        instance.data["representations"] = []
-
-                    representation = {
-                        "name": ext,
-                        "ext": "{}".format(ext),
-                        "files": list(collection),
-                        "stagingDir": root,
-                        "anatomy_template": "render",
-                        "fps": fps,
-                        "tags": ["review"],
-                    }
-                    instance.data["representations"].append(representation)
-
-                    # temporary ... allow only beauty on ftrack
-                    if session['AVALON_APP'] == "maya":
-                        AOV_filter = ['beauty']
-                        for aov in AOV_filter:
-                            if aov not in instance.data['subset']:
-                                instance.data['families'].remove('review')
-                                instance.data['families'].remove('ftrack')
-                                representation["tags"].remove('review')
-
-            self.log.debug(
-                "__ representations {}".format(
-                    instance.data["representations"]))
-            self.log.debug(
-                "__ instance.data {}".format(instance.data))
+                if regex:
+                    for collection in collections:
+                        new_instance = context.create_instance(str(collection))
+                        self.log.info(
+                            "Creating subset from: %s" % str(collection))
+
+                        # Ensure each instance gets a unique
+                        # reference to the data
+                        data = copy.deepcopy(data)
+
+                        # If no subset provided, get it from collection's head
+                        subset = data.get(
+                            "subset", collection.head.rstrip("_. "))
+
+                        # If no start or end frame provided,
+                        # get it from collection
+                        indices = list(collection.indexes)
+                        start = data.get("frameStart", indices[0])
+                        end = data.get("frameEnd", indices[-1])
+
+                        ext = list(collection)[0].split(".")[-1]
+
+                        if "review" not in families:
+                            families.append("review")
+
+                        new_instance.data.update(
+                            {
+                                "name": str(collection),
+                                "family": families[0],
+                                "families": list(families),
+                                "subset": subset,
+                                "asset": data.get(
+                                    "asset", api.Session["AVALON_ASSET"]),
+                                "stagingDir": root,
+                                "frameStart": start,
+                                "frameEnd": end,
+                                "fps": fps,
+                                "source": data.get("source", ""),
+                                "pixelAspect": pixel_aspect,
+                                "resolutionWidth": resolution_width,
+                                "resolutionHeight": resolution_height,
+                                "version": version
+                            }
+                        )
+                        if lut_path:
+                            new_instance.data.update({"lutPath": lut_path})
+
+                        new_instance.append(collection)
+                        new_instance.context.data["fps"] = fps
+
+                        if "representations" not in new_instance.data:
+                            new_instance.data["representations"] = []
+
+                        representation = {
+                            "name": ext,
+                            "ext": "{}".format(ext),
+                            "files": list(collection),
+                            "stagingDir": root,
+                            "anatomy_template": "render",
+                            "fps": fps,
+                            "tags": ["review"],
+                        }
+                        new_instance.data["representations"].append(
+                            representation)
+
+                        # temporary ... allow only beauty on ftrack
+                        if session['AVALON_APP'] == "maya":
+                            AOV_filter = ['beauty']
+                            for aov in AOV_filter:
+                                if aov not in new_instance.data['subset']:
+                                    new_instance.data['families'].remove(
+                                        'review')
+                                    new_instance.data['families'].remove(
+                                        'ftrack')
+                                    representation["tags"].remove('review')
+
+            self.log.info("remapping paths ...")
+            new_instance.data["representations"] = [PypeLauncher.path_remapper(r) for r in new_instance.data["representations"]]  # noqa: E501
+            self.log.debug(
+                "__ representations {}".format(
+                    new_instance.data["representations"]))
+            self.log.debug(
+                "__ instance.data {}".format(new_instance.data))
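PypeLauncher.path_remapper is used above to rewrite representation paths from a workstation mount root to the shared network root so the farm can resolve them. A minimal sketch of the idea only; the environment variable names follow the ones used elsewhere in this diff, while the real implementation lives in pypeapp and may differ:

    import os

    def path_remapper(representation):
        # sketch: swap the local mount root for the network root in stagingDir
        mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"])
        network_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_PATH"])
        staging = os.path.normpath(representation["stagingDir"])
        representation["stagingDir"] = staging.replace(mount_root, network_root)
        return representation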
@@ -2,6 +2,7 @@ import os
 import json
 import re
 import logging
+from collections import namedtuple
 
 from avalon import api, io
 from avalon.vendor import requests, clique
@@ -9,21 +10,23 @@ from avalon.vendor import requests, clique
 import pyblish.api
 
 
+AOVFilter = namedtuple("AOVFilter", ["app", "aov"])
+
+
 def _get_script():
     """Get path to the image sequence script"""
     try:
         from pype.scripts import publish_filesequence
     except Exception:
-        raise RuntimeError("Expected module 'publish_deadline'"
-                           "to be available")
+        assert False, "Expected module 'publish_deadline'to be available"
 
     module_path = publish_filesequence.__file__
     if module_path.endswith(".pyc"):
-        module_path = module_path[:-len(".pyc")] + ".py"
+        module_path = module_path[: -len(".pyc")] + ".py"
 
     module_path = os.path.normpath(module_path)
-    mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT'])
-    network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH'])
+    mount_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_MOUNT"])
+    network_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_PATH"])
 
     module_path = module_path.replace(mount_root, network_root)
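The new AOVFilter namedtuple pairs a host application with the AOV names the plugin should special-case; a quick illustration of the structure:

    from collections import namedtuple

    AOVFilter = namedtuple("AOVFilter", ["app", "aov"])

    aov_filter = [AOVFilter("maya", ["beauty"])]
    for flt in aov_filter:
        print(flt.app)  # "maya"
        print(flt.aov)  # ["beauty"]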
@@ -34,39 +37,29 @@ def _get_script():
 def get_latest_version(asset_name, subset_name, family):
     # Get asset
     asset_name = io.find_one(
-        {
-            "type": "asset",
-            "name": asset_name
-        },
-        projection={"name": True}
+        {"type": "asset", "name": asset_name}, projection={"name": True}
     )
 
     subset = io.find_one(
-        {
-            "type": "subset",
-            "name": subset_name,
-            "parent": asset_name["_id"]
-        },
-        projection={"_id": True, "name": True}
+        {"type": "subset", "name": subset_name, "parent": asset_name["_id"]},
+        projection={"_id": True, "name": True},
     )
 
     # Check if subsets actually exists (pre-run check)
     assert subset, "No subsets found, please publish with `extendFrames` off"
 
     # Get version
-    version_projection = {"name": True,
-                          "data.startFrame": True,
-                          "data.endFrame": True,
-                          "parent": True}
+    version_projection = {
+        "name": True,
+        "data.startFrame": True,
+        "data.endFrame": True,
+        "parent": True,
+    }
 
     version = io.find_one(
-        {
-            "type": "version",
-            "parent": subset["_id"],
-            "data.families": family
-        },
+        {"type": "version", "parent": subset["_id"], "data.families": family},
         projection=version_projection,
-        sort=[("name", -1)]
+        sort=[("name", -1)],
     )
 
     assert version, "No version found, this is a bug"
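Sorting on the version name in descending order and taking the first document is what makes find_one return the latest version; the same ordering in plain Python:

    versions = [{"name": 1}, {"name": 2}, {"name": 3}]
    latest = sorted(versions, key=lambda v: v["name"], reverse=True)[0]
    assert latest["name"] == 3  # same idea as io.find_one(..., sort=[("name", -1)])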
@@ -87,8 +80,12 @@ def get_resources(version, extension=None):
 
     directory = api.get_representation_path(representation)
     print("Source: ", directory)
-    resources = sorted([os.path.normpath(os.path.join(directory, fname))
-                        for fname in os.listdir(directory)])
+    resources = sorted(
+        [
+            os.path.normpath(os.path.join(directory, fname))
+            for fname in os.listdir(directory)
+        ]
+    )
 
     return resources
@@ -149,23 +146,22 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
 
     hosts = ["fusion", "maya", "nuke"]
 
-    families = [
-        "render.farm",
-        "renderlayer",
-        "imagesequence"
-    ]
+    families = ["render.farm", "renderlayer", "imagesequence"]
+
+    # this will add review and ftrack tag only to `beauty` in `maya` app
+    aov_filter = [AOVFilter("maya", ["beauty"])]
 
     enviro_filter = [
-        "PATH",
-        "PYTHONPATH",
-        "FTRACK_API_USER",
-        "FTRACK_API_KEY",
-        "FTRACK_SERVER",
-        "PYPE_ROOT",
-        "PYPE_METADATA_FILE",
-        "PYPE_STUDIO_PROJECTS_PATH",
-        "PYPE_STUDIO_PROJECTS_MOUNT"
-    ]
+        "PATH",
+        "PYTHONPATH",
+        "FTRACK_API_USER",
+        "FTRACK_API_KEY",
+        "FTRACK_SERVER",
+        "PYPE_ROOT",
+        "PYPE_METADATA_FILE",
+        "PYPE_STUDIO_PROJECTS_PATH",
+        "PYPE_STUDIO_PROJECTS_MOUNT",
+    ]
 
     def _submit_deadline_post_job(self, instance, job):
         """
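How aov_filter is meant to gate the review tag can be shown with a small stand-alone check. This is a sketch of the intent, not a copy of the plugin code: it uses re.search to find the AOV name between "." or "_" separators anywhere in the file name, whereas the plugin itself anchors the same pattern with re.match on the first file of each collection.

    import os
    import re

    def wants_review(first_file, aov_filter):
        # mirrors the review-flag logic in the expectedFiles branch below
        for flt in aov_filter:
            if os.environ.get("AVALON_APP", "") == flt.app:
                for aov in flt.aov:
                    if re.search(r"(\.|_)({})(\.|_)".format(aov), first_file):
                        return False
        return True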
@@ -176,8 +172,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         data = instance.data.copy()
         subset = data["subset"]
         job_name = "{batch} - {subset} [publish image sequence]".format(
-            batch=job["Props"]["Name"],
-            subset=subset
+            batch=job["Props"]["Name"], subset=subset
         )
 
         metadata_filename = "{}_metadata.json".format(subset)
@@ -185,9 +180,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         metadata_path = os.path.join(output_dir, metadata_filename)
 
         metadata_path = os.path.normpath(metadata_path)
-        mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT'])
+        mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"])
         network_root = os.path.normpath(
-            os.environ['PYPE_STUDIO_PROJECTS_PATH'])
+            os.environ["PYPE_STUDIO_PROJECTS_PATH"]
+        )
 
         metadata_path = metadata_path.replace(mount_root, network_root)
@@ -197,21 +193,19 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
                 "Plugin": "Python",
                 "BatchName": job["Props"]["Batch"],
                 "Name": job_name,
                 "JobType": "Normal",
                 "JobDependency0": job["_id"],
                 "UserName": job["Props"]["User"],
                 "Comment": instance.context.data.get("comment", ""),
-                "Priority": job["Props"]["Pri"]
+                "Priority": job["Props"]["Pri"],
             },
             "PluginInfo": {
                 "Version": "3.6",
                 "ScriptFile": _get_script(),
                 "Arguments": "",
-                "SingleFrameOnly": "True"
+                "SingleFrameOnly": "True",
             },
 
             # Mandatory for Deadline, may be empty
-            "AuxFiles": []
+            "AuxFiles": [],
         }
 
         # Transfer the environment from the original job to this dependent
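The assembled payload is what gets POSTed to Deadline's REST interface to create the dependent publish job. A hedged sketch of the submission call; the "/api/jobs" endpoint is the one commonly exposed by the Deadline Web Service, and self.DEADLINE_REST_URL is resolved later in this plugin:

    from avalon.vendor import requests  # or: import requests

    def submit(deadline_rest_url, payload):
        url = "{}/api/jobs".format(deadline_rest_url)
        response = requests.post(url, json=payload)
        if not response.ok:
            raise Exception(response.text)
        return response.json()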
@@ -225,12 +219,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             self.log.info("FILTER: {}".format(self.enviro_filter))
 
             if key.upper() in self.enviro_filter:
-                payload["JobInfo"].update({
-                    "EnvironmentKeyValue%d" % i: "{key}={value}".format(
-                        key=key,
-                        value=environment[key]
-                    )
-                })
+                payload["JobInfo"].update(
+                    {
+                        "EnvironmentKeyValue%d"
+                        % i: "{key}={value}".format(
+                            key=key, value=environment[key]
+                        )
+                    }
+                )
                 i += 1
 
         # Avoid copied pools and remove secondary pool
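Deadline expects the forwarded environment as numbered EnvironmentKeyValue entries; the loop above produces keys like this:

    environment = {"PATH": "/usr/bin", "PYTHONPATH": "/pipeline"}
    job_info = {}
    for i, key in enumerate(sorted(environment)):
        job_info["EnvironmentKeyValue%d" % i] = "{key}={value}".format(
            key=key, value=environment[key])
    # -> {"EnvironmentKeyValue0": "PATH=/usr/bin",
    #     "EnvironmentKeyValue1": "PYTHONPATH=/pipeline"}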
@@ -295,33 +291,32 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             fps
             tags
         """
 
         # Get a submission job
         data = instance.data.copy()
+        context = instance.context
 
         render_job = data.pop("deadlineSubmissionJob", None)
+        submission_type = "deadline"
 
         if not render_job:
             # No deadline job. Try Muster: musterSubmissionJob
             render_job = data.pop("musterSubmissionJob", None)
-            if not render_job:
-                raise RuntimeError("Can't continue without valid Deadline "
-                                   "or Muster submission prior to this "
-                                   "plug-in.")
+            submission_type = "muster"
+            assert render_job, (
+                "Can't continue without valid Deadline "
+                "or Muster submission prior to this "
+                "plug-in."
+            )
 
-        self.DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
-                                                "http://localhost:8082")
-        assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
+        if submission_type == "deadline":
+            self.DEADLINE_REST_URL = os.environ.get(
+                "DEADLINE_REST_URL", "http://localhost:8082"
+            )
+            assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
 
-        self._submit_deadline_post_job(instance, render_job)
+            self._submit_deadline_post_job(instance, render_job)
 
         asset = data.get("asset") or api.Session["AVALON_ASSET"]
-        subset = data["subset"]
+        subset = data.get("subset")
 
         # Get start/end frame from instance, if not available get from context
-        context = instance.context
         start = instance.data.get("frameStart")
         if start is None:
             start = context.data["frameStart"]
@@ -329,45 +324,76 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         if end is None:
             end = context.data["frameEnd"]
 
-        # Add in regex for sequence filename
-        # This assumes the output files start with subset name and ends with
-        # a file extension. The "ext" key includes the dot with the extension.
-        if "ext" in instance.data:
-            ext = r"\." + re.escape(instance.data["ext"])
-        else:
-            ext = r"\.\D+"
-
-        regex = r"^{subset}.*\d+{ext}$".format(subset=re.escape(subset),
-                                               ext=ext)
-
         try:
-            source = data['source']
+            source = data["source"]
         except KeyError:
             source = context.data["currentFile"]
 
-        source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"),
-                                api.registered_root())
-
+        source = source.replace(
+            os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), api.registered_root()
+        )
         relative_path = os.path.relpath(source, api.registered_root())
         source = os.path.join("{root}", relative_path).replace("\\", "/")
+        regex = None
 
         # find subsets and version to attach render to
         attach_to = instance.data.get("attachTo")
         attach_subset_versions = []
         if attach_to:
             for subset in attach_to:
                 for instance in context:
                     if instance.data["subset"] != subset["subset"]:
                         continue
                     attach_subset_versions.append(
                         {"version": instance.data["version"],
                          "subset": subset["subset"],
                          "family": subset["family"]})
+
+        if data.get("expectedFiles"):
+            representations = []
+            cols, rem = clique.assemble(data.get("expectedFiles"))
+            for c in cols:
+                ext = c.tail.lstrip(".")
+                review = True
+                for filter in self.aov_filter:
+                    if os.environ.get("AVALON_APP", "") == filter.app:
+                        for aov in filter.aov:
+                            if re.match(
+                                r"(\.|_)({})(\.|_)".format(aov), list(c)[0]
+                            ):
+                                review = False
+                rep = {
+                    "name": ext,
+                    "ext": ext,
+                    "files": [os.path.basename(f) for f in list(c)],
+                    "frameStart": int(start),
+                    "frameEnd": int(end),
+                    # If expectedFile are absolute, we need only filenames
+                    "stagingDir": os.path.dirname(list(c)[0]),
+                    "anatomy_template": "render",
+                    "fps": context.data.get("fps", None),
+                    "tags": ["review"] if review else [],
+                }
+
+                representations.append(rep)
+
+            for r in rem:
+                ext = r.split(".")[-1]
+                rep = {
+                    "name": ext,
+                    "ext": ext,
+                    "files": os.path.basename(r),
+                    "stagingDir": os.path.dirname(r),
+                    "anatomy_template": "publish",
+                }
+
+                representations.append(rep)
+
+            if "representations" not in instance.data:
+                data["representations"] = []
+
+            # add representation
+            data["representations"] += representations
+
+        else:
+            if "ext" in instance.data:
+                ext = r"\." + re.escape(instance.data["ext"])
+            else:
+                ext = r"\.\D+"
+
+            regex = r"^{subset}.*\d+{ext}$".format(
+                subset=re.escape(subset), ext=ext
+            )
+
         # Write metadata for publish job
         metadata = {
             "asset": asset,
             "regex": regex,
             "frameStart": start,
             "frameEnd": end,
             "fps": context.data.get("fps", None),
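When no expectedFiles are available, the fallback regex matches rendered frames by subset name and frame number; for example, with ext resolved to r"\.exr":

    import re

    subset = "renderCompositingMain"
    regex = r"^{subset}.*\d+\.exr$".format(subset=re.escape(subset))
    assert re.match(regex, "renderCompositingMain.1001.exr")
    assert not re.match(regex, "otherSubset.1001.exr")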
@@ -375,28 +401,30 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             "source": source,
             "user": context.data["user"],
             "version": context.data["version"],
             "attachTo": attach_subset_versions,
             "intent": context.data.get("intent"),
             "comment": context.data.get("comment"),
             # Optional metadata (for debugging)
             "metadata": {
-                "instance": data,
                 "job": render_job,
-                "session": api.Session.copy()
-            }
+                "session": api.Session.copy(),
+                "instance": data,
+            },
         }
 
         if api.Session["AVALON_APP"] == "nuke":
-            metadata['subset'] = subset
+            metadata["subset"] = subset
 
         if submission_type == "muster":
             ftrack = {
                 "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),
                 "FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"),
-                "FTRACK_SERVER": os.environ.get("FTRACK_SERVER")
+                "FTRACK_SERVER": os.environ.get("FTRACK_SERVER"),
             }
             metadata.update({"ftrack": ftrack})
 
+        if regex:
+            metadata["regex"] = regex
+
         # Ensure output dir exists
         output_dir = instance.data["outputDir"]
         if not os.path.isdir(output_dir):
@@ -418,16 +446,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         # Frame comparison
         prev_start = None
         prev_end = None
-        resource_range = range(int(start), int(end)+1)
+        resource_range = range(int(start), int(end) + 1)
 
         # Gather all the subset files (one subset per render pass!)
         subset_names = [data["subset"]]
         subset_names.extend(data.get("renderPasses", []))
         resources = []
         for subset_name in subset_names:
-            version = get_latest_version(asset_name=data["asset"],
-                                         subset_name=subset_name,
-                                         family=family)
+            version = get_latest_version(
+                asset_name=data["asset"],
+                subset_name=subset_name,
+                family=family,
+            )
 
             # Set prev start / end frames for comparison
             if not prev_start and not prev_end:
@@ -435,9 +465,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             prev_end = version["data"]["frameEnd"]
 
             subset_resources = get_resources(version, _ext)
-            resource_files = get_resource_files(subset_resources,
-                                                resource_range,
-                                                override)
+            resource_files = get_resource_files(
+                subset_resources, resource_range, override
+            )
 
             resources.extend(resource_files)
@@ -445,27 +475,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         updated_end = max(end, prev_end)
 
         # Update metadata and instance start / end frame
-        self.log.info("Updating start / end frame : "
-                      "{} - {}".format(updated_start, updated_end))
-
-        # TODO : Improve logic to get new frame range for the
-        # publish job (publish_filesequence.py)
-        # The current approach is not following Pyblish logic
-        # which is based
-        # on Collect / Validate / Extract.
-
-        # ---- Collect Plugins ---
-        # Collect Extend Frames - Only run if extendFrames is toggled
-        # # # Store in instance:
-        # # #    Previous rendered files per subset based on frames
-        # # #    --> Add to instance.data[resources]
-        # # #    Update publish frame range
-
-        # ---- Validate Plugins ---
-        # Validate Extend Frames
-        # # # Check if instance has the requirements to extend frames
-        # There might have been some things which can be added to the list
-        # Please do so when fixing this.
+        self.log.info(
+            "Updating start / end frame : "
+            "{} - {}".format(updated_start, updated_end)
+        )
 
         # Start frame
         metadata["frameStart"] = updated_start
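Extending frames takes the union of the previous publish's range and the current one (updated_start is computed symmetrically just above this hunk); for example, a previous 1001-1050 merged with a new 1040-1100 publishes 1001-1100:

    prev_start, prev_end = 1001, 1050
    start, end = 1040, 1100
    updated_start = min(start, prev_start)  # 1001
    updated_end = max(end, prev_end)        # 1100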
@@ -14,7 +14,7 @@ class CreateRender(avalon.maya.Creator):
     """Create render layer for export"""
 
     label = "Render"
-    family = "render"
+    family = "rendering"
     icon = "eye"
     defaults = ["Main"]
@@ -1,7 +1,8 @@
 import re
 import os
 import types
-from abc import ABC, abstractmethod
+# TODO: pending python 3 upgrade
+from abc import ABCMeta, abstractmethod
 
 from maya import cmds
 import maya.app.renderSetup.model.renderSetup as renderSetup
@@ -48,12 +49,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
     order = pyblish.api.CollectorOrder + 0.01
     hosts = ["maya"]
     label = "Collect Render Layers"
-    families = ["render"]
 
     def process(self, context):
         render_instance = None
         for instance in context:
-            if 'render' in instance.data['families']:
+            if 'rendering' in instance.data['families']:
                 render_instance = instance
 
         if not render_instance:
@@ -65,6 +65,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
         collected_render_layers = render_instance.data['setMembers']
         filepath = context.data["currentFile"].replace("\\", "/")
         asset = api.Session["AVALON_ASSET"]
+        workspace = context.data["workspaceDir"]
 
         self._rs = renderSetup.instance()
         maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()}
@@ -120,11 +121,19 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             # frame range
             exp_files = ExpectedFiles().get(renderer, layer_name)
 
+            # append full path
+            full_exp_files = []
+            for ef in exp_files:
+                full_path = os.path.join(workspace, "render", ef)
+                full_path = full_path.replace("\\", "/")
+                full_exp_files.append(full_path)
+
+            self.log.info("collecting layer: {}".format(layer_name))
             # Get layer specific settings, might be overrides
             data = {
                 "subset": expected_layer_name,
                 "attachTo": attachTo,
-                "setMembers": expected_layer_name,
+                "setMembers": layer_name,
                 "publish": True,
                 "frameStart": self.get_render_attribute("startFrame",
                                                         layer=layer_name),
@@ -136,7 +145,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                                                       layer=layer_name),
 
                 # instance subset
-                "family": "Render Layers",
+                "family": "renderlayer",
                 "families": ["renderlayer"],
                 "asset": asset,
                 "time": api.time(),
@@ -145,7 +154,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                 # Add source to allow tracing back to the scene from
                 # which was submitted originally
                 "source": filepath,
-                "expectedFiles": exp_files
+                "expectedFiles": full_exp_files
             }
 
             # Apply each user defined attribute as data
@@ -201,9 +210,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
         if pool_b:
             options["renderGlobals"].update({"SecondaryPool": pool_b})
 
-        legacy = attributes["useLegacyRenderLayers"]
-        options["renderGlobals"]["UseLegacyRenderLayers"] = legacy
-
         # Machine list
         machine_list = attributes["machineList"]
         if machine_list:
@@ -267,22 +273,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
         return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
                                      layer=layer)
 
-    def _get_layer_overrides(self, attr, layer):
-        connections = cmds.listConnections(attr, plugs=True)
-        if connections:
-            for connection in connections:
-                if connection:
-                    node_name = connection.split('.')[0]
-                    if cmds.nodeType(node_name) == 'renderLayer':
-                        attr_name = '%s.value' % '.'.join(
-                            connection.split('.')[:-1])
-                        if node_name == layer:
-                            yield cmds.getAttr(attr_name)
-
 
 class ExpectedFiles:
 
-    def get(renderer, layer):
+    def get(self, renderer, layer):
         if renderer.lower() == 'arnold':
             return ExpectedFilesArnold(layer).get_files()
         elif renderer.lower() == 'vray':
@@ -298,8 +292,8 @@ class ExpectedFiles:
                                "unsupported {}".format(renderer))
 
 
-class AExpectedFiles(ABC):
-
+class AExpectedFiles:
+    __metaclass__ = ABCMeta
     renderer = None
     layer = None
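Dropping ABC for a __metaclass__ declaration keeps the abstract base usable from Python 2, where the ABC convenience class does not exist (hence the "pending python 3 upgrade" TODO above); the pattern in isolation:

    from abc import ABCMeta, abstractmethod

    class Base(object):
        __metaclass__ = ABCMeta  # Python 2 spelling of "class Base(ABC)"

        @abstractmethod
        def get_aovs(self):
            pass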
@@ -356,6 +350,9 @@ class AExpectedFiles(ABC):
         # every renderable camera in layer.
 
         expected_files = []
+        layer_name = self.layer
+        if self.layer.startswith("rs_"):
+            layer_name = self.layer[3:]
         start_frame = int(self.get_render_attribute('startFrame'))
         end_frame = int(self.get_render_attribute('endFrame'))
         frame_step = int(self.get_render_attribute('byFrameStep'))
@@ -368,7 +365,7 @@ class AExpectedFiles(ABC):
 
                 mappings = (
                     (R_SUBSTITUTE_SCENE_TOKEN, scene_name),
-                    (R_SUBSTITUTE_LAYER_TOKEN, self.layer),
+                    (R_SUBSTITUTE_LAYER_TOKEN, layer_name),
                     (R_SUBSTITUTE_CAMERA_TOKEN, cam),
                     (R_SUBSTITUTE_AOV_TOKEN, aov[0])
                 )
@@ -377,7 +374,9 @@ class AExpectedFiles(ABC):
                     file_prefix = re.sub(regex, value, file_prefix)
 
                 for frame in range(
-                        int(start_frame), int(end_frame), int(frame_step)):
+                        int(start_frame),
+                        int(end_frame) + 1,
+                        int(frame_step)):
                     expected_files.append(
                         '{}.{}.{}'.format(file_prefix,
                                           str(frame).rjust(padding, "0"),
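Python's range() excludes the stop value, so the old code silently dropped the last frame; adding 1 makes the expected-file list inclusive of end_frame:

    start_frame, end_frame, frame_step = 1001, 1003, 1
    assert list(range(start_frame, end_frame, frame_step)) == [1001, 1002]
    assert list(range(start_frame, end_frame + 1, frame_step)) == [
        1001, 1002, 1003]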
@@ -386,7 +385,7 @@ class AExpectedFiles(ABC):
         else:
             mappings = (
                 (R_SUBSTITUTE_SCENE_TOKEN, scene_name),
-                (R_SUBSTITUTE_LAYER_TOKEN, self.layer),
+                (R_SUBSTITUTE_LAYER_TOKEN, layer_name),
                 (R_SUBSTITUTE_CAMERA_TOKEN, cam)
             )
@@ -394,7 +393,9 @@ class AExpectedFiles(ABC):
                 file_prefix = re.sub(regex, value, file_prefix)
 
             for frame in range(
-                    int(start_frame), int(end_frame), int(frame_step)):
+                    int(start_frame),
+                    int(end_frame) + 1,
+                    int(frame_step)):
                 expected_files.append(
                     '{}.{}.{}'.format(file_prefix,
                                       str(frame).rjust(padding, "0"),
@@ -418,6 +419,7 @@ class AExpectedFiles(ABC):
 
             if renderable:
                 renderable_cameras.append(cam)
+        return renderable_cameras
 
     def maya_is_true(self, attr_val):
         """
@@ -433,6 +435,22 @@ class AExpectedFiles(ABC):
         else:
             return bool(attr_val)
 
+    def get_layer_overrides(self, attr, layer):
+        connections = cmds.listConnections(attr, plugs=True)
+        if connections:
+            for connection in connections:
+                if connection:
+                    node_name = connection.split('.')[0]
+                    if cmds.nodeType(node_name) == 'renderLayer':
+                        attr_name = '%s.value' % '.'.join(
+                            connection.split('.')[:-1])
+                        if node_name == layer:
+                            yield cmds.getAttr(attr_name)
+
+    def get_render_attribute(self, attr):
+        return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
+                                     layer=self.layer)
+
 
 class ExpectedFilesArnold(AExpectedFiles):
@@ -449,10 +467,10 @@ class ExpectedFilesArnold(AExpectedFiles):
     }
 
     def __init__(self, layer):
-        super(self).__init__(layer)
+        super(ExpectedFilesArnold, self).__init__(layer)
         self.renderer = 'arnold'
 
-    def _get_aovs(self):
+    def get_aovs(self):
         enabled_aovs = []
         if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode')
                 and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):
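The old `super(self).__init__(layer)` raises "TypeError: super() argument 1 must be type"; in Python 2, super() always needs the class and the instance, which is what the corrected calls supply:

    class Base(object):
        def __init__(self, layer):
            self.layer = layer

    class Child(Base):
        def __init__(self, layer):
            # super(self).__init__(...) would be a TypeError;
            # Python 2 requires the explicit class argument
            super(Child, self).__init__(layer)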
@@ -490,16 +508,26 @@ class ExpectedFilesArnold(AExpectedFiles):
                         aov_ext
                     )
                 )
+        if not enabled_aovs:
+            # if there are no AOVs, append 'beauty' as this is arnolds
+            # default. If <RenderPass> token is specified and no AOVs are
+            # defined, this will be used.
+            enabled_aovs.append(
+                (
+                    'beauty',
+                    cmds.getAttr('defaultRenderGlobals.imfPluginKey')
+                )
+            )
         return enabled_aovs
 
 
 class ExpectedFilesVray(AExpectedFiles):
 
     def __init__(self, layer):
-        super(self).__init__(layer)
+        super(ExpectedFilesVray, self).__init__(layer)
         self.renderer = 'vray'
 
-    def _get_aovs(self):
+    def get_aovs(self):
 
         default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
         enabled_aovs = []
@@ -545,10 +573,10 @@ class ExpectedFilesVray(AExpectedFiles):
 class ExpectedFilesRedshift(AExpectedFiles):
 
     def __init__(self, layer):
-        super(self).__init__(layer)
+        super(ExpectedFilesRedshift, self).__init__(layer)
         self.renderer = 'redshift'
 
-    def _get_aovs(self):
+    def get_aovs(self):
         enabled_aovs = []
         default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
         rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')]
@@ -16,8 +16,8 @@ class CollectRenderableCamera(pyblish.api.InstancePlugin):
                 "renderlayer"]
 
     def process(self, instance):
-        layer = "rs_%s" % instance.data["setMembers"]
-
+        layer = instance.data["setMembers"]
+        self.log.info("layer: {}".format(layer))
         cameras = cmds.ls(type="camera", long=True)
         renderable = [c for c in cameras if
                       lib.get_attr_in_layer("%s.renderable" % c, layer=layer)]
@@ -150,8 +150,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
         dirname = os.path.join(workspace, "renders")
         renderlayer = instance.data['setMembers']       # rs_beauty
         renderlayer_name = instance.data['subset']      # beauty
-        renderlayer_globals = instance.data["renderGlobals"]
-        legacy_layers = renderlayer_globals["UseLegacyRenderLayers"]
+        # renderlayer_globals = instance.data["renderGlobals"]
+        # legacy_layers = renderlayer_globals["UseLegacyRenderLayers"]
         deadline_user = context.data.get("deadlineUser", getpass.getuser())
         jobname = "%s - %s" % (filename, instance.name)
@@ -212,7 +212,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
             "UsingRenderLayers": True,
 
             # Use legacy Render Layer system
-            "UseLegacyRenderLayers": legacy_layers,
+            # "UseLegacyRenderLayers": legacy_layers,
 
             # Render only this layer
             "RenderLayer": renderlayer,