mirror of https://github.com/ynput/ayon-core.git
synced 2025-12-25 05:14:40 +01:00

Merge branch 'develop' into feature/PYPE-762_multi_root

This commit is contained in commit ccafd3d360.
30 changed files with 507 additions and 397 deletions

@@ -9,7 +9,6 @@ from pypeapp import config, Roots
 import logging
 log = logging.getLogger(__name__)

-__version__ = "2.6.0"

 PROJECT_PLUGINS_PATH = os.environ.get("PYPE_PROJECT_PLUGINS")
 PACKAGE_DIR = os.path.dirname(__file__)

@@ -214,14 +214,14 @@ def script_name():

 def add_button_write_to_read(node):
     name = "createReadNode"
-    label = "Create Read"
+    label = "[ Create Read ]"
     value = "import write_to_read;write_to_read.write_to_read(nuke.thisNode())"
     k = nuke.PyScript_Knob(name, label, value)
     k.setFlag(0x1000)
     node.addKnob(k)


-def create_write_node(name, data, input=None, prenodes=None):
+def create_write_node(name, data, input=None, prenodes=None, review=True):
     ''' Creating write node which is group node

     Arguments:

@@ -230,6 +230,7 @@ def create_write_node(name, data, input=None, prenodes=None):
         input (node): selected node to connect to
         prenodes (list, optional): list of lists, definitions for nodes
             to be created before write
+        review (bool): adding review knob

     Example:
         prenodes = [(

@@ -388,15 +389,8 @@ def create_write_node(name, data, input=None, prenodes=None):

     add_rendering_knobs(GN)

-    # adding write to read button
-    add_button_write_to_read(GN)
-
-    divider = nuke.Text_Knob('')
-    GN.addKnob(divider)
-
-    # set tile color
-    tile_color = _data.get("tile_color", "0xff0000ff")
-    GN["tile_color"].setValue(tile_color)
+    if review:
+        add_review_knob(GN)

     # add render button
     lnk = nuke.Link_Knob("Render")

@@ -404,9 +398,20 @@ def create_write_node(name, data, input=None, prenodes=None):
     lnk.setName("Render")
     GN.addKnob(lnk)

+    divider = nuke.Text_Knob('')
+    GN.addKnob(divider)
+
+    # adding write to read button
+    add_button_write_to_read(GN)
+
     # Deadline tab.
     add_deadline_tab(GN)

+    # set tile color
+    tile_color = _data.get("tile_color", "0xff0000ff")
+    GN["tile_color"].setValue(tile_color)
+
     return GN

@@ -428,6 +433,17 @@ def add_rendering_knobs(node):
         knob = nuke.Boolean_Knob("render_farm", "Render on Farm")
         knob.setValue(False)
         node.addKnob(knob)
     return node

+
+def add_review_knob(node):
+    ''' Adds additional review knob to given node
+
+    Arguments:
+        node (obj): nuke node object to be fixed
+
+    Return:
+        node (obj): with added knob
+    '''
+    if "review" not in node.knobs():
+        knob = nuke.Boolean_Knob("review", "Review")
+        knob.setValue(True)

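For orientation, the new `review` flag threads from `create_write_node` into the knob helper above (the hunk is truncated before the `addKnob` call). A minimal sketch of the pattern, assuming it runs inside a Nuke session where `nuke` is importable:

    import nuke

    def add_review_knob(node):
        # idempotent: only add the "review" knob once per node
        if "review" not in node.knobs():
            knob = nuke.Boolean_Knob("review", "Review")
            knob.setValue(True)
            node.addKnob(knob)
        return node

    # hypothetical call site mirroring the diff: prerender writes opt out
    # group_node = create_write_node(name, data, review=False)
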
@@ -22,6 +22,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
         'setdress': 'setdress',
         'pointcache': 'cache',
         'render': 'render',
+        'render2d': 'render',
         'nukescript': 'comp',
         'write': 'render',
         'review': 'mov',

@@ -68,6 +68,10 @@ class ExtractBurnin(pype.api.Extractor):
         for i, repre in enumerate(instance.data["representations"]):
             self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))

+            if "multipartExr" in repre.get("tags", []):
+                # ffmpeg doesn't support multipart exrs
+                continue
+
             if "burnin" not in repre.get("tags", []):
                 continue

@@ -36,6 +36,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
         if not isinstance(repre['files'], list):
             continue

+        if "multipartExr" in tags:
+            # ffmpeg doesn't support multipart exrs
+            continue
+
         stagingdir = os.path.normpath(repre.get("stagingDir"))
         input_file = repre['files'][0]

@@ -57,11 +57,16 @@ class ExtractReview(pyblish.api.InstancePlugin):
         # filter out mov and img sequences
         representations_new = representations[:]
         for repre in representations:

             if repre['ext'] not in self.ext_filter:
                 continue

+            tags = repre.get("tags", [])
+
+            if "multipartExr" in tags:
+                # ffmpeg doesn't support multipart exrs
+                continue
+
             if "thumbnail" in tags:
                 continue

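The ExtractBurnin, ExtractJpegEXR and ExtractReview changes above all add the same guard: representations tagged `multipartExr` are skipped before any ffmpeg work, since ffmpeg cannot read multipart EXRs. A hypothetical helper (not in the codebase) expressing the shared check:

    def is_multipart_exr(repre):
        """Return True when ffmpeg should skip this representation."""
        return "multipartExr" in repre.get("tags", [])

    # usage inside any of the three loops:
    # if is_multipart_exr(repre):
    #     continue
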
@@ -11,7 +11,9 @@ class ExtractReviewSlate(pype.api.Extractor):

     label = "Review with Slate frame"
     order = pyblish.api.ExtractorOrder + 0.031
-    families = ["slate"]
+    families = ["slate", "review"]
+    match = pyblish.api.Subset

     hosts = ["nuke", "maya", "shell"]
     optional = True

@@ -34,7 +36,8 @@ class ExtractReviewSlate(pype.api.Extractor):
         fps = inst_data.get("fps")

         # defining image ratios
-        resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height
+        resolution_ratio = ((float(resolution_width) * pixel_aspect) /
+                            resolution_height)
         delivery_ratio = float(to_width) / float(to_height)
         self.log.debug("__ resolution_ratio: `{}`".format(resolution_ratio))
         self.log.debug("__ delivery_ratio: `{}`".format(delivery_ratio))

@@ -89,7 +92,7 @@ class ExtractReviewSlate(pype.api.Extractor):
         input_args.extend([
             "-r {}".format(fps),
             "-t 0.04"]
-        )
+         )

         # output args
         codec_args = repre["_profile"].get('codec', [])

@@ -111,7 +114,7 @@ class ExtractReviewSlate(pype.api.Extractor):
                 self.log.debug("lower then delivery")
                 width_scale = int(to_width * scale_factor)
                 width_half_pad = int((
-                    to_width - width_scale)/2)
+                    to_width - width_scale) / 2)
                 height_scale = to_height
                 height_half_pad = 0
             else:

@@ -124,7 +127,7 @@ class ExtractReviewSlate(pype.api.Extractor):
                 height_scale = int(
                     resolution_height * scale_factor)
                 height_half_pad = int(
-                    (to_height - height_scale)/2)
+                    (to_height - height_scale) / 2)

                 self.log.debug(
                     "__ width_scale: `{}`".format(width_scale))

@@ -135,8 +138,10 @@ class ExtractReviewSlate(pype.api.Extractor):
             self.log.debug(
                 "__ height_half_pad: `{}`".format(height_half_pad))

-            scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
-                width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
+            scaling_arg = ("scale={0}x{1}:flags=lanczos,"
+                           "pad={2}:{3}:{4}:{5}:black,setsar=1").format(
+                               width_scale, height_scale, to_width, to_height,
+                               width_half_pad, height_half_pad
             )

             vf_back = self.add_video_filter_args(

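The `scaling_arg` change above is a pure reflow, but the padding arithmetic around it is worth spelling out. A runnable sketch with illustrative numbers (the plugin branches on which ratio is larger; this condensed form uses min() to the same effect):

    to_width, to_height = 1920, 1080              # delivery size
    src_w, src_h, pixel_aspect = 2048, 858, 1.0   # illustrative source

    # fit the source into the delivery frame, then centre it on black
    scale_factor = min(float(to_width) / (src_w * pixel_aspect),
                       float(to_height) / src_h)
    width_scale = int(src_w * pixel_aspect * scale_factor)
    height_scale = int(src_h * scale_factor)
    width_half_pad = int((to_width - width_scale) / 2)
    height_half_pad = int((to_height - height_scale) / 2)

    scaling_arg = ("scale={0}x{1}:flags=lanczos,"
                   "pad={2}:{3}:{4}:{5}:black,setsar=1").format(
                       width_scale, height_scale, to_width, to_height,
                       width_half_pad, height_half_pad)
    # scale=1920x804:flags=lanczos,pad=1920:1080:0:138:black,setsar=1
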
@@ -111,13 +111,13 @@ class IntegrateMasterVersion(pyblish.api.InstancePlugin):

         all_copied_files = []
         transfers = instance.data.get("transfers", list())
-        for dst in transfers.values():
+        for _src, dst in transfers:
             dst = os.path.normpath(dst)
             if dst not in all_copied_files:
                 all_copied_files.append(dst)

         hardlinks = instance.data.get("hardlinks", list())
-        for dst in hardlinks.values():
+        for _src, dst in hardlinks:
             dst = os.path.normpath(dst)
             if dst not in all_copied_files:
                 all_copied_files.append(dst)

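The IntegrateMasterVersion fix above corrects an iteration bug: `transfers` and `hardlinks` are lists of `(src, dst)` tuples, so the old `.values()` call would have raised AttributeError. A minimal sketch with illustrative paths:

    import os

    transfers = [
        ("/work/renders/sh010.0001.exr", "/publish/renders/sh010.0001.exr"),
        ("/work/renders/sh010.0002.exr", "/publish/renders/sh010.0002.exr"),
    ]

    all_copied_files = []
    for _src, dst in transfers:
        dst = os.path.normpath(dst)
        if dst not in all_copied_files:
            all_copied_files.append(dst)
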
@@ -64,6 +64,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         "scene",
         "vrayproxy",
         "render",
+        "prerender",
         "imagesequence",
         "review",
         "rendersetup",

@@ -387,6 +388,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             if not dst_start_frame:
                 dst_start_frame = dst_padding

+            # Store used frame value to template data
+            template_data["frame"] = dst_start_frame
             dst = "{0}{1}{2}".format(
                 dst_head,
                 dst_start_frame,

@@ -135,7 +135,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

     hosts = ["fusion", "maya", "nuke"]

-    families = ["render.farm", "renderlayer", "imagesequence"]
+    families = ["render.farm", "prerener", "renderlayer", "imagesequence"]

     aov_filter = {"maya": ["beauty"]}

@@ -161,8 +161,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         instance_transfer = {
             "slate": ["slateFrame"],
             "review": ["lutPath"],
-            "render.farm": ["bakeScriptPath", "bakeRenderPath",
-                            "bakeWriteNodeName", "version"]
+            "render2d": ["bakeScriptPath", "bakeRenderPath",
+                         "bakeWriteNodeName", "version"]
         }

         # list of family names to transfer to new family if present

@@ -229,8 +229,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

         environment = job["Props"].get("Env", {})
         environment["PYPE_METADATA_FILE"] = metadata_path
-        environment["AVALON_PROJECT"] = api.Session["AVALON_PROJECT"]
-
+        environment["AVALON_PROJECT"] = io.Session["AVALON_PROJECT"]
         i = 0
         for index, key in enumerate(environment):
             if key.upper() in self.enviro_filter:

@@ -454,6 +453,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             "tags": ["review", "preview"] if preview else [],
         }

+        if instance.get("multipartExr", False):
+            rep["tags"].append["multipartExr"]
+
         representations.append(rep)

         self._solve_families(instance, preview)

@@ -598,13 +600,26 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             "pixelAspect": data.get("pixelAspect", 1),
             "resolutionWidth": data.get("resolutionWidth", 1920),
             "resolutionHeight": data.get("resolutionHeight", 1080),
+            "multipartExr": data.get("multipartExr", False)
         }

+        if "prerender" in instance.data["families"]:
+            instance_skeleton_data.update({
+                "family": "prerender",
+                "families": []})
+
         # transfer specific families from original instance to new render
         for item in self.families_transfer:
             if item in instance.data.get("families", []):
                 instance_skeleton_data["families"] += [item]

+        if "render.farm" in instance.data["families"]:
+            instance_skeleton_data.update({
+                "family": "render2d",
+                "families": ["render"] + [f for f in instance.data["families"]
+                                          if "render.farm" not in f]
+            })
+
         # transfer specific properties from original instance based on
         # mapping dictionary `instance_transfer`
         for key, values in self.instance_transfer.items():

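To make the family remapping above concrete, here is the `render.farm` branch run against illustrative data (pure Python, runnable as-is):

    instance_families = ["render.farm", "slate"]
    instance_skeleton_data = {}

    if "render.farm" in instance_families:
        instance_skeleton_data.update({
            "family": "render2d",
            "families": ["render"] + [f for f in instance_families
                                      if "render.farm" not in f]
        })

    # instance_skeleton_data ==
    # {"family": "render2d", "families": ["render", "slate"]}
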
@@ -6,10 +6,11 @@ from collections import defaultdict

 from maya import cmds

-from avalon import api
+from avalon import api, io
 from avalon.maya import lib as avalon_lib, pipeline
 from pype.maya import lib
 from pypeapp import config
+from pprint import pprint


 class YetiCacheLoader(api.Loader):

@@ -101,12 +102,23 @@ class YetiCacheLoader(api.Loader):

     def update(self, container, representation):

+        io.install()
         namespace = container["namespace"]
         container_node = container["objectName"]

+        fur_settings = io.find_one(
+            {"parent": representation["parent"], "name": "fursettings"}
+        )
+
+        pprint({"parent": representation["parent"], "name": "fursettings"})
+        pprint(fur_settings)
+        assert fur_settings is not None, (
+            "cannot find fursettings representation"
+        )
+
+        settings_fname = api.get_representation_path(fur_settings)
         path = api.get_representation_path(representation)
         # Get all node data
-        fname, ext = os.path.splitext(path)
-        settings_fname = "{}.fursettings".format(fname)
         with open(settings_fname, "r") as fp:
             settings = json.load(fp)

@@ -147,13 +159,14 @@ class YetiCacheLoader(api.Loader):

         cmds.delete(to_remove)

+        # replace frame in filename with %04d
+        RE_frame = re.compile(r"(\d+)(\.fur)$")
+        file_name = re.sub(RE_frame, r"%04d\g<2>", os.path.basename(path))
         for cb_id, data in meta_data_lookup.items():

-            # Update cache file name
-            file_name = data["name"].replace(":", "_")
-            cache_file_path = "{}.%04d.fur".format(file_name)
             data["attrs"]["cacheFileName"] = os.path.join(
-                path, cache_file_path)
+                os.path.dirname(path), file_name)

             if cb_id not in scene_lookup:

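The cache-name rewrite above replaces per-node string building with a single regex substitution on the representation path. A runnable sketch with an illustrative file name:

    import os
    import re

    path = "/projects/show/publish/yeti/fur_sim.0042.fur"  # illustrative

    RE_frame = re.compile(r"(\d+)(\.fur)$")
    # "0042.fur" -> "%04d.fur", keeping the extension group
    file_name = re.sub(RE_frame, r"%04d\g<2>", os.path.basename(path))

    print(file_name)                                   # fur_sim.%04d.fur
    print(os.path.join(os.path.dirname(path), file_name))
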
@@ -197,6 +210,12 @@ class YetiCacheLoader(api.Loader):
             yeti_node = yeti_nodes[0]

             for attr, value in data["attrs"].items():
+                # handle empty attribute strings. Those are reported
+                # as None, so their type is NoneType and this is not
+                # supported on attributes in Maya. We change it to
+                # empty string.
+                if value is None:
+                    value = ""
                 lib.set_attribute(attr, value, yeti_node)

         cmds.setAttr("{}.representation".format(container_node),

@@ -53,38 +53,40 @@ from avalon import maya, api
 import pype.maya.lib as lib


-R_SINGLE_FRAME = re.compile(r'^(-?)\d+$')
-R_FRAME_RANGE = re.compile(r'^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$')
-R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
+R_SINGLE_FRAME = re.compile(r"^(-?)\d+$")
+R_FRAME_RANGE = re.compile(r"^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$")
+R_FRAME_NUMBER = re.compile(r".+\.(?P<frame>[0-9]+)\..+")
 R_LAYER_TOKEN = re.compile(
-    r'.*%l.*|.*<layer>.*|.*<renderlayer>.*', re.IGNORECASE)
-R_AOV_TOKEN = re.compile(r'.*%a.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE)
-R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a|<aov>|<renderpass>', re.IGNORECASE)
-R_REMOVE_AOV_TOKEN = re.compile(r'_%a|_<aov>|_<renderpass>', re.IGNORECASE)
+    r".*%l.*|.*<layer>.*|.*<renderlayer>.*", re.IGNORECASE
+)
+R_AOV_TOKEN = re.compile(r".*%a.*|.*<aov>.*|.*<renderpass>.*", re.IGNORECASE)
+R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)
+R_REMOVE_AOV_TOKEN = re.compile(r"_%a|_<aov>|_<renderpass>", re.IGNORECASE)
 # to remove unused renderman tokens
-R_CLEAN_FRAME_TOKEN = re.compile(r'\.?<f\d>\.?', re.IGNORECASE)
-R_CLEAN_EXT_TOKEN = re.compile(r'\.?<ext>\.?', re.IGNORECASE)
+R_CLEAN_FRAME_TOKEN = re.compile(r"\.?<f\d>\.?", re.IGNORECASE)
+R_CLEAN_EXT_TOKEN = re.compile(r"\.?<ext>\.?", re.IGNORECASE)

 R_SUBSTITUTE_LAYER_TOKEN = re.compile(
-    r'%l|<layer>|<renderlayer>', re.IGNORECASE)
-R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
-R_SUBSTITUTE_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
+    r"%l|<layer>|<renderlayer>", re.IGNORECASE
+)
+R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r"%c|<camera>", re.IGNORECASE)
+R_SUBSTITUTE_SCENE_TOKEN = re.compile(r"%s|<scene>", re.IGNORECASE)

 RENDERER_NAMES = {
-    'mentalray': 'MentalRay',
-    'vray': 'V-Ray',
-    'arnold': 'Arnold',
-    'renderman': 'Renderman',
-    'redshift': 'Redshift'
+    "mentalray": "MentalRay",
+    "vray": "V-Ray",
+    "arnold": "Arnold",
+    "renderman": "Renderman",
+    "redshift": "Redshift",
 }

 # not sure about the renderman image prefix
 ImagePrefixes = {
-    'mentalray': 'defaultRenderGlobals.imageFilePrefix',
-    'vray': 'vraySettings.fileNamePrefix',
-    'arnold': 'defaultRenderGlobals.imageFilePrefix',
-    'renderman': 'rmanGlobals.imageFileFormat',
-    'redshift': 'defaultRenderGlobals.imageFilePrefix'
+    "mentalray": "defaultRenderGlobals.imageFilePrefix",
+    "vray": "vraySettings.fileNamePrefix",
+    "arnold": "defaultRenderGlobals.imageFilePrefix",
+    "renderman": "rmanGlobals.imageFileFormat",
+    "redshift": "defaultRenderGlobals.imageFilePrefix",
 }

@@ -98,21 +100,23 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
     def process(self, context):
         render_instance = None
         for instance in context:
-            if 'rendering' in instance.data['families']:
+            if "rendering" in instance.data["families"]:
                 render_instance = instance
                 render_instance.data["remove"] = True

             # make sure workfile instance publishing is enabled
-            if 'workfile' in instance.data['families']:
+            if "workfile" in instance.data["families"]:
                 instance.data["publish"] = True

         if not render_instance:
-            self.log.info("No render instance found, skipping render "
-                          "layer collection.")
+            self.log.info(
+                "No render instance found, skipping render "
+                "layer collection."
+            )
             return

         render_globals = render_instance
-        collected_render_layers = render_instance.data['setMembers']
+        collected_render_layers = render_instance.data["setMembers"]
         filepath = context.data["currentFile"].replace("\\", "/")
         asset = api.Session["AVALON_ASSET"]
         workspace = context.data["workspaceDir"]

@@ -127,22 +131,24 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             try:
                 expected_layer_name = re.search(r"^LAYER_(.*)", layer).group(1)
             except IndexError:
-                msg = ("Invalid layer name in set [ {} ]".format(layer))
+                msg = "Invalid layer name in set [ {} ]".format(layer)
                 self.log.warnig(msg)
                 continue

             self.log.info("processing %s" % layer)
             # check if layer is part of renderSetup
             if expected_layer_name not in maya_render_layers:
-                msg = ("Render layer [ {} ] is not in "
-                       "Render Setup".format(expected_layer_name))
+                msg = "Render layer [ {} ] is not in " "Render Setup".format(
+                    expected_layer_name
+                )
                 self.log.warning(msg)
                 continue

             # check if layer is renderable
             if not maya_render_layers[expected_layer_name].isRenderable():
-                msg = ("Render layer [ {} ] is not "
-                       "renderable".format(expected_layer_name))
+                msg = "Render layer [ {} ] is not " "renderable".format(
+                    expected_layer_name
+                )
                 self.log.warning(msg)
                 continue

@@ -151,26 +157,31 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             attachTo = []
             if sets:
                 for s in sets:
-                    attachTo.append({
-                        "version": None,  # we need integrator to get version
-                        "subset": s,
-                        "family": cmds.getAttr("{}.family".format(s))
-                    })
+                    attachTo.append(
+                        {
+                            "version": None,  # we need integrator for that
+                            "subset": s,
+                            "family": cmds.getAttr("{}.family".format(s)),
+                        }
+                    )
                     self.log.info(" -> attach render to: {}".format(s))

             layer_name = "rs_{}".format(expected_layer_name)

             # collect all frames we are expecting to be rendered
             renderer = cmds.getAttr(
-                'defaultRenderGlobals.currentRenderer').lower()
+                "defaultRenderGlobals.currentRenderer"
+            ).lower()
             # handle various renderman names
-            if renderer.startswith('renderman'):
-                renderer = 'renderman'
+            if renderer.startswith("renderman"):
+                renderer = "renderman"

             # return all expected files for all cameras and aovs in given
             # frame range
-            exp_files = ExpectedFiles().get(renderer, layer_name)
-            assert exp_files, ("no file names were generated, this is bug")
+            exf = ExpectedFiles()
+            exp_files = exf.get(renderer, layer_name)
+            self.log.info("multipart: {}".format(exf.multipart))
+            assert exp_files, "no file names were generated, this is bug"

             # if we want to attach render to subset, check if we have AOV's
             # in expectedFiles. If so, raise error as we cannot attach AOV

@@ -178,7 +189,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             if attachTo:
                 assert len(exp_files[0].keys()) == 1, (
                     "attaching multiple AOVs or renderable cameras to "
-                    "subset is not supported")
+                    "subset is not supported"
+                )

             # append full path
             full_exp_files = []

@@ -233,6 +245,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                 "subset": expected_layer_name,
                 "attachTo": attachTo,
                 "setMembers": layer_name,
+                "multipartExr": exf.multipart,
                 "publish": True,

                 "handleStart": handle_start,

@@ -252,14 +265,13 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                 "asset": asset,
                 "time": api.time(),
                 "author": context.data["user"],
-
                 # Add source to allow tracing back to the scene from
                 # which was submitted originally
                 "source": filepath,
                 "expectedFiles": full_exp_files,
                 "resolutionWidth": cmds.getAttr("defaultResolution.width"),
                 "resolutionHeight": cmds.getAttr("defaultResolution.height"),
-                "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect")
+                "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect"),
             }

             # Apply each user defined attribute as data

@@ -282,8 +294,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin):

             # Define nice label
             label = "{0} ({1})".format(expected_layer_name, data["asset"])
-            label += " [{0}-{1}]".format(int(data["frameStartHandle"]),
-                                         int(data["frameEndHandle"]))
+            label += " [{0}-{1}]".format(
+                int(data["frameStartHandle"]), int(data["frameEndHandle"])
+            )

             instance = context.create_instance(expected_layer_name)
             instance.data["label"] = label

@@ -319,7 +332,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             machine_list = attributes["machineList"]
             if machine_list:
                 key = "Whitelist" if attributes["whitelist"] else "Blacklist"
-                options['renderGlobals'][key] = machine_list
+                options["renderGlobals"][key] = machine_list

             # Suspend publish job
             state = "Suspended" if attributes["suspendPublishJob"] else "Active"

@@ -375,32 +388,41 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
         return rset.getOverrides()

     def get_render_attribute(self, attr, layer):
-        return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
-                                     layer=layer)
+        return lib.get_attr_in_layer(
+            "defaultRenderGlobals.{}".format(attr), layer=layer
+        )


 class ExpectedFiles:
+    multipart = False
+
     def get(self, renderer, layer):
-        if renderer.lower() == 'arnold':
-            return ExpectedFilesArnold(layer).get_files()
-        elif renderer.lower() == 'vray':
-            return ExpectedFilesVray(layer).get_files()
-        elif renderer.lower() == 'redshift':
-            return ExpectedFilesRedshift(layer).get_files()
-        elif renderer.lower() == 'mentalray':
-            return ExpectedFilesMentalray(layer).get_files()
-        elif renderer.lower() == 'renderman':
-            return ExpectedFilesRenderman(layer).get_files()
+        if renderer.lower() == "arnold":
+            return self._get_files(ExpectedFilesArnold(layer))
+        elif renderer.lower() == "vray":
+            return self._get_files(ExpectedFilesVray(layer))
+        elif renderer.lower() == "redshift":
+            return self._get_files(ExpectedFilesRedshift(layer))
+        elif renderer.lower() == "mentalray":
+            return self._get_files(ExpectedFilesMentalray(layer))
+        elif renderer.lower() == "renderman":
+            return self._get_files(ExpectedFilesRenderman(layer))
         else:
             raise UnsupportedRendererException(
-                "unsupported {}".format(renderer))
+                "unsupported {}".format(renderer)
+            )

+    def _get_files(self, renderer):
+        files = renderer.get_files()
+        self.multipart = renderer.multipart
+        return files


 @six.add_metaclass(ABCMeta)
 class AExpectedFiles:
     renderer = None
     layer = None
+    multipart = False

     def __init__(self, layer):
         self.layer = layer

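The `_get_files` indirection above exists so the collector can read the renderer-specific `multipart` flag after file expansion; `get_files()` sets it as a side effect. A condensed, runnable sketch of the handshake (`StubRenderer` is hypothetical; the wrapper class mirrors the diff):

    class StubRenderer(object):
        multipart = False

        def get_files(self):
            self.multipart = True  # e.g. merged multichannel EXR detected
            return ["beauty.0001.exr"]

    class ExpectedFiles(object):
        multipart = False

        def _get_files(self, renderer):
            files = renderer.get_files()
            self.multipart = renderer.multipart
            return files

    exf = ExpectedFiles()
    exp_files = exf._get_files(StubRenderer())
    print(exf.multipart)  # True
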
@@ -414,7 +436,8 @@ class AExpectedFiles:
             file_prefix = cmds.getAttr(ImagePrefixes[self.renderer])
         except KeyError:
             raise UnsupportedRendererException(
-                "Unsupported renderer {}".format(self.renderer))
+                "Unsupported renderer {}".format(self.renderer)
+            )
         return file_prefix

     def _get_layer_data(self):

@@ -440,7 +463,7 @@ class AExpectedFiles:
         if not file_prefix:
             raise RuntimeError("Image prefix not set")

-        default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
+        default_ext = cmds.getAttr("defaultRenderGlobals.imfPluginKey")

         # ________________________________________________
         # __________________/ ______________________________________________/

@@ -461,10 +484,10 @@ class AExpectedFiles:
         layer_name = self.layer
         if self.layer.startswith("rs_"):
             layer_name = self.layer[3:]
-        start_frame = int(self.get_render_attribute('startFrame'))
-        end_frame = int(self.get_render_attribute('endFrame'))
-        frame_step = int(self.get_render_attribute('byFrameStep'))
-        padding = int(self.get_render_attribute('extensionPadding'))
+        start_frame = int(self.get_render_attribute("startFrame"))
+        end_frame = int(self.get_render_attribute("endFrame"))
+        frame_step = int(self.get_render_attribute("byFrameStep"))
+        padding = int(self.get_render_attribute("extensionPadding"))

         scene_data = {
             "frameStart": start_frame,

@@ -477,7 +500,7 @@ class AExpectedFiles:
             "renderer": renderer,
             "defaultExt": default_ext,
             "filePrefix": file_prefix,
-            "enabledAOVs": enabled_aovs
+            "enabledAOVs": enabled_aovs,
         }
         return scene_data

@@ -493,21 +516,24 @@ class AExpectedFiles:
             # in Redshift
             (R_REMOVE_AOV_TOKEN, ""),
             (R_CLEAN_FRAME_TOKEN, ""),
-            (R_CLEAN_EXT_TOKEN, "")
+            (R_CLEAN_EXT_TOKEN, ""),
         )

         for regex, value in mappings:
             file_prefix = re.sub(regex, value, file_prefix)

         for frame in range(
-                int(layer_data["frameStart"]),
-                int(layer_data["frameEnd"]) + 1,
-                int(layer_data["frameStep"])):
+            int(layer_data["frameStart"]),
+            int(layer_data["frameEnd"]) + 1,
+            int(layer_data["frameStep"]),
+        ):
             expected_files.append(
-                '{}.{}.{}'.format(file_prefix,
-                                  str(frame).rjust(
-                                      layer_data["padding"], "0"),
-                                  layer_data["defaultExt"]))
+                "{}.{}.{}".format(
+                    file_prefix,
+                    str(frame).rjust(layer_data["padding"], "0"),
+                    layer_data["defaultExt"],
+                )
+            )
         return expected_files

     def _generate_aov_file_sequences(self, layer_data):

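The reformatted loop above expands a token-substituted file prefix into one name per frame. An equivalent, runnable sketch with illustrative layer data:

    layer_data = {"frameStart": 1, "frameEnd": 3, "frameStep": 1,
                  "padding": 4, "defaultExt": "exr"}
    file_prefix = "sh010_beauty"  # illustrative, tokens already substituted

    expected_files = [
        "{}.{}.{}".format(file_prefix,
                          str(frame).rjust(layer_data["padding"], "0"),
                          layer_data["defaultExt"])
        for frame in range(int(layer_data["frameStart"]),
                           int(layer_data["frameEnd"]) + 1,
                           int(layer_data["frameStep"]))
    ]
    # ['sh010_beauty.0001.exr', 'sh010_beauty.0002.exr',
    #  'sh010_beauty.0003.exr']
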
@@ -523,7 +549,7 @@ class AExpectedFiles:
                     (R_SUBSTITUTE_CAMERA_TOKEN, cam),
                     (R_SUBSTITUTE_AOV_TOKEN, aov[0]),
                     (R_CLEAN_FRAME_TOKEN, ""),
-                    (R_CLEAN_EXT_TOKEN, "")
+                    (R_CLEAN_EXT_TOKEN, ""),
                 )

                 for regex, value in mappings:

@@ -531,14 +557,17 @@ class AExpectedFiles:

                 aov_files = []
                 for frame in range(
-                        int(layer_data["frameStart"]),
-                        int(layer_data["frameEnd"]) + 1,
-                        int(layer_data["frameStep"])):
+                    int(layer_data["frameStart"]),
+                    int(layer_data["frameEnd"]) + 1,
+                    int(layer_data["frameStep"]),
+                ):
                     aov_files.append(
-                        '{}.{}.{}'.format(
+                        "{}.{}.{}".format(
                             file_prefix,
                             str(frame).rjust(layer_data["padding"], "0"),
-                            aov[1]))
+                            aov[1],
+                        )
+                    )

                 # if we have more then one renderable camera, append
                 # camera name to AOV to allow per camera AOVs.

@@ -572,17 +601,19 @@ class AExpectedFiles:
         return expected_files

     def get_renderable_cameras(self):
-        cam_parents = [cmds.listRelatives(x, ap=True)[-1]
-                       for x in cmds.ls(cameras=True)]
+        cam_parents = [
+            cmds.listRelatives(x, ap=True)[-1] for x in cmds.ls(cameras=True)
+        ]

         renderable_cameras = []
         for cam in cam_parents:
             renderable = False
-            if self.maya_is_true(cmds.getAttr('{}.renderable'.format(cam))):
+            if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam))):
                 renderable = True

             for override in self.get_layer_overrides(
-                    '{}.renderable'.format(cam), self.layer):
+                "{}.renderable".format(cam), self.layer
+            ):
                 renderable = self.maya_is_true(override)

             if renderable:

@@ -608,16 +639,18 @@ class AExpectedFiles:
         if connections:
             for connection in connections:
                 if connection:
-                    node_name = connection.split('.')[0]
-                    if cmds.nodeType(node_name) == 'renderLayer':
-                        attr_name = '%s.value' % '.'.join(
-                            connection.split('.')[:-1])
+                    node_name = connection.split(".")[0]
+                    if cmds.nodeType(node_name) == "renderLayer":
+                        attr_name = "%s.value" % ".".join(
+                            connection.split(".")[:-1]
+                        )
                         if node_name == layer:
                             yield cmds.getAttr(attr_name)

     def get_render_attribute(self, attr):
-        return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
-                                     layer=self.layer)
+        return lib.get_attr_in_layer(
+            "defaultRenderGlobals.{}".format(attr), layer=self.layer
+        )


 class ExpectedFilesArnold(AExpectedFiles):

@@ -625,25 +658,28 @@ class ExpectedFilesArnold(AExpectedFiles):
     # Arnold AOV driver extension mapping
     # Is there a better way?
     aiDriverExtension = {
-        'jpeg': 'jpg',
-        'exr': 'exr',
-        'deepexr': 'exr',
-        'png': 'png',
-        'tiff': 'tif',
-        'mtoa_shaders': 'ass',  # TODO: research what those last two should be
-        'maya': ''
+        "jpeg": "jpg",
+        "exr": "exr",
+        "deepexr": "exr",
+        "png": "png",
+        "tiff": "tif",
+        "mtoa_shaders": "ass",  # TODO: research what those last two should be
+        "maya": "",
     }

     def __init__(self, layer):
         super(ExpectedFilesArnold, self).__init__(layer)
-        self.renderer = 'arnold'
+        self.renderer = "arnold"

     def get_aovs(self):
         enabled_aovs = []
         try:
-            if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode')
-                    and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):  # noqa: W503, E501
+            if not (
+                cmds.getAttr("defaultArnoldRenderOptions.aovMode")
+                and not cmds.getAttr("defaultArnoldDriver.mergeAOVs")  # noqa: W503, E501
+            ):
+                # AOVs are merged in mutli-channel file
+                self.multipart = True
                 return enabled_aovs
         except ValueError:
             # this occurs when Render Setting windows was not opened yet. In

@@ -656,46 +692,35 @@ class ExpectedFilesArnold(AExpectedFiles):
         # AOVs are set to be rendered separately. We should expect
         # <RenderPass> token in path.

-        ai_aovs = [n for n in cmds.ls(type='aiAOV')]
+        ai_aovs = [n for n in cmds.ls(type="aiAOV")]

         for aov in ai_aovs:
-            enabled = self.maya_is_true(
-                cmds.getAttr('{}.enabled'.format(aov)))
-            ai_driver = cmds.listConnections(
-                '{}.outputs'.format(aov))[0]
-            ai_translator = cmds.getAttr(
-                '{}.aiTranslator'.format(ai_driver))
+            enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
+            ai_driver = cmds.listConnections("{}.outputs".format(aov))[0]
+            ai_translator = cmds.getAttr("{}.aiTranslator".format(ai_driver))
             try:
                 aov_ext = self.aiDriverExtension[ai_translator]
             except KeyError:
-                msg = ('Unrecognized arnold '
-                       'driver format for AOV - {}').format(
-                           cmds.getAttr('{}.name'.format(aov))
-                )
+                msg = (
+                    "Unrecognized arnold " "driver format for AOV - {}"
+                ).format(cmds.getAttr("{}.name".format(aov)))
                 raise AOVError(msg)

             for override in self.get_layer_overrides(
-                    '{}.enabled'.format(aov), self.layer):
+                "{}.enabled".format(aov), self.layer
+            ):
                 enabled = self.maya_is_true(override)
             if enabled:
                 # If aov RGBA is selected, arnold will translate it to `beauty`
-                aov_name = cmds.getAttr('%s.name' % aov)
-                if aov_name == 'RGBA':
-                    aov_name = 'beauty'
-                enabled_aovs.append(
-                    (
-                        aov_name,
-                        aov_ext
-                    )
-                )
+                aov_name = cmds.getAttr("%s.name" % aov)
+                if aov_name == "RGBA":
+                    aov_name = "beauty"
+                enabled_aovs.append((aov_name, aov_ext))
         # Append 'beauty' as this is arnolds
         # default. If <RenderPass> token is specified and no AOVs are
         # defined, this will be used.
         enabled_aovs.append(
-            (
-                u'beauty',
-                cmds.getAttr('defaultRenderGlobals.imfPluginKey')
-            )
+            (u"beauty", cmds.getAttr("defaultRenderGlobals.imfPluginKey"))
         )
         return enabled_aovs

@@ -709,7 +734,7 @@ class ExpectedFilesVray(AExpectedFiles):

     def __init__(self, layer):
         super(ExpectedFilesVray, self).__init__(layer)
-        self.renderer = 'vray'
+        self.renderer = "vray"

     def get_renderer_prefix(self):
         prefix = super(ExpectedFilesVray, self).get_renderer_prefix()

@@ -724,7 +749,9 @@ class ExpectedFilesVray(AExpectedFiles):

         layer_data = self._get_layer_data()
         if layer_data.get("enabledAOVs"):
-            expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data)  # noqa: E501
+            expected_files[0][u"beauty"] = self._generate_single_file_sequence(
+                layer_data
+            )  # noqa: E501

         return expected_files

@@ -733,9 +760,12 @@ class ExpectedFilesVray(AExpectedFiles):

         try:
             # really? do we set it in vray just by selecting multichannel exr?
-            if cmds.getAttr(
-                    "vraySettings.imageFormatStr") == "exr (multichannel)":
+            if (
+                cmds.getAttr("vraySettings.imageFormatStr")
+                == "exr (multichannel)"  # noqa: W503
+            ):
+                # AOVs are merged in mutli-channel file
+                self.multipart = True
                 return enabled_aovs
         except ValueError:
             # this occurs when Render Setting windows was not opened yet. In

@@ -745,19 +775,23 @@ class ExpectedFilesVray(AExpectedFiles):
             # anyway.
             return enabled_aovs

-        default_ext = cmds.getAttr('vraySettings.imageFormatStr')
+        default_ext = cmds.getAttr("vraySettings.imageFormatStr")
         if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
             default_ext = "exr"

-        vr_aovs = [n for n in cmds.ls(
-            type=["VRayRenderElement", "VRayRenderElementSet"])]
+        vr_aovs = [
+            n
+            for n in cmds.ls(
+                type=["VRayRenderElement", "VRayRenderElementSet"]
+            )
+        ]

         # todo: find out how to detect multichannel exr for vray
         for aov in vr_aovs:
-            enabled = self.maya_is_true(
-                cmds.getAttr('{}.enabled'.format(aov)))
+            enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
             for override in self.get_layer_overrides(
-                    '{}.enabled'.format(aov), 'rs_{}'.format(self.layer)):
+                "{}.enabled".format(aov), "rs_{}".format(self.layer)
+            ):
                 enabled = self.maya_is_true(override)

             if enabled:

@@ -769,8 +803,11 @@ class ExpectedFilesVray(AExpectedFiles):
     def _get_vray_aov_name(self, node):

         # Get render element pass type
-        vray_node_attr = next(attr for attr in cmds.listAttr(node)
-                              if attr.startswith("vray_name"))
+        vray_node_attr = next(
+            attr
+            for attr in cmds.listAttr(node)
+            if attr.startswith("vray_name")
+        )
         pass_type = vray_node_attr.rsplit("_", 1)[-1]

         # Support V-Ray extratex explicit name (if set by user)

@@ -788,11 +825,11 @@ class ExpectedFilesRedshift(AExpectedFiles):

     # mapping redshift extension dropdown values to strings
-    ext_mapping = ['iff', 'exr', 'tif', 'png', 'tga', 'jpg']
+    ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]

     def __init__(self, layer):
         super(ExpectedFilesRedshift, self).__init__(layer)
-        self.renderer = 'redshift'
+        self.renderer = "redshift"

     def get_renderer_prefix(self):
         prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix()

@@ -807,7 +844,9 @@ class ExpectedFilesRedshift(AExpectedFiles):

         layer_data = self._get_layer_data()
         if layer_data.get("enabledAOVs"):
-            expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data)  # noqa: E501
+            expected_files[0][u"beauty"] = self._generate_single_file_sequence(
+                layer_data
+            )  # noqa: E501

         return expected_files

@@ -816,8 +855,10 @@ class ExpectedFilesRedshift(AExpectedFiles):

         try:
             if self.maya_is_true(
-                    cmds.getAttr("redshiftOptions.exrForceMultilayer")):
+                cmds.getAttr("redshiftOptions.exrForceMultilayer")
+            ):
+                # AOVs are merged in mutli-channel file
+                self.multipart = True
                 return enabled_aovs
         except ValueError:
             # this occurs when Render Setting windows was not opened yet. In

@@ -828,34 +869,30 @@ class ExpectedFilesRedshift(AExpectedFiles):
             return enabled_aovs

         default_ext = self.ext_mapping[
-            cmds.getAttr('redshiftOptions.imageFormat')
+            cmds.getAttr("redshiftOptions.imageFormat")
         ]
-        rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')]
+        rs_aovs = [n for n in cmds.ls(type="RedshiftAOV")]

         # todo: find out how to detect multichannel exr for redshift
         for aov in rs_aovs:
-            enabled = self.maya_is_true(
-                cmds.getAttr('{}.enabled'.format(aov)))
+            enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
             for override in self.get_layer_overrides(
-                    '{}.enabled'.format(aov), self.layer):
+                "{}.enabled".format(aov), self.layer
+            ):
                 enabled = self.maya_is_true(override)

             if enabled:
                 enabled_aovs.append(
-                    (
-                        cmds.getAttr('%s.name' % aov),
-                        default_ext
-                    )
+                    (cmds.getAttr("%s.name" % aov), default_ext)
                 )

         return enabled_aovs


 class ExpectedFilesRenderman(AExpectedFiles):

     def __init__(self, layer):
         super(ExpectedFilesRenderman, self).__init__(layer)
-        self.renderer = 'renderman'
+        self.renderer = "renderman"

     def get_aovs(self):
         enabled_aovs = []

@@ -867,19 +904,14 @@ class ExpectedFilesRenderman(AExpectedFiles):
             if aov_name == "rmanDefaultDisplay":
                 aov_name = "beauty"

-            enabled = self.maya_is_true(
-                cmds.getAttr("{}.enable".format(aov)))
+            enabled = self.maya_is_true(cmds.getAttr("{}.enable".format(aov)))
             for override in self.get_layer_overrides(
-                    '{}.enable'.format(aov), self.layer):
+                "{}.enable".format(aov), self.layer
+            ):
                 enabled = self.maya_is_true(override)

             if enabled:
-                enabled_aovs.append(
-                    (
-                        aov_name,
-                        default_ext
-                    )
-                )
+                enabled_aovs.append((aov_name, default_ext))

         return enabled_aovs

@@ -899,9 +931,9 @@ class ExpectedFilesRenderman(AExpectedFiles):
         for aov, files in expected_files[0].items():
             new_files = []
             for file in files:
-                new_file = "{}/{}/{}".format(layer_data["sceneName"],
-                                             layer_data["layerName"],
-                                             file)
+                new_file = "{}/{}/{}".format(
+                    layer_data["sceneName"], layer_data["layerName"], file
+                )
                 new_files.append(new_file)
             new_aovs[aov] = new_files

@@ -909,9 +941,8 @@ class ExpectedFilesRenderman(AExpectedFiles):


 class ExpectedFilesMentalray(AExpectedFiles):

     def __init__(self, layer):
-        raise UnimplementedRendererException('Mentalray not implemented')
+        raise UnimplementedRendererException("Mentalray not implemented")

     def get_aovs(self):
         return []

@@ -60,7 +60,7 @@ class CollectReview(pyblish.api.InstancePlugin):
         data['frameEndHandle'] = instance.data["frameEndHandle"]
         data["frameStart"] = instance.data["frameStart"]
         data["frameEnd"] = instance.data["frameEnd"]
-        data['handles'] = instance.data['handles']
+        data['handles'] = instance.data.get('handles', None)
         data['step'] = instance.data['step']
         data['fps'] = instance.data['fps']
         cmds.setAttr(str(instance) + '.active', 1)

@@ -1,9 +1,7 @@
 from maya import cmds

 import pyblish.api
-import avalon.api
 import os
 from pype.maya import lib
-from pype.maya import cmds


 class CollectMayaScene(pyblish.api.ContextPlugin):

@@ -44,8 +42,8 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
         })

         data['representations'] = [{
-            'name': 'ma',
-            'ext': 'ma',
+            'name': ext.lstrip("."),
+            'ext': ext.lstrip("."),
             'files': file,
             "stagingDir": folder,
         }]

@@ -49,6 +49,10 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
         attr_data = {}
         for attr in SETTINGS:
             current = cmds.getAttr("%s.%s" % (shape, attr))
+            # change None to empty string as Maya doesn't support
+            # NoneType in attributes
+            if current is None:
+                current = ""
             attr_data[attr] = current

         # Get transform data

@@ -3,24 +3,23 @@ import glob
 import contextlib
 import clique
 import capture
 #

 import pype.maya.lib as lib
 import pype.api
 #
-from maya import cmds, mel
+from maya import cmds
 import pymel.core as pm


 # TODO: move codec settings to presets
-class ExtractQuicktime(pype.api.Extractor):
-    """Extract Quicktime from viewport capture.
+class ExtractPlayblast(pype.api.Extractor):
+    """Extract viewport playblast.

     Takes review camera and creates review Quicktime video based on viewport
     capture.

     """

-    label = "Quicktime"
+    label = "Extract Playblast"
     hosts = ["maya"]
     families = ["review"]
     optional = True

@@ -29,7 +28,7 @@ class ExtractQuicktime(pype.api.Extractor):
         self.log.info("Extracting capture..")

         # get scene fps
-        fps = mel.eval('currentTimeUnitToFPS()')
+        fps = instance.data.get("fps") or instance.context.data.get("fps")

         # if start and end frames cannot be determined, get them
         # from Maya timeline

@@ -39,6 +38,7 @@ class ExtractQuicktime(pype.api.Extractor):
             start = cmds.playbackOptions(query=True, animationStartTime=True)
         if end is None:
             end = cmds.playbackOptions(query=True, animationEndTime=True)
+        self.log.info("start: {}, end: {}".format(start, end))

         # get cameras

@@ -47,7 +47,7 @@ class ExtractQuicktime(pype.api.Extractor):

         try:
             preset = lib.load_capture_preset(data=capture_preset)
-        except:
+        except Exception:
             preset = {}
         self.log.info('using viewport preset: {}'.format(preset))

@@ -55,21 +55,12 @@ class ExtractQuicktime(pype.api.Extractor):
         preset['format'] = "image"
         # preset['compression'] = "qt"
         preset['quality'] = 95
-        preset['compression'] = "jpg"
+        preset['compression'] = "png"
         preset['start_frame'] = start
         preset['end_frame'] = end
-        preset['camera_options'] = {
-            "displayGateMask": False,
-            "displayResolution": False,
-            "displayFilmGate": False,
-            "displayFieldChart": False,
-            "displaySafeAction": False,
-            "displaySafeTitle": False,
-            "displayFilmPivot": False,
-            "displayFilmOrigin": False,
-            "overscan": 1.0,
-            "depthOfField": cmds.getAttr("{0}.depthOfField".format(camera)),
-        }
+        camera_option = preset.get("camera_option", {})
+        camera_option["depthOfField"] = cmds.getAttr(
+            "{0}.depthOfField".format(camera))

         stagingdir = self.staging_dir(instance)
         filename = "{0}".format(instance.name)

@@ -90,8 +81,8 @@ class ExtractQuicktime(pype.api.Extractor):
         filename = preset.get("filename", "%TEMP%")

         # Force viewer to False in call to capture because we have our own
-        # viewer opening call to allow a signal to trigger between playblast
-        # and viewer
+        # viewer opening call to allow a signal to trigger between
+        # playblast and viewer
         preset['viewer'] = False

         # Remove panel key since it's internal value to capture_gui

@@ -112,8 +103,8 @@ class ExtractQuicktime(pype.api.Extractor):
         instance.data["representations"] = []

         representation = {
-            'name': 'mov',
-            'ext': 'mov',
+            'name': 'png',
+            'ext': 'png',
             'files': collected_frames,
             "stagingDir": stagingdir,
             "frameStart": start,

@@ -133,7 +124,6 @@ class ExtractQuicktime(pype.api.Extractor):

         To workaround this we just glob.glob() for any file extensions and
         assume the latest modified file is the correct file and return it.
-
         """
         # Catch cancelled playblast
         if filepath is None:

@@ -164,7 +154,6 @@ class ExtractQuicktime(pype.api.Extractor):
         return filepath

-

 @contextlib.contextmanager
 def maintained_time():
     ct = cmds.currentTime(query=True)

@@ -38,7 +38,8 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin):
         if compute:
             out_set = next(x for x in instance if x.endswith("out_SET"))
             instance_nodes = pc.sets(out_set, query=True)
-            instance_nodes.extend([x.getShape() for x in instance_nodes])
+            instance_nodes.extend(
+                [x.getShape() for x in instance_nodes if x.getShape()])

             scene_nodes = pc.ls(type="transform") + pc.ls(type="mesh")
             scene_nodes = set(scene_nodes) - set(instance_nodes)

@@ -1,103 +1,10 @@
 from collections import OrderedDict
-from pype.nuke import plugin
+from pype.nuke import (
+    plugin,
+    lib as pnlib)
 import nuke


-class CreateWriteRender(plugin.PypeCreator):
-    # change this to template preset
-    name = "WriteRender"
-    label = "Create Write Render"
-    hosts = ["nuke"]
-    n_class = "write"
-    family = "render"
-    icon = "sign-out"
-    defaults = ["Main", "Mask"]
-
-    def __init__(self, *args, **kwargs):
-        super(CreateWriteRender, self).__init__(*args, **kwargs)
-
-        data = OrderedDict()
-
-        data["family"] = self.family
-        data["families"] = self.n_class
-
-        for k, v in self.data.items():
-            if k not in data.keys():
-                data.update({k: v})
-
-        self.data = data
-        self.nodes = nuke.selectedNodes()
-        self.log.debug("_ self.data: '{}'".format(self.data))
-
-    def process(self):
-        from pype.nuke import lib as pnlib
-
-        inputs = []
-        outputs = []
-        instance = nuke.toNode(self.data["subset"])
-        selected_node = None
-
-        # use selection
-        if (self.options or {}).get("useSelection"):
-            nodes = self.nodes
-
-            if not (len(nodes) < 2):
-                msg = ("Select only one node. The node you want to connect to, "
-                       "or tick off `Use selection`")
-                log.error(msg)
-                nuke.message(msg)
-
-            selected_node = nodes[0]
-            inputs = [selected_node]
-            outputs = selected_node.dependent()
-
-            if instance:
-                if (instance.name() in selected_node.name()):
-                    selected_node = instance.dependencies()[0]
-
-        # if node already exist
-        if instance:
-            # collect input / outputs
-            inputs = instance.dependencies()
-            outputs = instance.dependent()
-            selected_node = inputs[0]
-            # remove old one
-            nuke.delete(instance)
-
-        # recreate new
-        write_data = {
-            "class": self.n_class,
-            "families": [self.family],
-            "avalon": self.data
-        }
-
-        if self.presets.get('fpath_template'):
-            self.log.info("Adding template path from preset")
-            write_data.update(
-                {"fpath_template": self.presets["fpath_template"]}
-            )
-        else:
-            self.log.info("Adding template path from plugin")
-            write_data.update({
-                "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})
-
-        write_node = pnlib.create_write_node(
-            self.data["subset"],
-            write_data,
-            input=selected_node)
-
-        # relinking to collected connections
-        for i, input in enumerate(inputs):
-            write_node.setInput(i, input)
-
-        write_node.autoplace()
-
-        for output in outputs:
-            output.setInput(0, write_node)
-
-        return write_node
-
-
 class CreateWritePrerender(plugin.PypeCreator):
     # change this to template preset
     name = "WritePrerender"

@@ -125,8 +32,6 @@ class CreateWritePrerender(plugin.PypeCreator):
         self.log.debug("_ self.data: '{}'".format(self.data))

     def process(self):
-        from pype.nuke import lib as pnlib
-
         inputs = []
         outputs = []
         instance = nuke.toNode(self.data["subset"])

@@ -137,8 +42,9 @@ class CreateWritePrerender(plugin.PypeCreator):
             nodes = self.nodes

             if not (len(nodes) < 2):
-                msg = ("Select only one node. The node you want to connect to, "
-                       "or tick off `Use selection`")
+                msg = ("Select only one node. The node "
+                       "you want to connect to, "
+                       "or tick off `Use selection`")
                 self.log.error(msg)
                 nuke.message(msg)

@@ -174,13 +80,15 @@ class CreateWritePrerender(plugin.PypeCreator):
         else:
             self.log.info("Adding template path from plugin")
             write_data.update({
-                "fpath_template": "{work}/prerenders/nuke/{subset}/{subset}.{frame}.{ext}"})
+                "fpath_template": ("{work}/prerenders/nuke/{subset}"
+                                   "/{subset}.{frame}.{ext}")})

         write_node = pnlib.create_write_node(
             self.data["subset"],
             write_data,
             input=selected_node,
-            prenodes=[])
+            prenodes=[],
+            review=False)

         # relinking to collected connections
         for i, input in enumerate(inputs):

pype/plugins/nuke/create/create_write_render.py (new file, 101 lines)

@@ -0,0 +1,101 @@
+from collections import OrderedDict
+from pype.nuke import (
+    plugin,
+    lib as pnlib)
+import nuke
+
+
+class CreateWriteRender(plugin.PypeCreator):
+    # change this to template preset
+    name = "WriteRender"
+    label = "Create Write Render"
+    hosts = ["nuke"]
+    n_class = "write"
+    family = "render"
+    icon = "sign-out"
+    defaults = ["Main", "Mask"]
+
+    def __init__(self, *args, **kwargs):
+        super(CreateWriteRender, self).__init__(*args, **kwargs)
+
+        data = OrderedDict()
+
+        data["family"] = self.family
+        data["families"] = self.n_class
+
+        for k, v in self.data.items():
+            if k not in data.keys():
+                data.update({k: v})
+
+        self.data = data
+        self.nodes = nuke.selectedNodes()
+        self.log.debug("_ self.data: '{}'".format(self.data))
+
+    def process(self):
+
+        inputs = []
+        outputs = []
+        instance = nuke.toNode(self.data["subset"])
+        selected_node = None
+
+        # use selection
+        if (self.options or {}).get("useSelection"):
+            nodes = self.nodes
+
+            if not (len(nodes) < 2):
+                msg = ("Select only one node. "
+                       "The node you want to connect to, "
+                       "or tick off `Use selection`")
+                self.log.error(msg)
+                nuke.message(msg)
+
+            selected_node = nodes[0]
+            inputs = [selected_node]
+            outputs = selected_node.dependent()
+
+            if instance:
+                if (instance.name() in selected_node.name()):
+                    selected_node = instance.dependencies()[0]
+
+        # if node already exist
+        if instance:
+            # collect input / outputs
+            inputs = instance.dependencies()
+            outputs = instance.dependent()
+            selected_node = inputs[0]
+            # remove old one
+            nuke.delete(instance)
+
+        # recreate new
+        write_data = {
+            "class": self.n_class,
+            "families": [self.family],
+            "avalon": self.data
+        }
+
+        if self.presets.get('fpath_template'):
+            self.log.info("Adding template path from preset")
+            write_data.update(
+                {"fpath_template": self.presets["fpath_template"]}
+            )
+        else:
+            self.log.info("Adding template path from plugin")
+            write_data.update({
+                "fpath_template": ("{work}/renders/nuke/{subset}"
+                                   "/{subset}.{frame}.{ext}")})
+
+        write_node = pnlib.create_write_node(
+            self.data["subset"],
+            write_data,
+            input=selected_node)
+
+        # relinking to collected connections
+        for i, input in enumerate(inputs):
+            write_node.setInput(i, input)
+
+        write_node.autoplace()
+
+        for output in outputs:
+            output.setInput(0, write_node)
+
+        return write_node

@@ -92,6 +92,7 @@ class LoadMov(api.Loader):
         "source",
         "plate",
         "render",
+        "prerender",
         "review"] + presets["families"]

     representations = [

@@ -70,7 +70,7 @@ def loader_shift(node, frame, relative=True):
 class LoadSequence(api.Loader):
     """Load image sequence into Nuke"""

-    families = ["render2d", "source", "plate", "render"]
+    families = ["render2d", "source", "plate", "render", "prerender"]
     representations = ["exr", "dpx", "jpg", "jpeg", "png"]

     label = "Load sequence"

@@ -87,7 +87,7 @@ class LoadSequence(api.Loader):
         version = context['version']
         version_data = version.get("data", {})
+        repr_id = context["representation"]["_id"]

         self.log.info("version_data: {}\n".format(version_data))
+        self.log.debug(
+            "Representation id `{}` ".format(repr_id))

@@ -52,6 +52,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):

             # establish families
             family = avalon_knob_data["family"]
+            families_ak = avalon_knob_data.get("families")
             families = list()

             # except disabled nodes but exclude backdrops in test

@@ -68,16 +69,14 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
             # Add all nodes in group instances.
             if node.Class() == "Group":
-                # only alter families for render family
-                if ("render" in family):
-                    # check if node is not disabled
-                    families.append(avalon_knob_data["families"])
+                if "write" in families_ak:
                     if node["render"].value():
                         self.log.info("flagged for render")
-                        add_family = "render.local"
+                        add_family = "{}.local".format(family)
                         # dealing with local/farm rendering
                         if node["render_farm"].value():
                             self.log.info("adding render farm family")
-                            add_family = "render.farm"
+                            add_family = "{}.farm".format(family)
                             instance.data["transfer"] = False
                         families.append(add_family)
                     else:

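The collector change above derives the `.local`/`.farm` suffix from the instance's own family instead of hard-coding `render.*`, which is what lets prerender instances flow through the same path. An illustrative sketch:

    family = "prerender"       # illustrative; previously always "render"
    render_on_farm = True

    add_family = "{}.local".format(family)
    if render_on_farm:
        add_family = "{}.farm".format(family)

    print(add_family)  # prerender.farm
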
@@ -89,9 +88,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
                 instance.append(i)
                 node.end()

-            family = avalon_knob_data["family"]
-            families = list()
-            families_ak = avalon_knob_data.get("families")
+            self.log.debug("__ families: `{}`".format(families))

             if families_ak:
                 families.append(families_ak)

@@ -104,22 +101,6 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
            resolution_height = format.height()
            pixel_aspect = format.pixelAspect()

            if node.Class() not in "Read":
                if "render" not in node.knobs().keys():
                    pass
                elif node["render"].value():
                    self.log.info("flagged for render")
                    add_family = "render.local"
                    # dealing with local/farm rendering
                    if node["render_farm"].value():
                        self.log.info("adding render farm family")
                        add_family = "render.farm"
                        instance.data["transfer"] = False
                    families.append(add_family)
                else:
            # add family into families
            families.insert(0, family)

            instance.data.update({
                "subset": subset,
                "asset": os.environ["AVALON_ASSET"],
|
|||
|
|
@ -8,7 +8,7 @@ class CollectSlate(pyblish.api.InstancePlugin):
|
|||
order = pyblish.api.CollectorOrder + 0.09
|
||||
label = "Collect Slate Node"
|
||||
hosts = ["nuke"]
|
||||
families = ["write"]
|
||||
families = ["render", "render.local", "render.farm"]
|
||||
|
||||
def process(self, instance):
|
||||
node = instance[0]
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
import os
|
||||
import nuke
|
||||
import pyblish.api
|
||||
import pype.api as pype
|
||||
|
||||
|
||||
@pyblish.api.log
|
||||
|
|
@@ -13,9 +12,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
    hosts = ["nuke", "nukeassist"]
    families = ["write"]

    # preset attributes
    sync_workfile_version = True

    def process(self, instance):
        # adding 2d focused rendering
        instance.data["families"].append("render2d")
        families = instance.data["families"]

        node = None
        for x in instance:
@@ -53,10 +54,13 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
        output_dir = os.path.dirname(path)
        self.log.debug('output dir: {}'.format(output_dir))

        # get version to instance for integration
        instance.data['version'] = instance.context.data["version"]
        if not next((f for f in families
                     if "prerender" in f),
                    None) and self.sync_workfile_version:
            # get version to instance for integration
            instance.data['version'] = instance.context.data["version"]

        self.log.debug('Write Version: %s' % instance.data('version'))

        # create label
        name = node.name()
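Note: the new guard above relies on next() with a default to probe the families list lazily; a standalone illustration of the idiom:

    families = ["render2d", "prerender.local"]
    match = next((f for f in families if "prerender" in f), None)
    print(match)            # "prerender.local"
    # truthy match -> "not match" is False -> workfile version sync is skipped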
@@ -67,7 +71,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
            int(last_frame)
        )

        if 'render' in instance.data['families']:
        if [fm for fm in families
                if fm in ["render", "prerender"]]:
            if "representations" not in instance.data:
                instance.data["representations"] = list()
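Note: the old test matched only the literal 'render' family; the list comprehension above is truthy when either exact name is present, so prerender instances now collect representations too. For example:

    families = ["prerender", "prerender.local"]
    if [fm for fm in families if fm in ["render", "prerender"]]:
        print("collect representations")    # now also runs for prerender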
@@ -95,7 +100,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
            # this will only run if slate frame is not already
            # rendered from previous publishes
            if "slate" in instance.data["families"] \
                    and (frame_length == collected_frames_len):
                    and (frame_length == collected_frames_len) \
                    and ("prerender" not in instance.data["families"]):
                frame_slate_str = "%0{}d".format(
                    len(str(last_frame))) % (first_frame - 1)
                slate_frame = collected_frames[0].replace(
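Note: the slate frame string above pads the frame before first_frame to the width of last_frame. A worked example:

    first_frame, last_frame = 1001, 1101
    frame_slate_str = "%0{}d".format(len(str(last_frame))) % (first_frame - 1)
    print(frame_slate_str)  # "1000" (4-digit padding, from len("1101"))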
@@ -124,6 +130,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
        deadlinePriority = group_node["deadlinePriority"].value()

        families = [f for f in instance.data["families"] if "write" not in f]

        instance.data.update({
            "versionData": version_data,
            "path": path,
@@ -144,4 +151,12 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
            "deadlinePriority": deadlinePriority
        })

        if "prerender" in families:
            instance.data.update({
                "family": "prerender",
                "families": []
            })

        self.log.debug("families: {}".format(families))

        self.log.debug("instance.data: {}".format(instance.data))
|
|
@ -17,9 +17,11 @@ class NukeRenderLocal(pype.api.Extractor):
|
|||
order = pyblish.api.ExtractorOrder
|
||||
label = "Render Local"
|
||||
hosts = ["nuke"]
|
||||
families = ["render.local"]
|
||||
families = ["render.local", "prerender.local"]
|
||||
|
||||
def process(self, instance):
|
||||
families = instance.data["families"]
|
||||
|
||||
node = None
|
||||
for x in instance:
|
||||
if x.Class() == "Write":
|
||||
|
|
@@ -30,7 +32,7 @@ class NukeRenderLocal(pype.api.Extractor):
        first_frame = instance.data.get("frameStartHandle", None)

        # exception for slate workflow
        if "slate" in instance.data["families"]:
        if "slate" in families:
            first_frame -= 1

        last_frame = instance.data.get("frameEndHandle", None)
@@ -53,7 +55,7 @@ class NukeRenderLocal(pype.api.Extractor):
        )

        # exception for slate workflow
        if "slate" in instance.data["families"]:
        if "slate" in families:
            first_frame += 1

        path = node['file'].value()
@@ -79,8 +81,16 @@ class NukeRenderLocal(pype.api.Extractor):
            out_dir
        ))

        instance.data['family'] = 'render'
        instance.data['families'].append('render')
        # redefinition of families
        if "render.local" in families:
            instance.data['family'] = 'render2d'
            families.remove('render.local')
            families.insert(0, "render")
        elif "prerender.local" in families:
            instance.data['family'] = 'prerender'
            families.remove('prerender.local')
            families.insert(0, "prerender")
        instance.data["families"] = families

        collections, remainder = clique.assemble(collected_frames)
        self.log.info('collections: {}'.format(str(collections)))
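Note: the branch above retargets the instance after a local render: the working family "render.local"/"prerender.local" is consumed and replaced by the publishable one. A pure-function sketch of the remap (illustrative only):

    def finalize_families(families):
        if "render.local" in families:
            family = "render2d"
            families.remove("render.local")
            families.insert(0, "render")
        elif "prerender.local" in families:
            family = "prerender"
            families.remove("prerender.local")
            families.insert(0, "prerender")
        else:
            family = None
        return family, families

    print(finalize_families(["render.local", "slate"]))
    # ('render2d', ['render', 'slate'])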
|
|||
|
|
@ -3,7 +3,7 @@ import pyblish.api
|
|||
from avalon.nuke import lib as anlib
|
||||
from pype.nuke import lib as pnlib
|
||||
import pype
|
||||
reload(pnlib)
|
||||
|
||||
|
||||
class ExtractReviewDataMov(pype.api.Extractor):
|
||||
"""Extracts movie and thumbnail with baked in luts
|
||||
|
|
@@ -15,7 +15,7 @@ class ExtractReviewDataMov(pype.api.Extractor):
    order = pyblish.api.ExtractorOrder + 0.01
    label = "Extract Review Data Mov"

    families = ["review", "render", "render.local"]
    families = ["review"]
    hosts = ["nuke"]

    # presets
|
|||
|
|
@ -9,6 +9,7 @@ class IncrementScriptVersion(pyblish.api.ContextPlugin):
|
|||
order = pyblish.api.IntegratorOrder + 0.9
|
||||
label = "Increment Script Version"
|
||||
optional = True
|
||||
families = ["workfile", "render", "render.local", "render.farm"]
|
||||
hosts = ['nuke']
|
||||
|
||||
def process(self, context):
|
||||
|
|
@@ -16,19 +17,7 @@ class IncrementScriptVersion(pyblish.api.ContextPlugin):
        assert all(result["success"] for result in context.data["results"]), (
            "Publishing not successful so version is not increased.")

        instances = context[:]

        prerender_check = list()
        families_check = list()
        for instance in instances:
            if ("prerender" in str(instance)) and instance.data.get("families", None):
                prerender_check.append(instance)
            if instance.data.get("families", None):
                families_check.append(True)

        if len(prerender_check) != len(families_check):
            from pype.lib import version_up
            path = context.data["currentFile"]
            nuke.scriptSaveAs(version_up(path))
            self.log.info('Incrementing script version')
        from pype.lib import version_up
        path = context.data["currentFile"]
        nuke.scriptSaveAs(version_up(path))
        self.log.info('Incrementing script version')
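Note: pype.lib.version_up is used above but its body is not part of this diff. A minimal stand-in, assuming it bumps the zero-padded vNNN token in the script path (the real implementation may differ):

    import re

    def version_up(path):
        def bump(match):
            num = int(match.group(1)) + 1
            return "v{:0{}d}".format(num, len(match.group(1)))
        return re.sub(r"v(\d+)", bump, path, count=1)

    print(version_up("shot010_comp_v012.nk"))  # shot010_comp_v013.nk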
|
|||
|
|
@ -19,7 +19,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
label = "Submit to Deadline"
|
||||
order = pyblish.api.IntegratorOrder + 0.1
|
||||
hosts = ["nuke", "nukestudio"]
|
||||
families = ["render.farm"]
|
||||
families = ["render.farm", "prerender.farm"]
|
||||
optional = True
|
||||
|
||||
deadline_priority = 50
|
||||
|
|
@ -28,6 +28,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
deadline_chunk_size = 1
|
||||
|
||||
def process(self, instance):
|
||||
families = instance.data["families"]
|
||||
|
||||
node = instance[0]
|
||||
context = instance.context
|
||||
|
|
@ -82,6 +83,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
instance.data["deadlineSubmissionJob"] = resp.json()
|
||||
instance.data["publishJobState"] = "Suspended"
|
||||
|
||||
# redefinition of families
|
||||
if "render.farm" in families:
|
||||
instance.data['family'] = 'write'
|
||||
families.insert(0, "render2d")
|
||||
elif "prerender.farm" in families:
|
||||
instance.data['family'] = 'write'
|
||||
families.insert(0, "prerender")
|
||||
instance.data["families"] = families
|
||||
|
||||
def payload_submit(self,
|
||||
instance,
|
||||
script_path,
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
|
|||
""" Validates file output. """
|
||||
|
||||
order = pyblish.api.ValidatorOrder + 0.1
|
||||
families = ["render"]
|
||||
families = ["render", "prerender"]
|
||||
|
||||
label = "Validate rendered frame"
|
||||
hosts = ["nuke", "nukestudio"]
|
||||
|
|
|
|||
pype/version.py (new file, 1 line)

@@ -0,0 +1 @@
__version__ = "2.7.0"