Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)
Revert "Merge remote-tracking branch 'origin/2.x/develop' into develop"
This reverts commit b38585243c.
This commit is contained in:
parent
b38585243c
commit
be9c4d4841
9 changed files with 49 additions and 363 deletions
@@ -32,9 +32,6 @@ Attributes:
    ImagePrefixes (dict): Mapping between renderers and their respective
        image prefix atrribute names.

Todo:
    Determine `multipart` from render instance.

"""

import types

@@ -97,10 +94,6 @@ class ExpectedFiles:

    multipart = False

    def __init__(self, render_instance):
        """Constructor."""
        self._render_instance = render_instance

    def get(self, renderer, layer):
        """Get expected files for given renderer and render layer.

@@ -121,20 +114,15 @@ class ExpectedFiles:
        renderSetup.instance().switchToLayerUsingLegacyName(layer)

        if renderer.lower() == "arnold":
            return self._get_files(ExpectedFilesArnold(layer,
                                                        self._render_instance))
            return self._get_files(ExpectedFilesArnold(layer))
        elif renderer.lower() == "vray":
            return self._get_files(ExpectedFilesVray(
                layer, self._render_instance))
            return self._get_files(ExpectedFilesVray(layer))
        elif renderer.lower() == "redshift":
            return self._get_files(ExpectedFilesRedshift(
                layer, self._render_instance))
            return self._get_files(ExpectedFilesRedshift(layer))
        elif renderer.lower() == "mentalray":
            return self._get_files(ExpectedFilesMentalray(
                layer, self._render_instance))
            return self._get_files(ExpectedFilesMentalray(layer))
        elif renderer.lower() == "renderman":
            return self._get_files(ExpectedFilesRenderman(
                layer, self._render_instance))
            return self._get_files(ExpectedFilesRenderman(layer))
        else:
            raise UnsupportedRendererException(
                "unsupported {}".format(renderer)
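For orientation, the if/elif chain above maps a renderer name to its ExpectedFiles* handler, and after this revert each handler is built from the layer alone. A minimal sketch of the call site (the renderer and layer names are illustrative, not taken from the diff):

    ef = ExpectedFiles()
    try:
        # switches render setup to the layer, then collects the expected
        # output paths from the handler matching the renderer name
        expected = ef.get("arnold", "defaultRenderLayer")
    except UnsupportedRendererException:
        expected = []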
@@ -161,10 +149,9 @@ class AExpectedFiles:

    layer = None
    multipart = False

    def __init__(self, layer, render_instance):
    def __init__(self, layer):
        """Constructor."""
        self.layer = layer
        self.render_instance = render_instance

    @abstractmethod
    def get_aovs(self):

@@ -473,9 +460,9 @@ class ExpectedFilesArnold(AExpectedFiles):
        "maya": "",
    }

    def __init__(self, layer, render_instance):
    def __init__(self, layer):
        """Constructor."""
        super(ExpectedFilesArnold, self).__init__(layer, render_instance)
        super(ExpectedFilesArnold, self).__init__(layer)
        self.renderer = "arnold"

    def get_aovs(self):

@@ -544,9 +531,9 @@ class ExpectedFilesArnold(AExpectedFiles):

class ExpectedFilesVray(AExpectedFiles):
    """Expected files for V-Ray renderer."""

    def __init__(self, layer, render_instance):
    def __init__(self, layer):
        """Constructor."""
        super(ExpectedFilesVray, self).__init__(layer, render_instance)
        super(ExpectedFilesVray, self).__init__(layer)
        self.renderer = "vray"

    def get_renderer_prefix(self):

@@ -627,25 +614,24 @@ class ExpectedFilesVray(AExpectedFiles):
        if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
            default_ext = "exr"

        # add beauty as default
        enabled_aovs.append(
            (u"beauty", default_ext)
        )

        # handle aovs from references
        use_ref_aovs = self.render_instance.data.get(
            "vrayUseReferencedAovs", False) or False
        if not self.maya_is_true(
            cmds.getAttr("vraySettings.relements_enableall")
        ):
            return enabled_aovs

        # this will have list of all aovs no matter if they are coming from
        # reference or not.
        vr_aovs = cmds.ls(
            type=["VRayRenderElement", "VRayRenderElementSet"]) or []
        if not use_ref_aovs:
            ref_aovs = cmds.ls(
                type=["VRayRenderElement", "VRayRenderElementSet"],
                referencedNodes=True) or []
            # get difference
            vr_aovs = list(set(vr_aovs) - set(ref_aovs))
        # filter all namespace prefixed AOVs - they are pulled in from
        # references and are not rendered.
        vr_aovs = [
            n
            for n in cmds.ls(
                type=["VRayRenderElement", "VRayRenderElementSet"]
            )
            if len(n.split(":")) == 1
        ]

        for aov in vr_aovs:
            enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))

@@ -717,9 +703,9 @@ class ExpectedFilesRedshift(AExpectedFiles):

    ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]

    def __init__(self, layer, render_instance):
    def __init__(self, layer):
        """Construtor."""
        super(ExpectedFilesRedshift, self).__init__(layer, render_instance)
        super(ExpectedFilesRedshift, self).__init__(layer)
        self.renderer = "redshift"

    def get_renderer_prefix(self):

@@ -836,9 +822,9 @@ class ExpectedFilesRenderman(AExpectedFiles):
    This is very rudimentary and needs more love and testing.
    """

    def __init__(self, layer, render_instance):
    def __init__(self, layer):
        """Constructor."""
        super(ExpectedFilesRenderman, self).__init__(layer, render_instance)
        super(ExpectedFilesRenderman, self).__init__(layer)
        self.renderer = "renderman"

    def get_aovs(self):

@@ -901,7 +887,7 @@ class ExpectedFilesRenderman(AExpectedFiles):

class ExpectedFilesMentalray(AExpectedFiles):
    """Skeleton unimplemented class for Mentalray renderer."""

    def __init__(self, layer, render_instance):
    def __init__(self, layer):
        """Constructor.

        Raises:
@@ -56,11 +56,7 @@ from .plugin_tools import (
    filter_pyblish_plugins,
    source_hash,
    get_unique_layer_name,
    get_background_layers,
    oiio_supported,
    decompress,
    get_decompress_dir,
    should_decompress
    get_background_layers
)

from .user_settings import (

@@ -112,10 +108,6 @@ __all__ = [
    "source_hash",
    "get_unique_layer_name",
    "get_background_layers",
    "oiio_supported",
    "decompress",
    "get_decompress_dir",
    "should_decompress",

    "version_up",
    "get_version_from_path",
@@ -5,8 +5,6 @@ import inspect
import logging
import re
import json
import pype.api
import tempfile

from pype.settings import get_project_settings

@@ -136,115 +134,3 @@ def get_background_layers(file_url):
                      layer.get("filename")).
                      replace("\\", "/"))
    return layers


def oiio_supported():
    """
    Checks if oiiotool is configured for this platform.

    Expects full path to executable.

    'should_decompress' will throw exception if configured,
    but not present or not working.
    Returns:
        (bool)
    """
    oiio_path = os.getenv("PYPE_OIIO_PATH", "")
    if not oiio_path or not os.path.exists(oiio_path):
        log.debug("OIIOTool is not configured or not present at {}".
                  format(oiio_path))
        return False

    return True
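In other words, the only configuration the helper above consults is the PYPE_OIIO_PATH environment variable. A quick sanity check could look like this (the path is a hypothetical install location, purely illustrative):

    import os

    os.environ["PYPE_OIIO_PATH"] = "/opt/oiio/bin/oiiotool"  # hypothetical location
    print(oiio_supported())  # False when the variable is unset or the file is missing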

def decompress(target_dir, file_url,
               input_frame_start=None, input_frame_end=None, log=None):
    """
    Decompresses DWAA 'file_url' .exr to 'target_dir'.

    Creates uncompressed files in 'target_dir', they need to be cleaned.

    File url could be for single file or for a sequence, in that case
    %0Xd will be as a placeholder for frame number AND input_frame* will
    be filled.
    In that case single oiio command with '--frames' will be triggered for
    all frames, this should be faster then looping and running sequentially

    Args:
        target_dir (str): extended from stagingDir
        file_url (str): full urls to source file (with or without %0Xd)
        input_frame_start (int) (optional): first frame
        input_frame_end (int) (optional): last frame
        log (Logger) (optional): pype logger
    """
    is_sequence = input_frame_start is not None and \
        input_frame_end is not None and \
        (int(input_frame_end) > int(input_frame_start))

    oiio_cmd = []
    oiio_cmd.append(os.getenv("PYPE_OIIO_PATH"))

    oiio_cmd.append("--compression none")

    base_file_name = os.path.basename(file_url)
    oiio_cmd.append(file_url)

    if is_sequence:
        oiio_cmd.append("--frames {}-{}".format(input_frame_start,
                                                input_frame_end))

    oiio_cmd.append("-o")
    oiio_cmd.append(os.path.join(target_dir, base_file_name))

    subprocess_exr = " ".join(oiio_cmd)

    if not log:
        log = logging.getLogger(__name__)

    log.debug("Decompressing {}".format(subprocess_exr))
    pype.api.subprocess(
        subprocess_exr, shell=True, logger=log
    )
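The function above only assembles and runs a single oiiotool call. With a hypothetical path and frame range, the invocation it logs for a sequence looks roughly like the comment in this sketch (the flags mirror the code, everything else is invented for illustration):

    # Sketch only: decompress a 10-frame DWAA sequence into a temp directory.
    # The assembled command is roughly:
    #   <PYPE_OIIO_PATH> --compression none /renders/shot010.%04d.exr
    #       --frames 1001-1010 -o /tmp/pyblish_tmp_abc123/shot010.%04d.exr
    decompress(
        target_dir="/tmp/pyblish_tmp_abc123",
        file_url="/renders/shot010.%04d.exr",
        input_frame_start=1001,
        input_frame_end=1010,
    )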

def get_decompress_dir():
    """
    Creates temporary folder for decompressing.
    Its local, in case of farm it is 'local' to the farm machine.

    Should be much faster, needs to be cleaned up later.
    """
    return os.path.normpath(
        tempfile.mkdtemp(prefix="pyblish_tmp_")
    )


def should_decompress(file_url):
    """
    Tests that 'file_url' is compressed with DWAA.

    Uses 'oiio_supported' to check that OIIO tool is available for this
    platform.

    Shouldn't throw exception as oiiotool is guarded by check function.
    Currently implemented this way as there is no support for Mac and Linux
    In the future, it should be more strict and throws exception on
    misconfiguration.

    Args:
        file_url (str): path to rendered file (in sequence it would be
            first file, if that compressed it is expected that whole seq
            will be too)
    Returns:
        (bool): 'file_url' is DWAA compressed and should be decompressed
            and we can decompress (oiiotool supported)
    """
    if oiio_supported():
        output = pype.api.subprocess([
            os.getenv("PYPE_OIIO_PATH"),
            "--info", "-v", file_url])
        return "compression: \"dwaa\"" in output or \
               "compression: \"dwab\"" in output

    return False
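Taken together, the three helpers removed here form one pattern that the extract plugins further down also drop: probe the input, decompress into a throw-away directory, repoint the input path, and clean up afterwards. A condensed sketch of that flow (the input path is illustrative; os and shutil imports as in the plugins below):

    # Condensed sketch of the decompress-then-clean-up flow used by the plugins.
    full_input_path = "/stage/render.1001.exr"  # illustrative path
    decompressed_dir = ""
    do_decompress = should_decompress(full_input_path)
    if do_decompress:
        decompressed_dir = get_decompress_dir()
        decompress(decompressed_dir, full_input_path)
        # continue working with the uncompressed copy
        full_input_path = os.path.join(
            decompressed_dir, os.path.basename(full_input_path))

    # ... run ffmpeg / burnin on full_input_path here ...

    if do_decompress and os.path.exists(decompressed_dir):
        shutil.rmtree(decompressed_dir)  # the temporary copies are no longer needed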
@@ -6,9 +6,6 @@ import tempfile

import pype.api
import pyblish
from pype.lib import should_decompress, \
    get_decompress_dir, decompress
import shutil


class ExtractBurnin(pype.api.Extractor):

@@ -31,8 +28,7 @@ class ExtractBurnin(pype.api.Extractor):
        "premiere",
        "standalonepublisher",
        "harmony",
        "fusion",
        "aftereffects"
        "fusion"
    ]
    optional = True

@@ -208,26 +204,6 @@ class ExtractBurnin(pype.api.Extractor):
            # Prepare paths and files for process.
            self.input_output_paths(new_repre, temp_data, filename_suffix)

            decompressed_dir = ''
            full_input_path = temp_data["full_input_path"]
            do_decompress = should_decompress(full_input_path)
            if do_decompress:
                decompressed_dir = get_decompress_dir()

                decompress(
                    decompressed_dir,
                    full_input_path,
                    temp_data["frame_start"],
                    temp_data["frame_end"],
                    self.log
                )

                # input path changed, 'decompressed' added
                input_file = os.path.basename(full_input_path)
                temp_data["full_input_path"] = os.path.join(
                    decompressed_dir,
                    input_file)

            # Data for burnin script
            script_data = {
                "input": temp_data["full_input_path"],

@@ -287,9 +263,6 @@ class ExtractBurnin(pype.api.Extractor):
                os.remove(filepath)
                self.log.debug("Removed: \"{}\"".format(filepath))

            if do_decompress and os.path.exists(decompressed_dir):
                shutil.rmtree(decompressed_dir)

    def prepare_basic_data(self, instance):
        """Pick data from instance for processing and for burnin strings.
@@ -3,9 +3,6 @@ import os

import pyblish.api
import pype.api
import pype.lib
from pype.lib import should_decompress, \
    get_decompress_dir, decompress
import shutil


class ExtractJpegEXR(pyblish.api.InstancePlugin):

@@ -25,8 +22,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
        if 'crypto' in instance.data['subset']:
            return

        do_decompress = False
        # ffmpeg doesn't support multipart exrs, use oiiotool if available
        # ffmpeg doesn't support multipart exrs
        if instance.data.get("multipartExr") is True:
            return

@@ -40,6 +36,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
        # filter out mov and img sequences
        representations_new = representations[:]

        if instance.data.get("multipartExr"):
            # ffmpeg doesn't support multipart exrs
            return

        for repre in representations:
            tags = repre.get("tags", [])
            self.log.debug(repre)

@@ -60,19 +60,6 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
            full_input_path = os.path.join(stagingdir, input_file)
            self.log.info("input {}".format(full_input_path))

            decompressed_dir = ''
            do_decompress = should_decompress(full_input_path)
            if do_decompress:
                decompressed_dir = get_decompress_dir()

                decompress(
                    decompressed_dir,
                    full_input_path)
                # input path changed, 'decompressed' added
                full_input_path = os.path.join(
                    decompressed_dir,
                    input_file)

            filename = os.path.splitext(input_file)[0]
            if not filename.endswith('.'):
                filename += "."

@@ -106,14 +93,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):

            # run subprocess
            self.log.debug("{}".format(subprocess_jpeg))
            try: # temporary until oiiotool is supported cross platform
                pype.api.subprocess(subprocess_jpeg, shell=True)
            except RuntimeError as exp:
                if "Compression" in str(exp):
                    self.log.debug("Unsupported compression on input files. " +
                                   "Skipping!!!")
                    return
                raise
            pype.api.subprocess(subprocess_jpeg, shell=True)

            if "representations" not in instance.data:
                instance.data["representations"] = []

@@ -131,7 +111,4 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
            self.log.debug("Adding: {}".format(representation))
            representations_new.append(representation)

            if do_decompress and os.path.exists(decompressed_dir):
                shutil.rmtree(decompressed_dir)

        instance.data["representations"] = representations_new
@@ -6,8 +6,6 @@ import pyblish.api
import clique
import pype.api
import pype.lib
from pype.lib import should_decompress, \
    get_decompress_dir, decompress


class ExtractReview(pyblish.api.InstancePlugin):

@@ -16,7 +14,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
    Compulsory attribute of representation is tags list with "review",
    otherwise the representation is ignored.

    All new representations are created and encoded by ffmpeg following
    All new represetnations are created and encoded by ffmpeg following
    presets found in `pype-config/presets/plugins/global/
    publish.json:ExtractReview:outputs`.
    """

@@ -190,17 +188,9 @@ class ExtractReview(pyblish.api.InstancePlugin):

                temp_data = self.prepare_temp_data(instance, repre, output_def)

                try: # temporary until oiiotool is supported cross platform
                    ffmpeg_args = self._ffmpeg_arguments(
                        output_def, instance, new_repre, temp_data
                    )
                except ZeroDivisionError:
                    if 'exr' in temp_data["origin_repre"]["ext"]:
                        self.log.debug("Unsupported compression on input " +
                                       "files. Skipping!!!")
                        return
                    raise

                ffmpeg_args = self._ffmpeg_arguments(
                    output_def, instance, new_repre, temp_data
                )
                subprcs_cmd = " ".join(ffmpeg_args)

                # run subprocess

@@ -328,9 +318,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
        Args:
            output_def (dict): Currently processed output definition.
            instance (Instance): Currently processed instance.
            new_repre (dict): Representation representing output of this
            new_repre (dict): Reprensetation representing output of this
                process.
            temp_data (dict): Base data for successful process.
            temp_data (dict): Base data for successfull process.
        """

        # Get FFmpeg arguments from profile presets

@@ -341,35 +331,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
        ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or []
        ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or []

        if isinstance(new_repre['files'], list):
            input_files_urls = [os.path.join(new_repre["stagingDir"], f) for f
                                in new_repre['files']]
            test_path = input_files_urls[0]
        else:
            test_path = os.path.join(
                new_repre["stagingDir"], new_repre['files'])
        do_decompress = should_decompress(test_path)

        if do_decompress:
            # change stagingDir, decompress first
            # calculate all paths with modified directory, used on too many
            # places
            # will be purged by cleanup.py automatically
            orig_staging_dir = new_repre["stagingDir"]
            new_repre["stagingDir"] = get_decompress_dir()

        # Prepare input and output filepaths
        self.input_output_paths(new_repre, output_def, temp_data)

        if do_decompress:
            input_file = temp_data["full_input_path"].\
                replace(new_repre["stagingDir"], orig_staging_dir)

            decompress(new_repre["stagingDir"], input_file,
                       temp_data["frame_start"],
                       temp_data["frame_end"],
                       self.log)

        # Set output frames len to 1 when ouput is single image
        if (
            temp_data["output_ext_is_image"]

@@ -966,7 +930,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
        return regexes

    def validate_value_by_regexes(self, value, in_list):
        """Validates in any regex from list match entered value.
        """Validates in any regexe from list match entered value.

        Args:
            in_list (list): List with regexes.

@@ -991,9 +955,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
    def profile_exclusion(self, matching_profiles):
        """Find out most matching profile byt host, task and family match.

        Profiles are selectively filtered. Each profile should have
        Profiles are selectivelly filtered. Each profile should have
        "__value__" key with list of booleans. Each boolean represents
        existence of filter for specific key (host, tasks, family).
        existence of filter for specific key (host, taks, family).
        Profiles are looped in sequence. In each sequence are split into
        true_list and false_list. For next sequence loop are used profiles in
        true_list if there are any profiles else false_list is used.
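The docstring above is dense; the selection it describes amounts to keeping, per key, the profiles that define a filter and only falling back to the rest when none do. A stripped-down sketch of that loop (only the `__value__` boolean lists are taken from the docstring, the rest is illustrative):

    # Sketch of the selective filtering described in the docstring above.
    profiles = matching_profiles
    for index in range(3):  # one pass per key: host, task, family
        true_list, false_list = [], []
        for profile in profiles:
            if profile["__value__"][index]:
                true_list.append(profile)
            else:
                false_list.append(profile)
        profiles = true_list or false_list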
@@ -1072,7 +1036,7 @@ class ExtractReview(pyblish.api.InstancePlugin):

        highest_profile_points = -1
        # Each profile get 1 point for each matching filter. Profile with most
        # points is returned. For cases when more than one profile will match
        # points is returnd. For cases when more than one profile will match
        # are also stored ordered lists of matching values.
        for profile in self.profiles:
            profile_points = 0
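The comments above describe a simple additive score: every matching filter adds one point and the profile(s) with the highest total win. A minimal sketch of that idea (the key_matches helper and the profiles list are hypothetical, used only to keep the example short):

    # Illustrative scoring loop; `profiles` and `key_matches` are assumptions.
    highest_profile_points = -1
    best_profiles = []
    for profile in profiles:
        profile_points = 0
        for key in ("hosts", "tasks", "families"):
            if key_matches(profile, key):  # hypothetical matching helper
                profile_points += 1
        if profile_points > highest_profile_points:
            highest_profile_points = profile_points
            best_profiles = [profile]
        elif profile_points == highest_profile_points:
            best_profiles.append(profile)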
@@ -1684,7 +1648,7 @@ class ExtractReview(pyblish.api.InstancePlugin):

    def add_video_filter_args(self, args, inserting_arg):
        """
        Fixing video filter arguments to be one long string
        Fixing video filter argumets to be one long string

        Args:
            args (list): list of string arguments
@@ -193,7 +193,6 @@ class CreateRender(avalon.maya.Creator):
        self.data["tilesX"] = 2
        self.data["tilesY"] = 2
        self.data["convertToScanline"] = False
        self.data["vrayUseReferencedAovs"] = False
        # Disable for now as this feature is not working yet
        # self.data["assScene"] = False
@@ -149,7 +149,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):

            # return all expected files for all cameras and aovs in given
            # frame range
            ef = ExpectedFiles(render_instance)
            ef = ExpectedFiles()
            exp_files = ef.get(renderer, layer_name)
            self.log.info("multipart: {}".format(ef.multipart))
            assert exp_files, "no file names were generated, this is bug"

@@ -248,8 +248,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                "tilesX": render_instance.data.get("tilesX") or 2,
                "tilesY": render_instance.data.get("tilesY") or 2,
                "priority": render_instance.data.get("priority"),
                "convertToScanline": render_instance.data.get("convertToScanline") or False, # noqa: E501
                "vrayUseReferencedAovs": render_instance.data.get("vrayUseReferencedAovs") or False # noqa: E501
                "convertToScanline": render_instance.data.get("convertToScanline") or False # noqa: E501
            }

            if self.sync_workfile_version:
@@ -1,90 +0,0 @@
# -*- coding: utf-8 -*-
"""Validate if there are AOVs pulled from references."""
import pyblish.api
import types
from maya import cmds

import pype.hosts.maya.action


class ValidateVrayReferencedAOVs(pyblish.api.InstancePlugin):
    """Validate whether the V-Ray Render Elements (AOVs) include references.

    This will check if there are AOVs pulled from references. If
    `Vray Use Referenced Aovs` is checked on render instance, u must add those
    manually to Render Elements as Pype will expect them to be rendered.

    """

    order = pyblish.api.ValidatorOrder
    label = 'VRay Referenced AOVs'
    hosts = ['maya']
    families = ['renderlayer']
    actions = [pype.api.RepairContextAction]

    def process(self, instance):
        """Plugin main entry point."""
        if instance.data.get("renderer") != "vray":
            # If not V-Ray ignore..
            return

        ref_aovs = cmds.ls(
            type=["VRayRenderElement", "VRayRenderElementSet"],
            referencedNodes=True)
        ref_aovs_enabled = ValidateVrayReferencedAOVs.maya_is_true(
            cmds.getAttr("vraySettings.relements_usereferenced"))

        if not instance.data.get("vrayUseReferencedAovs"):
            if ref_aovs_enabled and ref_aovs:
                self.log.warning((
                    "Referenced AOVs are enabled in Vray "
                    "Render Settings and are detected in scene, but "
                    "Pype render instance option for referenced AOVs is "
                    "disabled. Those AOVs will be rendered but not published "
                    "by Pype."
                ))
                self.log.warning(", ".join(ref_aovs))
        else:
            if not ref_aovs:
                self.log.warning((
                    "Use of referenced AOVs enabled but there are none "
                    "in the scene."
                ))
            if not ref_aovs_enabled:
                self.log.error((
                    "'Use referenced' not enabled in Vray Render Settings."
                ))
                raise AssertionError("Invalid render settings")

    @classmethod
    def repair(cls, context):
        """Repair action."""
        vray_settings = cmds.ls(type="VRaySettingsNode")
        if not vray_settings:
            node = cmds.createNode("VRaySettingsNode")
        else:
            node = vray_settings[0]

        cmds.setAttr("{}.relements_usereferenced".format(node), True)

    @staticmethod
    def maya_is_true(attr_val):
        """Whether a Maya attr evaluates to True.

        When querying an attribute value from an ambiguous object the
        Maya API will return a list of values, which need to be properly
        handled to evaluate properly.

        Args:
            attr_val (mixed): Maya attribute to be evaluated as bool.

        Returns:
            bool: cast Maya attribute to Pythons boolean value.

        """
        if isinstance(attr_val, types.BooleanType):
            return attr_val
        elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
            return any(attr_val)
        else:
            return bool(attr_val)
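The helper above relies on the Python 2 constants in the `types` module (`BooleanType`, `ListType`); purely as an illustration, the same check written for Python 3 would look roughly like this (not part of the commit):

    from types import GeneratorType

    def maya_is_true(attr_val):
        """Python 3 flavour of the helper above (illustrative sketch)."""
        if isinstance(attr_val, bool):
            return attr_val
        if isinstance(attr_val, (list, GeneratorType)):
            # ambiguous Maya queries may return several values; any truthy one wins
            return any(attr_val)
        return bool(attr_val)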