return missing commits from previous 2.x/develop merge

Milan Kolar 2020-12-21 19:30:20 +01:00
parent 3ec08dd6e9
commit 067666bbe8
9 changed files with 363 additions and 49 deletions

@@ -32,6 +32,9 @@ Attributes:
ImagePrefixes (dict): Mapping between renderers and their respective
image prefix attribute names.
Todo:
Determine `multipart` from render instance.
"""
import types
@@ -94,6 +97,10 @@ class ExpectedFiles:
multipart = False
def __init__(self, render_instance):
"""Constructor."""
self._render_instance = render_instance
def get(self, renderer, layer):
"""Get expected files for given renderer and render layer.
@@ -114,15 +121,20 @@ class ExpectedFiles:
renderSetup.instance().switchToLayerUsingLegacyName(layer)
if renderer.lower() == "arnold":
return self._get_files(ExpectedFilesArnold(layer))
return self._get_files(ExpectedFilesArnold(layer,
self._render_instance))
elif renderer.lower() == "vray":
return self._get_files(ExpectedFilesVray(layer))
return self._get_files(ExpectedFilesVray(
layer, self._render_instance))
elif renderer.lower() == "redshift":
return self._get_files(ExpectedFilesRedshift(layer))
return self._get_files(ExpectedFilesRedshift(
layer, self._render_instance))
elif renderer.lower() == "mentalray":
return self._get_files(ExpectedFilesMentalray(layer))
return self._get_files(ExpectedFilesMentalray(
layer, self._render_instance))
elif renderer.lower() == "renderman":
return self._get_files(ExpectedFilesRenderman(layer))
return self._get_files(ExpectedFilesRenderman(
layer, self._render_instance))
else:
raise UnsupportedRendererException(
"unsupported {}".format(renderer)
@@ -149,9 +161,10 @@ class AExpectedFiles:
layer = None
multipart = False
def __init__(self, layer):
def __init__(self, layer, render_instance):
"""Constructor."""
self.layer = layer
self.render_instance = render_instance
@abstractmethod
def get_aovs(self):
@@ -460,9 +473,9 @@ class ExpectedFilesArnold(AExpectedFiles):
"maya": "",
}
def __init__(self, layer):
def __init__(self, layer, render_instance):
"""Constructor."""
super(ExpectedFilesArnold, self).__init__(layer)
super(ExpectedFilesArnold, self).__init__(layer, render_instance)
self.renderer = "arnold"
def get_aovs(self):
@@ -531,9 +544,9 @@ class ExpectedFilesArnold(AExpectedFiles):
class ExpectedFilesVray(AExpectedFiles):
"""Expected files for V-Ray renderer."""
def __init__(self, layer):
def __init__(self, layer, render_instance):
"""Constructor."""
super(ExpectedFilesVray, self).__init__(layer)
super(ExpectedFilesVray, self).__init__(layer, render_instance)
self.renderer = "vray"
def get_renderer_prefix(self):
@@ -614,24 +627,25 @@ class ExpectedFilesVray(AExpectedFiles):
if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
default_ext = "exr"
# add beauty as default
enabled_aovs.append(
(u"beauty", default_ext)
)
if not self.maya_is_true(
cmds.getAttr("vraySettings.relements_enableall")
):
return enabled_aovs
# handle aovs from references
use_ref_aovs = self.render_instance.data.get(
"vrayUseReferencedAovs", False) or False
# filter all namespace prefixed AOVs - they are pulled in from
# references and are not rendered.
vr_aovs = [
n
for n in cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"]
)
if len(n.split(":")) == 1
]
# this will have list of all aovs no matter if they are coming from
# reference or not.
vr_aovs = cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"]) or []
if not use_ref_aovs:
ref_aovs = cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"],
referencedNodes=True) or []
# get difference
vr_aovs = list(set(vr_aovs) - set(ref_aovs))
for aov in vr_aovs:
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
@@ -703,9 +717,9 @@ class ExpectedFilesRedshift(AExpectedFiles):
ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]
def __init__(self, layer):
def __init__(self, layer, render_instance):
"""Construtor."""
super(ExpectedFilesRedshift, self).__init__(layer)
super(ExpectedFilesRedshift, self).__init__(layer, render_instance)
self.renderer = "redshift"
def get_renderer_prefix(self):
@@ -822,9 +836,9 @@ class ExpectedFilesRenderman(AExpectedFiles):
This is very rudimentary and needs more love and testing.
"""
def __init__(self, layer):
def __init__(self, layer, render_instance):
"""Constructor."""
super(ExpectedFilesRenderman, self).__init__(layer)
super(ExpectedFilesRenderman, self).__init__(layer, render_instance)
self.renderer = "renderman"
def get_aovs(self):
@@ -887,7 +901,7 @@ class ExpectedFilesRenderman(AExpectedFiles):
class ExpectedFilesMentalray(AExpectedFiles):
"""Skeleton unimplemented class for Mentalray renderer."""
def __init__(self, layer):
def __init__(self, layer, render_instance):
"""Constructor.
Raises:

@@ -56,7 +56,11 @@ from .plugin_tools import (
filter_pyblish_plugins,
source_hash,
get_unique_layer_name,
get_background_layers
get_background_layers,
oiio_supported,
decompress,
get_decompress_dir,
should_decompress
)
from .user_settings import (
@@ -108,6 +112,10 @@ __all__ = [
"source_hash",
"get_unique_layer_name",
"get_background_layers",
"oiio_supported",
"decompress",
"get_decompress_dir",
"should_decompress",
"version_up",
"get_version_from_path",

@@ -5,6 +5,8 @@ import inspect
import logging
import re
import json
import pype.api
import tempfile
from pype.settings import get_project_settings
@@ -134,3 +136,115 @@ def get_background_layers(file_url):
layer.get("filename")).
replace("\\", "/"))
return layers
def oiio_supported():
"""
Checks if oiiotool is configured for this platform.
Expects full path to the executable.
'should_decompress' will throw an exception if oiiotool is configured
but not present or not working.
Returns:
(bool)
"""
oiio_path = os.getenv("PYPE_OIIO_PATH", "")
if not oiio_path or not os.path.exists(oiio_path):
log.debug("OIIOTool is not configured or not present at {}".
format(oiio_path))
return False
return True
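oiio_supported only verifies that the environment variable points at an existing file; a hypothetical configuration sketch (the paths below are examples, not the actual deployment):

import os
from pype.lib import oiio_supported

# Hypothetical locations of the oiiotool binary.
os.environ["PYPE_OIIO_PATH"] = (
    "C:/Programs/oiio/oiiotool.exe" if os.name == "nt"
    else "/opt/oiio/bin/oiiotool")
print(oiio_supported())  # True only when the configured path really exists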
def decompress(target_dir, file_url,
input_frame_start=None, input_frame_end=None, log=None):
"""
Decompresses DWAA 'file_url' .exr to 'target_dir'.
Creates uncompressed files in 'target_dir'; they need to be cleaned up
afterwards.
File url could be for a single file or for a sequence; in that case
%0Xd will be used as a placeholder for the frame number AND input_frame*
must be filled.
In that case a single oiio command with '--frames' will be triggered for
all frames; this should be faster than looping and running sequentially.
Args:
target_dir (str): extended from stagingDir
file_url (str): full url to the source file (with or without %0Xd)
input_frame_start (int) (optional): first frame
input_frame_end (int) (optional): last frame
log (Logger) (optional): pype logger
"""
is_sequence = input_frame_start is not None and \
input_frame_end is not None and \
(int(input_frame_end) > int(input_frame_start))
oiio_cmd = []
oiio_cmd.append(os.getenv("PYPE_OIIO_PATH"))
oiio_cmd.append("--compression none")
base_file_name = os.path.basename(file_url)
oiio_cmd.append(file_url)
if is_sequence:
oiio_cmd.append("--frames {}-{}".format(input_frame_start,
input_frame_end))
oiio_cmd.append("-o")
oiio_cmd.append(os.path.join(target_dir, base_file_name))
subprocess_exr = " ".join(oiio_cmd)
if not log:
log = logging.getLogger(__name__)
log.debug("Decompressing {}".format(subprocess_exr))
pype.api.subprocess(
subprocess_exr, shell=True, logger=log
)
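A usage sketch for a DWAA-compressed sequence; the path and frame range below are hypothetical, and the caller is responsible for removing the temporary directory:

import shutil
from pype.lib import decompress, get_decompress_dir

staging = get_decompress_dir()
decompress(
    staging,
    "/renders/shot010/beauty.%04d.exr",  # hypothetical sequence, %0Xd placeholder
    input_frame_start=1001,
    input_frame_end=1010)
# ... feed the uncompressed frames in `staging` to ffmpeg / burnin ...
shutil.rmtree(staging)                   # clean up the temporary copies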
def get_decompress_dir():
"""
Creates a temporary folder for decompressing.
It's local; in case of a farm it is 'local' to the farm machine.
Should be much faster, but needs to be cleaned up later.
"""
return os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
def should_decompress(file_url):
"""
Tests that 'file_url' is compressed with DWAA.
Uses 'oiio_supported' to check that OIIO tool is available for this
platform.
Shouldn't throw an exception as oiiotool is guarded by the check function.
Currently implemented this way as there is no support for Mac and Linux.
In the future, it should be stricter and throw an exception on
misconfiguration.
Args:
file_url (str): path to rendered file (for a sequence it would be the
first file; if that is compressed, it is expected that the whole
sequence will be too)
Returns:
(bool): 'file_url' is DWAA compressed and should be decompressed
and we can decompress (oiiotool supported)
"""
if oiio_supported():
output = pype.api.subprocess([
os.getenv("PYPE_OIIO_PATH"),
"--info", "-v", file_url])
return "compression: \"dwaa\"" in output or \
"compression: \"dwab\"" in output
return False
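Together the three helpers form the guarded pattern used by the extract plugins further down in this commit; a minimal sketch, assuming `full_input_path` points at the first rendered exr:

import os
import shutil
from pype.lib import should_decompress, get_decompress_dir, decompress

decompressed_dir = ''
do_decompress = should_decompress(full_input_path)  # False when oiiotool is unavailable
if do_decompress:
    decompressed_dir = get_decompress_dir()
    decompress(decompressed_dir, full_input_path)
    # further processing reads the uncompressed copy
    full_input_path = os.path.join(
        decompressed_dir, os.path.basename(full_input_path))

# ... run ffmpeg / burnin / thumbnail extraction on full_input_path ...

if do_decompress and os.path.exists(decompressed_dir):
    shutil.rmtree(decompressed_dir)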

@@ -6,6 +6,9 @@ import tempfile
import pype.api
import pyblish
from pype.lib import should_decompress, \
get_decompress_dir, decompress
import shutil
class ExtractBurnin(pype.api.Extractor):
@@ -28,7 +31,8 @@ class ExtractBurnin(pype.api.Extractor):
"premiere",
"standalonepublisher",
"harmony",
"fusion"
"fusion",
"aftereffects"
]
optional = True
@@ -204,6 +208,26 @@ class ExtractBurnin(pype.api.Extractor):
# Prepare paths and files for process.
self.input_output_paths(new_repre, temp_data, filename_suffix)
decompressed_dir = ''
full_input_path = temp_data["full_input_path"]
do_decompress = should_decompress(full_input_path)
if do_decompress:
decompressed_dir = get_decompress_dir()
decompress(
decompressed_dir,
full_input_path,
temp_data["frame_start"],
temp_data["frame_end"],
self.log
)
# input path changed, 'decompressed' added
input_file = os.path.basename(full_input_path)
temp_data["full_input_path"] = os.path.join(
decompressed_dir,
input_file)
# Data for burnin script
script_data = {
"input": temp_data["full_input_path"],
@@ -263,6 +287,9 @@ class ExtractBurnin(pype.api.Extractor):
os.remove(filepath)
self.log.debug("Removed: \"{}\"".format(filepath))
if do_decompress and os.path.exists(decompressed_dir):
shutil.rmtree(decompressed_dir)
def prepare_basic_data(self, instance):
"""Pick data from instance for processing and for burnin strings.

@@ -3,6 +3,9 @@ import os
import pyblish.api
import pype.api
import pype.lib
from pype.lib import should_decompress, \
get_decompress_dir, decompress
import shutil
class ExtractJpegEXR(pyblish.api.InstancePlugin):
@@ -22,7 +25,8 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
if 'crypto' in instance.data['subset']:
return
# ffmpeg doesn't support multipart exrs
do_decompress = False
# ffmpeg doesn't support multipart exrs, use oiiotool if available
if instance.data.get("multipartExr") is True:
return
@@ -36,10 +40,6 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
# filter out mov and img sequences
representations_new = representations[:]
if instance.data.get("multipartExr"):
# ffmpeg doesn't support multipart exrs
return
for repre in representations:
tags = repre.get("tags", [])
self.log.debug(repre)
@@ -60,6 +60,19 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
decompressed_dir = ''
do_decompress = should_decompress(full_input_path)
if do_decompress:
decompressed_dir = get_decompress_dir()
decompress(
decompressed_dir,
full_input_path)
# input path changed, 'decompressed' added
full_input_path = os.path.join(
decompressed_dir,
input_file)
filename = os.path.splitext(input_file)[0]
if not filename.endswith('.'):
filename += "."
@@ -93,7 +106,14 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
# run subprocess
self.log.debug("{}".format(subprocess_jpeg))
pype.api.subprocess(subprocess_jpeg, shell=True)
try: # temporary until oiiotool is supported cross platform
pype.api.subprocess(subprocess_jpeg, shell=True)
except RuntimeError as exp:
if "Compression" in str(exp):
self.log.debug("Unsupported compression on input files. " +
"Skipping!!!")
return
raise
if "representations" not in instance.data:
instance.data["representations"] = []
@@ -111,4 +131,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
self.log.debug("Adding: {}".format(representation))
representations_new.append(representation)
if do_decompress and os.path.exists(decompressed_dir):
shutil.rmtree(decompressed_dir)
instance.data["representations"] = representations_new

@@ -6,6 +6,8 @@ import pyblish.api
import clique
import pype.api
import pype.lib
from pype.lib import should_decompress, \
get_decompress_dir, decompress
class ExtractReview(pyblish.api.InstancePlugin):
@@ -14,7 +16,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
Compulsory attribute of representation is tags list with "review",
otherwise the representation is ignored.
All new represetnations are created and encoded by ffmpeg following
All new representations are created and encoded by ffmpeg following
presets found in `pype-config/presets/plugins/global/
publish.json:ExtractReview:outputs`.
"""
@@ -188,9 +190,17 @@ class ExtractReview(pyblish.api.InstancePlugin):
temp_data = self.prepare_temp_data(instance, repre, output_def)
ffmpeg_args = self._ffmpeg_arguments(
output_def, instance, new_repre, temp_data
)
try: # temporary until oiiotool is supported cross platform
ffmpeg_args = self._ffmpeg_arguments(
output_def, instance, new_repre, temp_data
)
except ZeroDivisionError:
if 'exr' in temp_data["origin_repre"]["ext"]:
self.log.debug("Unsupported compression on input " +
"files. Skipping!!!")
return
raise
subprcs_cmd = " ".join(ffmpeg_args)
# run subprocess
@@ -318,9 +328,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
Args:
output_def (dict): Currently processed output definition.
instance (Instance): Currently processed instance.
new_repre (dict): Reprensetation representing output of this
new_repre (dict): Representation representing output of this
process.
temp_data (dict): Base data for successfull process.
temp_data (dict): Base data for successful process.
"""
# Get FFmpeg arguments from profile presets
@@ -331,9 +341,35 @@ class ExtractReview(pyblish.api.InstancePlugin):
ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or []
ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or []
if isinstance(new_repre['files'], list):
input_files_urls = [os.path.join(new_repre["stagingDir"], f) for f
in new_repre['files']]
test_path = input_files_urls[0]
else:
test_path = os.path.join(
new_repre["stagingDir"], new_repre['files'])
do_decompress = should_decompress(test_path)
if do_decompress:
# change stagingDir, decompress first
# calculate all paths with modified directory, used on too many
# places
# will be purged by cleanup.py automatically
orig_staging_dir = new_repre["stagingDir"]
new_repre["stagingDir"] = get_decompress_dir()
# Prepare input and output filepaths
self.input_output_paths(new_repre, output_def, temp_data)
if do_decompress:
input_file = temp_data["full_input_path"].\
replace(new_repre["stagingDir"], orig_staging_dir)
decompress(new_repre["stagingDir"], input_file,
temp_data["frame_start"],
temp_data["frame_end"],
self.log)
# Set output frames len to 1 when output is single image
if (
temp_data["output_ext_is_image"]
@@ -930,7 +966,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
return regexes
def validate_value_by_regexes(self, value, in_list):
"""Validates in any regexe from list match entered value.
"""Validates in any regex from list match entered value.
Args:
in_list (list): List with regexes.
@@ -955,9 +991,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
def profile_exclusion(self, matching_profiles):
"""Find out most matching profile byt host, task and family match.
Profiles are selectivelly filtered. Each profile should have
Profiles are selectively filtered. Each profile should have
"__value__" key with list of booleans. Each boolean represents
existence of filter for specific key (host, taks, family).
existence of filter for specific key (host, tasks, family).
Profiles are looped in sequence. In each sequence are split into
true_list and false_list. For next sequence loop are used profiles in
true_list if there are any profiles else false_list is used.
@@ -1036,7 +1072,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
highest_profile_points = -1
# Each profile get 1 point for each matching filter. Profile with most
# points is returnd. For cases when more than one profile will match
# points is returned. For cases when more than one profile will match
# are also stored ordered lists of matching values.
for profile in self.profiles:
profile_points = 0
@@ -1648,7 +1684,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
def add_video_filter_args(self, args, inserting_arg):
"""
Fixing video filter argumets to be one long string
Fixing video filter arguments to be one long string
Args:
args (list): list of string arguments

@@ -193,6 +193,7 @@ class CreateRender(avalon.maya.Creator):
self.data["tilesX"] = 2
self.data["tilesY"] = 2
self.data["convertToScanline"] = False
self.data["vrayUseReferencedAovs"] = False
# Disable for now as this feature is not working yet
# self.data["assScene"] = False

@@ -149,7 +149,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
# return all expected files for all cameras and aovs in given
# frame range
ef = ExpectedFiles()
ef = ExpectedFiles(render_instance)
exp_files = ef.get(renderer, layer_name)
self.log.info("multipart: {}".format(ef.multipart))
assert exp_files, "no file names were generated, this is bug"
@@ -248,7 +248,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"tilesX": render_instance.data.get("tilesX") or 2,
"tilesY": render_instance.data.get("tilesY") or 2,
"priority": render_instance.data.get("priority"),
"convertToScanline": render_instance.data.get("convertToScanline") or False # noqa: E501
"convertToScanline": render_instance.data.get("convertToScanline") or False, # noqa: E501
"vrayUseReferencedAovs": render_instance.data.get("vrayUseReferencedAovs") or False # noqa: E501
}
if self.sync_workfile_version:

@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
"""Validate if there are AOVs pulled from references."""
import pyblish.api
import types
from maya import cmds
import pype.hosts.maya.action
class ValidateVrayReferencedAOVs(pyblish.api.InstancePlugin):
"""Validate whether the V-Ray Render Elements (AOVs) include references.
This will check if there are AOVs pulled from references. If
`Vray Use Referenced Aovs` is checked on the render instance, you must add
those manually to Render Elements as Pype will expect them to be rendered.
"""
order = pyblish.api.ValidatorOrder
label = 'VRay Referenced AOVs'
hosts = ['maya']
families = ['renderlayer']
actions = [pype.api.RepairContextAction]
def process(self, instance):
"""Plugin main entry point."""
if instance.data.get("renderer") != "vray":
# If not V-Ray ignore..
return
ref_aovs = cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"],
referencedNodes=True)
ref_aovs_enabled = ValidateVrayReferencedAOVs.maya_is_true(
cmds.getAttr("vraySettings.relements_usereferenced"))
if not instance.data.get("vrayUseReferencedAovs"):
if ref_aovs_enabled and ref_aovs:
self.log.warning((
"Referenced AOVs are enabled in Vray "
"Render Settings and are detected in scene, but "
"Pype render instance option for referenced AOVs is "
"disabled. Those AOVs will be rendered but not published "
"by Pype."
))
self.log.warning(", ".join(ref_aovs))
else:
if not ref_aovs:
self.log.warning((
"Use of referenced AOVs enabled but there are none "
"in the scene."
))
if not ref_aovs_enabled:
self.log.error((
"'Use referenced' not enabled in Vray Render Settings."
))
raise AssertionError("Invalid render settings")
@classmethod
def repair(cls, context):
"""Repair action."""
vray_settings = cmds.ls(type="VRaySettingsNode")
if not vray_settings:
node = cmds.createNode("VRaySettingsNode")
else:
node = vray_settings[0]
cmds.setAttr("{}.relements_usereferenced".format(node), True)
@staticmethod
def maya_is_true(attr_val):
"""Whether a Maya attr evaluates to True.
When querying an attribute value from an ambiguous object the
Maya API will return a list of values, which needs to be handled
properly to evaluate correctly.
Args:
attr_val (mixed): Maya attribute to be evaluated as bool.
Returns:
bool: cast Maya attribute to Pythons boolean value.
"""
if isinstance(attr_val, types.BooleanType):
return attr_val
elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)
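A quick illustration of the list handling described above (return values are what the current implementation yields under Maya's Python 2, where types.BooleanType/ListType exist):

# Scalar attribute values are simply cast:
ValidateVrayReferencedAOVs.maya_is_true(1)              # -> True
ValidateVrayReferencedAOVs.maya_is_true(0)              # -> False
# Ambiguous queries can return a list -- any() decides:
ValidateVrayReferencedAOVs.maya_is_true([False, True])  # -> True
ValidateVrayReferencedAOVs.maya_is_true([])             # -> False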