Mirror of https://github.com/ynput/ayon-core.git, synced 2025-12-25 21:32:15 +01:00
DWAA support
Redo of PR on 'master'
This commit is contained in:
parent 461dc0eb48
commit 52feeabb44
5 changed files with 200 additions and 26 deletions
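In short: the commit adds four helpers to the pype lib (oiio_supported, should_decompress, get_decompress_dir, decompress) and wires them into the ExtractBurnin, ExtractJpegEXR and ExtractReview publish plugins, so that EXR frames written with DWAA/DWAB compression are first rewritten uncompressed into a temporary directory (via oiiotool, located through the PYPE_OIIO_PATH environment variable) before the ffmpeg-based extractors read them.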
@@ -29,7 +29,11 @@ from .plugin_tools import (
     filter_pyblish_plugins,
     source_hash,
     get_unique_layer_name,
-    get_background_layers
+    get_background_layers,
+    oiio_supported,
+    decompress,
+    get_decompress_dir,
+    should_decompress
 )

 from .path_tools import (
@@ -64,6 +68,10 @@ __all__ = [
     "filter_pyblish_plugins",
     "get_unique_layer_name",
     "get_background_layers",
+    "oiio_supported",
+    "decompress",
+    "get_decompress_dir",
+    "should_decompress",

     "version_up",
     "get_version_from_path",
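With the re-export above in place, publish plugins can import the helpers directly from pype.lib; the extractor hunks below all use the same import form:

from pype.lib import oiio_supported, should_decompress, \
    get_decompress_dir, decompress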
@@ -5,6 +5,8 @@ import inspect
 import logging
 import re
 import json
+import pype.api
+import tempfile

 from ..api import config

@@ -134,3 +136,98 @@ def get_background_layers(file_url):
                       layer.get("filename")).
                       replace("\\", "/"))
     return layers
+
+
+def oiio_supported():
+    """
+    Checks if oiiotool is configured for this platform.
+
+    'should_decompress' will throw exception if configured,
+    but not present or working.
+    """
+    return os.getenv("PYPE_OIIO_PATH", "") != ""
+
+
+def decompress(target_dir, file_url,
+               input_frame_start=None, input_frame_end=None, log=None):
+    """
+    Decompresses DWAA 'file_url' .exr to 'target_dir'.
+
+    Creates uncompressed files in 'target_dir', they need to be cleaned.
+
+    File url could be for single file or for a sequence, in that case
+    %0Xd will be as a placeholder for frame number AND input_frame* will
+    be filled.
+    In that case single oiio command with '--frames' will be triggered for
+    all frames, this should be faster then looping and running sequentially
+
+    Args:
+        target_dir (str): extended from stagingDir
+        file_url (str): full urls to source file (with or without %0Xd)
+        input_frame_start (int) (optional): first frame
+        input_frame_end (int) (optional): last frame
+        log (Logger) (optional): pype logger
+    """
+    is_sequence = input_frame_start is not None and \
+        input_frame_end is not None and \
+        (int(input_frame_end) > int(input_frame_start))
+
+    oiio_cmd = []
+    oiio_cmd.append(os.getenv("PYPE_OIIO_PATH"))
+
+    oiio_cmd.append("--compression none")
+
+    base_file_name = os.path.basename(file_url)
+    oiio_cmd.append(file_url)
+
+    if is_sequence:
+        oiio_cmd.append("--frames {}-{}".format(input_frame_start,
+                                                input_frame_end))
+
+    oiio_cmd.append("-o")
+    oiio_cmd.append(os.path.join(target_dir, base_file_name))
+
+    subprocess_exr = " ".join(oiio_cmd)
+
+    if not log:
+        log = logging.getLogger(__name__)
+
+    log.debug("Decompressing {}".format(subprocess_exr))
+    pype.api.subprocess(
+        subprocess_exr, shell=True, logger=log
+    )
+
+
+def get_decompress_dir():
+    """
+    Creates temporary folder for decompressing.
+    Its local, in case of farm it is 'local' to the farm machine.
+
+    Should be much faster, needs to be cleaned up later.
+    """
+    return os.path.normpath(
+        tempfile.mkdtemp(prefix="pyblish_tmp_")
+    )
+
+
+def should_decompress(file_url):
+    """
+    Tests that 'file_url' is compressed with DWAA.
+
+    Uses 'oiio_supported' to check that OIIO tool is available for this
+    platform
+
+    Args:
+        file_url (str): path to rendered file (in sequence it would be
+            first file, if that compressed it is expected that whole seq
+            will be too)
+    Returns:
+        (bool): 'file_url' is DWAA compressed and should be decompressed
+    """
+    if oiio_supported():
+        output = pype.api.subprocess([os.getenv("PYPE_OIIO_PATH"),
+                                      "--info", "-v", file_url])
+        return "compression: \"dwaa\"" in output or \
+               "compression: \"dwab\"" in output
+
+    return False
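Taken together, the four helpers form a small detect-and-decompress pipeline around oiiotool. A minimal usage sketch (paths and frame range are hypothetical, and PYPE_OIIO_PATH is assumed to point at an oiiotool binary):

import os
import shutil

from pype.lib import should_decompress, get_decompress_dir, decompress

source = "/projects/demo/render/beauty.%04d.exr"  # hypothetical sequence
first_frame = source % 1001

if should_decompress(first_frame):
    # should_decompress runs "<PYPE_OIIO_PATH> --info -v <file>" and looks
    # for dwaa/dwab compression, so rewrite the frames uncompressed
    tmp_dir = get_decompress_dir()  # tempfile.mkdtemp(prefix="pyblish_tmp_")
    # decompress issues a single oiiotool call, roughly:
    #   <PYPE_OIIO_PATH> --compression none beauty.%04d.exr \
    #       --frames 1001-1010 -o <tmp_dir>/beauty.%04d.exr
    decompress(tmp_dir, source, 1001, 1010)
    source = os.path.join(tmp_dir, os.path.basename(source))
    # ... hand 'source' to the ffmpeg-based extractor, then clean up:
    shutil.rmtree(tmp_dir)  # the plugins below clean up the same way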
@@ -6,6 +6,9 @@ import tempfile

 import pype.api
 import pyblish
+from pype.lib import oiio_supported, should_decompress, \
+    get_decompress_dir, decompress
+import shutil


 class ExtractBurnin(pype.api.Extractor):
@@ -28,7 +31,8 @@ class ExtractBurnin(pype.api.Extractor):
         "premiere",
         "standalonepublisher",
         "harmony",
-        "fusion"
+        "fusion",
+        "aftereffects"
     ]
     optional = True

@@ -54,15 +58,16 @@ class ExtractBurnin(pype.api.Extractor):
     def process(self, instance):
         # ffmpeg doesn't support multipart exrs
         if instance.data.get("multipartExr") is True:
             instance_label = (
                 getattr(instance, "label", None)
                 or instance.data.get("label")
                 or instance.data.get("name")
             )
             self.log.info((
                 "Instance \"{}\" contain \"multipartExr\". Skipped."
             ).format(instance_label))
             return
+        if not oiio_supported():
+            instance_label = (
+                getattr(instance, "label", None)
+                or instance.data.get("label")
+                or instance.data.get("name")
+            )
+            self.log.info((
+                "Instance \"{}\" contain \"multipartExr\". Skipped."
+            ).format(instance_label))
+            return

         # QUESTION what is this for and should we raise an exception?
         if "representations" not in instance.data:
@@ -212,6 +217,26 @@ class ExtractBurnin(pype.api.Extractor):
             # Prepare paths and files for process.
             self.input_output_paths(new_repre, temp_data, filename_suffix)

+            decompressed_dir = ''
+            full_input_path = temp_data["full_input_path"]
+            do_decompress = should_decompress(full_input_path)
+            if do_decompress:
+                decompressed_dir = get_decompress_dir()
+
+                decompress(
+                    decompressed_dir,
+                    full_input_path,
+                    temp_data["frame_start"],
+                    temp_data["frame_end"],
+                    self.log
+                )
+
+                # input path changed, 'decompressed' added
+                input_file = os.path.basename(full_input_path)
+                temp_data["full_input_path"] = os.path.join(
+                    decompressed_dir,
+                    input_file)
+
             # Data for burnin script
             script_data = {
                 "input": temp_data["full_input_path"],
@@ -271,6 +296,9 @@ class ExtractBurnin(pype.api.Extractor):
                 os.remove(filepath)
                 self.log.debug("Removed: \"{}\"".format(filepath))

+        if do_decompress and os.path.exists(decompressed_dir):
+            shutil.rmtree(decompressed_dir)
+
     def prepare_basic_data(self, instance):
         """Pick data from instance for processing and for burnin strings.

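Note how the cleanup above is guarded: decompressed_dir starts out as an empty string and is only filled in when should_decompress() returned True, so the final rmtree can only touch a directory this plugin created. The same two-line guard reappears in the jpeg extractor below:

# quoted from the hunk above; decompressed_dir is '' unless decompression ran
if do_decompress and os.path.exists(decompressed_dir):
    shutil.rmtree(decompressed_dir)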
@@ -3,6 +3,9 @@ import os
 import pyblish.api
 import pype.api
 import pype.lib
+from pype.lib import oiio_supported, should_decompress, \
+    get_decompress_dir, decompress
+import shutil


 class ExtractJpegEXR(pyblish.api.InstancePlugin):
@@ -22,9 +25,11 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
         if 'crypto' in instance.data['subset']:
             return

-        # ffmpeg doesn't support multipart exrs
+        do_decompress = False
+        # ffmpeg doesn't support multipart exrs, use oiiotool if available
         if instance.data.get("multipartExr") is True:
             return
+        if not oiio_supported():
+            return

         # Skip review when requested.
         if not instance.data.get("review", True):
@@ -36,10 +41,6 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
         # filter out mov and img sequences
         representations_new = representations[:]

-        if instance.data.get("multipartExr"):
-            # ffmpeg doesn't support multipart exrs
-            return
-
         for repre in representations:
             tags = repre.get("tags", [])
             self.log.debug(repre)
@@ -60,6 +61,19 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
             full_input_path = os.path.join(stagingdir, input_file)
             self.log.info("input {}".format(full_input_path))

+            decompressed_dir = ''
+            do_decompress = should_decompress(full_input_path)
+            if do_decompress:
+                decompressed_dir = get_decompress_dir()
+
+                decompress(
+                    decompressed_dir,
+                    full_input_path)
+                # input path changed, 'decompressed' added
+                full_input_path = os.path.join(
+                    decompressed_dir,
+                    input_file)
+
             filename = os.path.splitext(input_file)[0]
             if not filename.endswith('.'):
                 filename += "."
@@ -111,4 +125,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
             self.log.debug("Adding: {}".format(representation))
             representations_new.append(representation)

+        if do_decompress and os.path.exists(decompressed_dir):
+            shutil.rmtree(decompressed_dir)
+
         instance.data["representations"] = representations_new
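For the jpeg thumbnail only a single frame is involved, so the extractor calls decompress() without a frame range and oiiotool rewrites just that one file; a sketch with an illustrative path:

import os

from pype.lib import get_decompress_dir, decompress

decompressed_dir = get_decompress_dir()
decompress(decompressed_dir, "/staging/beauty.1001.exr")  # illustrative path
full_input_path = os.path.join(decompressed_dir, "beauty.1001.exr")
# ffmpeg then reads the uncompressed frame from the temp dir, and the dir
# is removed with shutil.rmtree() once the jpeg representation exists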
@@ -6,6 +6,8 @@ import pyblish.api
 import clique
 import pype.api
 import pype.lib
+from pype.lib import oiio_supported, should_decompress, \
+    get_decompress_dir, decompress


 class ExtractReview(pyblish.api.InstancePlugin):
@@ -14,7 +16,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
     Compulsory attribute of representation is tags list with "review",
     otherwise the representation is ignored.

-    All new represetnations are created and encoded by ffmpeg following
+    All new representations are created and encoded by ffmpeg following
     presets found in `pype-config/presets/plugins/global/
     publish.json:ExtractReview:outputs`.
     """
@@ -58,7 +60,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
             return

         # ffmpeg doesn't support multipart exrs
-        if instance.data.get("multipartExr") is True:
+        if instance.data.get("multipartExr") is True \
+                and not oiio_supported():
+
             instance_label = (
                 getattr(instance, "label", None)
                 or instance.data.get("label")
@@ -318,9 +322,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
         Args:
             output_def (dict): Currently processed output definition.
             instance (Instance): Currently processed instance.
-            new_repre (dict): Reprensetation representing output of this
+            new_repre (dict): Representation representing output of this
                 process.
-            temp_data (dict): Base data for successfull process.
+            temp_data (dict): Base data for successful process.
         """

         # Get FFmpeg arguments from profile presets
@@ -331,9 +335,29 @@ class ExtractReview(pyblish.api.InstancePlugin):
         ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or []
         ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or []

+        input_files_urls = [os.path.join(new_repre["stagingDir"], f) for f
+                            in new_repre['files']]
+        do_decompress = should_decompress(input_files_urls[0])
+        if do_decompress:
+            # change stagingDir, decompress first
+            # calculate all paths with modified directory, used on too many
+            # places
+            # will be purged by cleanup.py automatically
+            orig_staging_dir = new_repre["stagingDir"]
+            new_repre["stagingDir"] = get_decompress_dir()
+
         # Prepare input and output filepaths
         self.input_output_paths(new_repre, output_def, temp_data)

+        if do_decompress:
+            input_file = temp_data["full_input_path"].\
+                replace(new_repre["stagingDir"], orig_staging_dir)
+
+            decompress(new_repre["stagingDir"], input_file,
+                       temp_data["frame_start"],
+                       temp_data["frame_end"],
+                       self.log)
+
         # Set output frames len to 1 when ouput is single image
         if (
             temp_data["output_ext_is_image"]
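ExtractReview takes a slightly different route from the two plugins above: rather than rewriting a single input path, it swaps the representation's whole stagingDir to the temporary directory before input_output_paths() runs, so every path derived afterwards already points at the uncompressed frames, and the compressed source is recovered by mapping the computed input back to the original directory. Roughly, with illustrative values:

new_repre = {"stagingDir": "/proj/shot/publish_staging"}  # illustrative
orig_staging_dir = new_repre["stagingDir"]
new_repre["stagingDir"] = "/tmp/pyblish_tmp_ab12cd"  # a get_decompress_dir() result
# input_output_paths() would now compute something like:
full_input_path = new_repre["stagingDir"] + "/beauty.%04d.exr"
# the compressed frames still live in the original dir, so map the path back:
input_file = full_input_path.replace(new_repre["stagingDir"], orig_staging_dir)
# -> "/proj/shot/publish_staging/beauty.%04d.exr"
# decompress(new_repre["stagingDir"], input_file, frame_start, frame_end, log)
# then fills the temp stagingDir; cleanup.py purges it after publishing.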
@@ -930,7 +954,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
         return regexes

     def validate_value_by_regexes(self, value, in_list):
-        """Validates in any regexe from list match entered value.
+        """Validates in any regex from list match entered value.

         Args:
             in_list (list): List with regexes.
@@ -955,9 +979,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
     def profile_exclusion(self, matching_profiles):
         """Find out most matching profile byt host, task and family match.

-        Profiles are selectivelly filtered. Each profile should have
+        Profiles are selectively filtered. Each profile should have
         "__value__" key with list of booleans. Each boolean represents
-        existence of filter for specific key (host, taks, family).
+        existence of filter for specific key (host, tasks, family).
         Profiles are looped in sequence. In each sequence are split into
         true_list and false_list. For next sequence loop are used profiles in
         true_list if there are any profiles else false_list is used.
@@ -1036,7 +1060,7 @@ class ExtractReview(pyblish.api.InstancePlugin):

         highest_profile_points = -1
         # Each profile get 1 point for each matching filter. Profile with most
-        # points is returnd. For cases when more than one profile will match
+        # points is returned. For cases when more than one profile will match
         # are also stored ordered lists of matching values.
         for profile in self.profiles:
             profile_points = 0
@@ -1648,7 +1672,7 @@ class ExtractReview(pyblish.api.InstancePlugin):

     def add_video_filter_args(self, args, inserting_arg):
         """
-        Fixing video filter argumets to be one long string
+        Fixing video filter arguments to be one long string

         Args:
             args (list): list of string arguments