Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Merge branch 'develop' into feature/OP-1915_flame-ftrack-direct-link

Commit b2344ada9a: 31 changed files with 698 additions and 379 deletions
@@ -13,7 +13,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook):
    # Should be as last hook because must change launch arguments to string
    order = 1000
-    app_groups = ["nuke", "nukex", "hiero", "nukestudio", "photoshop"]
+    app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
    platforms = ["windows"]

    def execute(self):
@@ -49,7 +49,3 @@ class NonPythonHostHook(PreLaunchHook):
        if remainders:
            self.launch_context.launch_args.extend(remainders)

-        # This must be set otherwise it wouldn't be possible to catch output
-        # when build OpenPype is used.
-        self.launch_context.kwargs["stdout"] = subprocess.DEVNULL
-        self.launch_context.kwargs["stderr"] = subprocess.DEVNULL
@@ -47,10 +47,8 @@ class FlameAppFramework(object):
        def setdefault(self, k, default=None):
            return self.master[self.name].setdefault(k, default)

-        def pop(self, k, v=object()):
-            if v is object():
-                return self.master[self.name].pop(k)
-            return self.master[self.name].pop(k, v)
+        def pop(self, *args, **kwargs):
+            return self.master[self.name].pop(*args, **kwargs)

        def update(self, mapping=(), **kwargs):
            self.master[self.name].update(mapping, **kwargs)
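The simplified pop just delegates to built-in dict.pop, which already implements the sentinel-based "missing key" behaviour the removed code re-created by hand. A quick equivalence check with a plain dict (illustrative only, not part of the commit):

```python
d = {"a": 1}
assert d.pop("a") == 1             # key present
assert d.pop("missing", 42) == 42  # default path
# d.pop("missing") with no default raises KeyError, same as the old code
```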
@@ -192,7 +192,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
        render_products = layer_render_products.layer_data.products
        assert render_products, "no render products generated"
        exp_files = []
+        multipart = False
        for product in render_products:
+            if product.multipart:
+                multipart = True
            product_name = product.productName
            if product.camera and layer_render_products.has_camera_token():
                product_name = "{}{}".format(
@@ -205,7 +208,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
            })

        self.log.info("multipart: {}".format(
-            layer_render_products.multipart))
+            multipart))
        assert exp_files, "no file names were generated, this is bug"
        self.log.info(exp_files)
@@ -300,7 +303,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
            "subset": expected_layer_name,
            "attachTo": attach_to,
            "setMembers": layer_name,
-            "multipartExr": layer_render_products.multipart,
+            "multipartExr": multipart,
            "review": render_instance.data.get("review") or False,
            "publish": True,
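Net effect of the three Maya hunks: the collector now derives the multipart flag by OR-ing it across all render products instead of trusting the layer-level value. A stand-in sketch of that aggregation (the _Product class is made up for illustration; the commit uses an explicit loop, which is equivalent):

```python
class _Product:
    def __init__(self, multipart):
        self.multipart = multipart

# One multipart product is enough to flag the whole layer
render_products = [_Product(False), _Product(True)]
multipart = any(product.multipart for product in render_products)
assert multipart is True
```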
@@ -10,6 +10,7 @@ class ValidateWorkfileData(pyblish.api.ContextPlugin):

    label = "Validate Workfile Data"
    order = pyblish.api.ValidatorOrder
+    targets = ["tvpaint_worker"]

    def process(self, context):
        # Data collected in `CollectAvalonEntities`
@@ -49,7 +49,8 @@ from .vendor_bin_utils import (
    get_vendor_bin_path,
    get_oiio_tools_path,
    get_ffmpeg_tool_path,
-    ffprobe_streams
+    ffprobe_streams,
+    is_oiio_supported
)

from .python_module_tools import (
@@ -65,6 +66,11 @@ from .profiles_filtering import (
    filter_profiles
)

+from .transcoding import (
+    get_transcode_temp_directory,
+    should_convert_for_ffmpeg,
+    convert_for_ffmpeg
+)
from .avalon_context import (
    CURRENT_DOC_SCHEMAS,
    PROJECT_NAME_ALLOWED_SYMBOLS,
@@ -137,10 +143,6 @@ from .plugin_tools import (
    source_hash,
    get_unique_layer_name,
    get_background_layers,
-    oiio_supported,
-    decompress,
-    get_decompress_dir,
-    should_decompress
)

from .path_tools import (
@@ -185,6 +187,7 @@ __all__ = [
    "get_oiio_tools_path",
    "get_ffmpeg_tool_path",
    "ffprobe_streams",
+    "is_oiio_supported",

    "import_filepath",
    "modules_from_path",
@@ -192,6 +195,10 @@ __all__ = [
    "classes_from_module",
    "import_module_from_dirpath",

+    "get_transcode_temp_directory",
+    "should_convert_for_ffmpeg",
+    "convert_for_ffmpeg",
+
    "CURRENT_DOC_SCHEMAS",
    "PROJECT_NAME_ALLOWED_SYMBOLS",
    "PROJECT_NAME_REGEX",
@@ -256,10 +263,6 @@ __all__ = [
    "source_hash",
    "get_unique_layer_name",
    "get_background_layers",
-    "oiio_supported",
-    "decompress",
-    "get_decompress_dir",
-    "should_decompress",

    "version_up",
    "get_version_from_path",
@@ -5,12 +5,8 @@ import inspect
import logging
import re
import json
-import tempfile
-import distutils

from .execute import run_subprocess
from .profiles_filtering import filter_profiles
-from .vendor_bin_utils import get_oiio_tools_path

from openpype.settings import get_project_settings
@@ -425,129 +421,6 @@ def get_background_layers(file_url):
    return layers


-def oiio_supported():
-    """
-    Checks if oiiotool is configured for this platform.
-
-    Triggers simple subprocess, handles exception if fails.
-
-    'should_decompress' will throw exception if configured,
-    but not present or not working.
-    Returns:
-        (bool)
-    """
-    oiio_path = get_oiio_tools_path()
-    if oiio_path:
-        oiio_path = distutils.spawn.find_executable(oiio_path)
-
-    if not oiio_path:
-        log.debug("OIIOTool is not configured or not present at {}".
-                  format(oiio_path))
-        return False
-
-    return True
-
-
-def decompress(target_dir, file_url,
-               input_frame_start=None, input_frame_end=None, log=None):
-    """
-    Decompresses DWAA 'file_url' .exr to 'target_dir'.
-
-    Creates uncompressed files in 'target_dir', they need to be cleaned.
-
-    File url could be for single file or for a sequence, in that case
-    %0Xd will be as a placeholder for frame number AND input_frame* will
-    be filled.
-    In that case single oiio command with '--frames' will be triggered for
-    all frames, this should be faster than looping and running sequentially
-
-    Args:
-        target_dir (str): extended from stagingDir
-        file_url (str): full urls to source file (with or without %0Xd)
-        input_frame_start (int) (optional): first frame
-        input_frame_end (int) (optional): last frame
-        log (Logger) (optional): pype logger
-    """
-    is_sequence = input_frame_start is not None and \
-        input_frame_end is not None and \
-        (int(input_frame_end) > int(input_frame_start))
-
-    oiio_cmd = []
-    oiio_cmd.append(get_oiio_tools_path())
-
-    oiio_cmd.append("--compression none")
-
-    base_file_name = os.path.basename(file_url)
-    oiio_cmd.append(file_url)
-
-    if is_sequence:
-        oiio_cmd.append("--frames {}-{}".format(input_frame_start,
-                                                input_frame_end))
-
-    oiio_cmd.append("-o")
-    oiio_cmd.append(os.path.join(target_dir, base_file_name))
-
-    subprocess_exr = " ".join(oiio_cmd)
-
-    if not log:
-        log = logging.getLogger(__name__)
-
-    log.debug("Decompressing {}".format(subprocess_exr))
-    run_subprocess(
-        subprocess_exr, shell=True, logger=log
-    )
-
-
-def get_decompress_dir():
-    """
-    Creates temporary folder for decompressing.
-    Its local, in case of farm it is 'local' to the farm machine.
-
-    Should be much faster, needs to be cleaned up later.
-    """
-    return os.path.normpath(
-        tempfile.mkdtemp(prefix="pyblish_tmp_")
-    )
-
-
-def should_decompress(file_url):
-    """
-    Tests that 'file_url' is compressed with DWAA.
-
-    Uses 'oiio_supported' to check that OIIO tool is available for this
-    platform.
-
-    Shouldn't throw exception as oiiotool is guarded by check function.
-    Currently implemented this way as there is no support for Mac and Linux
-    In the future, it should be more strict and throw exception on
-    misconfiguration.
-
-    Args:
-        file_url (str): path to rendered file (in sequence it would be
-            first file, if that compressed it is expected that whole seq
-            will be too)
-    Returns:
-        (bool): 'file_url' is DWAA compressed and should be decompressed
-            and we can decompress (oiiotool supported)
-    """
-    if oiio_supported():
-        try:
-            output = run_subprocess([
-                get_oiio_tools_path(),
-                "--info", "-v", file_url])
-            return "compression: \"dwaa\"" in output or \
-                "compression: \"dwab\"" in output
-        except RuntimeError:
-            _name, ext = os.path.splitext(file_url)
-            # TODO: shouldn't the list of allowed extensions be
-            # taken from an OIIO variable of supported formats
-            if ext not in [".mxf"]:
-                # Reraise exception
-                raise
-            return False
-    return False


def parse_json(path):
    """Parses json file at 'path' location
openpype/lib/transcoding.py (new file, 266 lines)
@@ -0,0 +1,266 @@
import os
import re
import logging
import collections
import tempfile

from .execute import run_subprocess
from .vendor_bin_utils import (
    get_oiio_tools_path,
    is_oiio_supported
)


def get_transcode_temp_directory():
    """Creates temporary folder for transcoding.

    Its local, in case of farm it is 'local' to the farm machine.

    Should be much faster, needs to be cleaned up later.
    """
    return os.path.normpath(
        tempfile.mkdtemp(prefix="op_transcoding_")
    )


def get_oiio_info_for_input(filepath, logger=None):
    """Call oiiotool to get information about input and return stdout."""
    args = [
        get_oiio_tools_path(), "--info", "-v", filepath
    ]
    return run_subprocess(args, logger=logger)


def parse_oiio_info(oiio_info):
    """Create an object based on output from oiiotool.

    Removes quotation marks from compression value. Parse channels into
    dictionary - key is channel name, value is determined type of channel
    (e.g. 'uint', 'float').

    Args:
        oiio_info (str): Output of calling "oiiotool --info -v <path>"

    Returns:
        dict: Loaded data from output.
    """
    lines = [
        line.strip()
        for line in oiio_info.split("\n")
    ]
    # Each line should contain information about one key
    # key - value are separated with ": "
    oiio_sep = ": "
    data_map = {}
    for line in lines:
        parts = line.split(oiio_sep)
        if len(parts) < 2:
            continue
        key = parts.pop(0)
        value = oiio_sep.join(parts)
        data_map[key] = value

    if "compression" in data_map:
        value = data_map["compression"]
        data_map["compression"] = value.replace("\"", "")

    channels_info = {}
    channels_value = data_map.get("channel list") or ""
    if channels_value:
        channels = channels_value.split(", ")
        type_regex = re.compile(r"(?P<name>[^\(]+) \((?P<type>[^\)]+)\)")
        for channel in channels:
            match = type_regex.search(channel)
            if not match:
                channel_name = channel
                channel_type = "uint"
            else:
                channel_name = match.group("name")
                channel_type = match.group("type")
            channels_info[channel_name] = channel_type
    data_map["channels_info"] = channels_info
    return data_map


def get_convert_rgb_channels(channels_info):
    """Get first available RGB(A) group from channels info.

    ## Examples
    ```
    # Ideal situation
    channels_info: {
        "R": ...,
        "G": ...,
        "B": ...,
        "A": ...
    }
    ```
    Result will be `("R", "G", "B", "A")`

    ```
    # Not ideal situation
    channels_info: {
        "beauty.red": ...,
        "beauty.green": ...,
        "beauty.blue": ...,
        "depth.Z": ...
    }
    ```
    Result will be `("beauty.red", "beauty.green", "beauty.blue", None)`

    Returns:
        NoneType: There is no channel combination that matches RGB
            combination.
        tuple: Tuple of 4 channel names defining channel names for R, G, B, A
            where A can be None.
    """
    rgb_by_main_name = collections.defaultdict(dict)
    main_name_order = [""]
    for channel_name in channels_info.keys():
        name_parts = channel_name.split(".")
        rgb_part = name_parts.pop(-1).lower()
        main_name = ".".join(name_parts)
        if rgb_part in ("r", "red"):
            rgb_by_main_name[main_name]["R"] = channel_name
        elif rgb_part in ("g", "green"):
            rgb_by_main_name[main_name]["G"] = channel_name
        elif rgb_part in ("b", "blue"):
            rgb_by_main_name[main_name]["B"] = channel_name
        elif rgb_part in ("a", "alpha"):
            rgb_by_main_name[main_name]["A"] = channel_name
        else:
            continue
        if main_name not in main_name_order:
            main_name_order.append(main_name)

    output = None
    for main_name in main_name_order:
        colors = rgb_by_main_name.get(main_name) or {}
        red = colors.get("R")
        green = colors.get("G")
        blue = colors.get("B")
        alpha = colors.get("A")
        if red is not None and green is not None and blue is not None:
            output = (red, green, blue, alpha)
            break

    return output


def should_convert_for_ffmpeg(src_filepath):
    """Find out if input should be converted for ffmpeg.

    Currently cares only about exr inputs and is based on OpenImageIO.

    Returns:
        bool/NoneType: True if should be converted, False if should not and
            None if can't determine.
    """
    # Care only about exr at this moment
    ext = os.path.splitext(src_filepath)[-1].lower()
    if ext != ".exr":
        return False

    # Can't determine if should convert or not without oiiotool
    if not is_oiio_supported():
        return None

    # Load info about input from oiio tool
    oiio_info = get_oiio_info_for_input(src_filepath)
    input_info = parse_oiio_info(oiio_info)

    # Check compression
    compression = input_info["compression"]
    if compression in ("dwaa", "dwab"):
        return True

    # Check channels
    channels_info = input_info["channels_info"]
    review_channels = get_convert_rgb_channels(channels_info)
    if review_channels is None:
        return None

    return False


def convert_for_ffmpeg(
    first_input_path,
    output_dir,
    input_frame_start,
    input_frame_end,
    logger=None
):
    """Convert source file to format supported in ffmpeg.

    Currently can convert only exrs.

    Args:
        first_input_path (str): Path to first file of a sequence or a single
            file path for non-sequential input.
        output_dir (str): Path to directory where output will be rendered.
            Must not be same as input's directory.
        input_frame_start (int): Frame start of input.
        input_frame_end (int): Frame end of input.
        logger (logging.Logger): Logger used for logging.

    Raises:
        ValueError: If input filepath has extension not supported by function.
            Currently is supported only ".exr" extension.
    """
    if logger is None:
        logger = logging.getLogger(__name__)

    ext = os.path.splitext(first_input_path)[1].lower()
    if ext != ".exr":
        raise ValueError((
            "Function 'convert_for_ffmpeg' currently support only"
            " \".exr\" extension. Got \"{}\"."
        ).format(ext))

    is_sequence = False
    if input_frame_start is not None and input_frame_end is not None:
        is_sequence = int(input_frame_end) != int(input_frame_start)

    oiio_info = get_oiio_info_for_input(first_input_path)
    input_info = parse_oiio_info(oiio_info)

    # Change compression only if source compression is "dwaa" or "dwab"
    # - they're not supported in ffmpeg
    compression = input_info["compression"]
    if compression in ("dwaa", "dwab"):
        compression = "none"

    # Prepare subprocess arguments
    oiio_cmd = [
        get_oiio_tools_path(),
        "--compression", compression,
        first_input_path
    ]

    channels_info = input_info["channels_info"]
    review_channels = get_convert_rgb_channels(channels_info)
    if review_channels is None:
        raise ValueError(
            "Couldn't find channels that can be used for conversion."
        )

    red, green, blue, alpha = review_channels
    channels_arg = "R={},G={},B={}".format(red, green, blue)
    if alpha is not None:
        channels_arg += ",A={}".format(alpha)
    oiio_cmd.append("--ch")
    oiio_cmd.append(channels_arg)

    # Add frame definitions to arguments
    if is_sequence:
        oiio_cmd.append("--frames")
        oiio_cmd.append("{}-{}".format(input_frame_start, input_frame_end))

    # Add last argument - path to output
    base_file_name = os.path.basename(first_input_path)
    output_path = os.path.join(output_dir, base_file_name)
    oiio_cmd.append("-o")
    oiio_cmd.append(output_path)

    logger.debug("Conversion command: {}".format(" ".join(oiio_cmd)))
    run_subprocess(oiio_cmd, logger=logger)
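The new module is what the extract plugins further down in this commit drive: probe first, convert into a temp directory, then clean up. A minimal usage sketch (the render path and frame range are made up for illustration):

```python
import shutil

from openpype.lib import (
    get_transcode_temp_directory,
    should_convert_for_ffmpeg,
    convert_for_ffmpeg,
)

first_frame_path = "/renders/shot010/beauty.1001.exr"  # assumed path

do_convert = should_convert_for_ffmpeg(first_frame_path)
if do_convert is None:
    # oiiotool unavailable or channels unrecognized; requirement unknown
    print("Can't determine if conversion is required. Skipping.")
elif do_convert:
    temp_dir = get_transcode_temp_directory()
    try:
        convert_for_ffmpeg(first_frame_path, temp_dir, 1001, 1050)
        # ... run ffmpeg against the converted files in temp_dir ...
    finally:
        shutil.rmtree(temp_dir)  # temp dir must be cleaned up by caller
```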
@@ -3,6 +3,7 @@ import logging
import json
import platform
import subprocess
+import distutils

log = logging.getLogger("FFmpeg utils")
@@ -105,3 +106,21 @@ def ffprobe_streams(path_to_file, logger=None):
        ))

    return json.loads(popen_stdout)["streams"]
+
+
+def is_oiio_supported():
+    """Checks if oiiotool is configured for this platform.
+
+    Returns:
+        bool: OIIO tool executable is available.
+    """
+    loaded_path = oiio_path = get_oiio_tools_path()
+    if oiio_path:
+        oiio_path = distutils.spawn.find_executable(oiio_path)
+
+    if not oiio_path:
+        log.debug("OIIOTool is not configured or not present at {}".format(
+            loaded_path
+        ))
+        return False
+    return True
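A minimal guard sketch using the new helper (the fallback message is illustrative):

```python
from openpype.lib import is_oiio_supported

if is_oiio_supported():
    # Safe to build oiiotool commands via get_oiio_tools_path()
    pass
else:
    print("oiiotool unavailable; EXR pre-conversion will be skipped")
```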
@@ -445,9 +445,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
                    preview = True
                    break

+            if instance_data.get("multipartExr"):
+                preview = True
+
            new_instance = copy(instance_data)
            new_instance["subset"] = subset_name
            new_instance["subsetGroup"] = group_name
            if preview:
                new_instance["review"] = True

            # create representation
            if isinstance(col, (list, tuple)):
@@ -527,6 +532,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        if bake_renders:
            preview = False

+        # toggle preview on if multipart is on
+        if instance.get("multipartExr", False):
+            preview = True
+
        staging = os.path.dirname(list(collection)[0])
        success, rootless_staging_dir = (
            self.anatomy.find_root_template_from_path(staging)
@@ -50,11 +50,12 @@ class JobQueueModule(OpenPypeModule):
    name = "job_queue"

    def initialize(self, modules_settings):
-        server_url = modules_settings.get("server_url") or ""
+        module_settings = modules_settings.get(self.name) or {}
+        server_url = module_settings.get("server_url") or ""

        self._server_url = self.url_conversion(server_url)
        jobs_root_mapping = self._roots_mapping_conversion(
-            modules_settings.get("jobs_root")
+            module_settings.get("jobs_root")
        )

        self._jobs_root_mapping = jobs_root_mapping
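The fix reads the module's own sub-dictionary instead of treating the top-level modules settings as the module settings. A sketch of the settings shape this assumes (keys come from the diff; values and platform roots are illustrative):

```python
modules_settings = {
    "job_queue": {
        "server_url": "http://localhost:8079",
        "jobs_root": {
            "windows": "P:/jobs",
            "linux": "/mnt/jobs",
            "darwin": "/Volumes/jobs",
        },
    }
}

# What initialize() now does:
module_settings = modules_settings.get("job_queue") or {}
server_url = module_settings.get("server_url") or ""
assert server_url == "http://localhost:8079"
```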
@@ -14,9 +14,11 @@ import openpype
import openpype.api
from openpype.lib import (
    get_pype_execute_args,
-    should_decompress,
-    get_decompress_dir,
-    decompress,
+
+    get_transcode_temp_directory,
+    convert_for_ffmpeg,
+    should_convert_for_ffmpeg,

    CREATE_NO_WINDOW
)
@@ -70,18 +72,6 @@ class ExtractBurnin(openpype.api.Extractor):
    options = None

    def process(self, instance):
-        # ffmpeg doesn't support multipart exrs
-        if instance.data.get("multipartExr") is True:
-            instance_label = (
-                getattr(instance, "label", None)
-                or instance.data.get("label")
-                or instance.data.get("name")
-            )
-            self.log.info((
-                "Instance \"{}\" contain \"multipartExr\". Skipped."
-            ).format(instance_label))
-            return
-
        # QUESTION what is this for and should we raise an exception?
        if "representations" not in instance.data:
            raise RuntimeError("Burnin needs already created mov to work on.")
@@ -95,6 +85,55 @@ class ExtractBurnin(openpype.api.Extractor):
            self.log.debug("Removing representation: {}".format(repre))
            instance.data["representations"].remove(repre)

+    def _get_burnins_per_representations(self, instance, src_burnin_defs):
+        self.log.debug("Filtering of representations and their burnins starts")
+
+        filtered_repres = []
+        repres = instance.data.get("representations") or []
+        for idx, repre in enumerate(repres):
+            self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
+            if not self.repres_is_valid(repre):
+                continue
+
+            repre_burnin_links = repre.get("burnins", [])
+            self.log.debug(
+                "repre_burnin_links: {}".format(repre_burnin_links)
+            )
+
+            burnin_defs = copy.deepcopy(src_burnin_defs)
+            self.log.debug(
+                "burnin_defs.keys(): {}".format(burnin_defs.keys())
+            )
+
+            # Filter output definition by `burnin` representation key
+            repre_linked_burnins = {
+                name: output
+                for name, output in burnin_defs.items()
+                if name in repre_burnin_links
+            }
+            self.log.debug(
+                "repre_linked_burnins: {}".format(repre_linked_burnins)
+            )
+
+            # If any match then replace burnin defs and follow tag filtering
+            if repre_linked_burnins:
+                burnin_defs = repre_linked_burnins
+
+            # Filter output definition by representation tags (optional)
+            repre_burnin_defs = self.filter_burnins_by_tags(
+                burnin_defs, repre["tags"]
+            )
+            if not repre_burnin_defs:
+                self.log.info((
+                    "Skipped representation. All burnin definitions from"
+                    " selected profile does not match to representation's"
+                    " tags. \"{}\""
+                ).format(str(repre["tags"])))
+                continue
+            filtered_repres.append((repre, repre_burnin_defs))
+
+        return filtered_repres
+
    def main_process(self, instance):
        # TODO get these data from context
        host_name = instance.context.data["hostName"]
@@ -110,8 +149,7 @@ class ExtractBurnin(openpype.api.Extractor):
            ).format(host_name, family, task_name))
            return

-        self.log.debug("profile: {}".format(
-            profile))
+        self.log.debug("profile: {}".format(profile))

        # Pre-filter burnin definitions by instance families
        burnin_defs = self.filter_burnins_defs(profile, instance)
@@ -133,46 +171,10 @@ class ExtractBurnin(openpype.api.Extractor):
        # Executable args that will execute the script
        # [pype executable, *pype script, "run"]
        executable_args = get_pype_execute_args("run", scriptpath)

-        for idx, repre in enumerate(tuple(instance.data["representations"])):
-            self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
-
-            repre_burnin_links = repre.get("burnins", [])
-
-            if not self.repres_is_valid(repre):
-                continue
-
-            self.log.debug("repre_burnin_links: {}".format(
-                repre_burnin_links))
-
-            self.log.debug("burnin_defs.keys(): {}".format(
-                burnin_defs.keys()))
-
-            # Filter output definition by `burnin` representation key
-            repre_linked_burnins = {
-                name: output for name, output in burnin_defs.items()
-                if name in repre_burnin_links
-            }
-            self.log.debug("repre_linked_burnins: {}".format(
-                repre_linked_burnins))
-
-            # If any match then replace burnin defs and follow tag filtering
-            _burnin_defs = copy.deepcopy(burnin_defs)
-            if repre_linked_burnins:
-                _burnin_defs = repre_linked_burnins
-
-            # Filter output definition by representation tags (optional)
-            repre_burnin_defs = self.filter_burnins_by_tags(
-                _burnin_defs, repre["tags"]
-            )
-            if not repre_burnin_defs:
-                self.log.info((
-                    "Skipped representation. All burnin definitions from"
-                    " selected profile does not match to representation's"
-                    " tags. \"{}\""
-                ).format(str(repre["tags"])))
-                continue
-
+        burnins_per_repres = self._get_burnins_per_representations(
+            instance, burnin_defs
+        )
+        for repre, repre_burnin_defs in burnins_per_repres:
            # Create copy of `_burnin_data` and `_temp_data` for repre.
            burnin_data = copy.deepcopy(_burnin_data)
            temp_data = copy.deepcopy(_temp_data)
@@ -180,6 +182,41 @@ class ExtractBurnin(openpype.api.Extractor):
            # Prepare representation based data.
            self.prepare_repre_data(instance, repre, burnin_data, temp_data)

+            src_repre_staging_dir = repre["stagingDir"]
+            # Should convert representation source files before processing?
+            repre_files = repre["files"]
+            if isinstance(repre_files, (tuple, list)):
+                filename = repre_files[0]
+            else:
+                filename = repre_files
+
+            first_input_path = os.path.join(src_repre_staging_dir, filename)
+            # Determine if representation requires pre conversion for ffmpeg
+            do_convert = should_convert_for_ffmpeg(first_input_path)
+            # If result is None the requirement of conversion can't be
+            # determined
+            if do_convert is None:
+                self.log.info((
+                    "Can't determine if representation requires conversion."
+                    " Skipped."
+                ))
+                continue
+
+            # Do conversion if needed
+            # - change staging dir of source representation
+            # - must be set back after output definitions processing
+            if do_convert:
+                new_staging_dir = get_transcode_temp_directory()
+                repre["stagingDir"] = new_staging_dir
+
+                convert_for_ffmpeg(
+                    first_input_path,
+                    new_staging_dir,
+                    _temp_data["frameStart"],
+                    _temp_data["frameEnd"],
+                    self.log
+                )
+
            # Add anatomy keys to burnin_data.
            filled_anatomy = anatomy.format_all(burnin_data)
            burnin_data["anatomy"] = filled_anatomy.get_solved()
@@ -199,6 +236,7 @@ class ExtractBurnin(openpype.api.Extractor):
            files_to_delete = []
            for filename_suffix, burnin_def in repre_burnin_defs.items():
                new_repre = copy.deepcopy(repre)
+                new_repre["stagingDir"] = src_repre_staging_dir

                # Keep "ftrackreview" tag only on first output
                if first_output:
@@ -229,27 +267,9 @@ class ExtractBurnin(openpype.api.Extractor):
                new_repre["outputName"] = new_name

                # Prepare paths and files for process.
-                self.input_output_paths(new_repre, temp_data, filename_suffix)
-
-                decompressed_dir = ''
-                full_input_path = temp_data["full_input_path"]
-                do_decompress = should_decompress(full_input_path)
-                if do_decompress:
-                    decompressed_dir = get_decompress_dir()
-
-                    decompress(
-                        decompressed_dir,
-                        full_input_path,
-                        temp_data["frame_start"],
-                        temp_data["frame_end"],
-                        self.log
-                    )
-
-                    # input path changed, 'decompressed' added
-                    input_file = os.path.basename(full_input_path)
-                    temp_data["full_input_path"] = os.path.join(
-                        decompressed_dir,
-                        input_file)
+                self.input_output_paths(
+                    repre, new_repre, temp_data, filename_suffix
+                )

                # Data for burnin script
                script_data = {
@@ -305,6 +325,14 @@ class ExtractBurnin(openpype.api.Extractor):
                # Add new representation to instance
                instance.data["representations"].append(new_repre)

+            # Cleanup temp staging dir after processing of output definitions
+            if do_convert:
+                temp_dir = repre["stagingDir"]
+                shutil.rmtree(temp_dir)
+                # Set staging dir of source representation back to previous
+                # value
+                repre["stagingDir"] = src_repre_staging_dir
+
            # Remove source representation
            # NOTE we maybe can keep source representation if necessary
            instance.data["representations"].remove(repre)
@@ -317,9 +345,6 @@ class ExtractBurnin(openpype.api.Extractor):
                    os.remove(filepath)
                    self.log.debug("Removed: \"{}\"".format(filepath))

-            if do_decompress and os.path.exists(decompressed_dir):
-                shutil.rmtree(decompressed_dir)
-
    def _get_burnin_options(self):
        # Prepare burnin options
        burnin_options = copy.deepcopy(self.default_options)
@@ -474,6 +499,12 @@ class ExtractBurnin(openpype.api.Extractor):
                "Representation \"{}\" don't have \"burnin\" tag. Skipped."
            ).format(repre["name"]))
            return False

+        if not repre.get("files"):
+            self.log.warning((
+                "Representation \"{}\" have empty files. Skipped."
+            ).format(repre["name"]))
+            return False
        return True

    def filter_burnins_by_tags(self, burnin_defs, tags):
@@ -504,7 +535,9 @@ class ExtractBurnin(openpype.api.Extractor):

        return filtered_burnins

-    def input_output_paths(self, new_repre, temp_data, filename_suffix):
+    def input_output_paths(
+        self, src_repre, new_repre, temp_data, filename_suffix
+    ):
        """Prepare input and output paths for representation.

        Store data to `temp_data` for keys "full_input_path" which is full path
@@ -565,12 +598,13 @@ class ExtractBurnin(openpype.api.Extractor):

            repre_files = output_filename

-        stagingdir = new_repre["stagingDir"]
+        src_stagingdir = src_repre["stagingDir"]
+        dst_stagingdir = new_repre["stagingDir"]
        full_input_path = os.path.join(
-            os.path.normpath(stagingdir), input_filename
+            os.path.normpath(src_stagingdir), input_filename
        ).replace("\\", "/")
        full_output_path = os.path.join(
-            os.path.normpath(stagingdir), output_filename
+            os.path.normpath(dst_stagingdir), output_filename
        ).replace("\\", "/")

        temp_data["full_input_path"] = full_input_path
@@ -587,7 +621,7 @@ class ExtractBurnin(openpype.api.Extractor):
        if is_sequence:
            for filename in input_filenames:
                filepath = os.path.join(
-                    os.path.normpath(stagingdir), filename
+                    os.path.normpath(src_stagingdir), filename
                ).replace("\\", "/")
                full_input_paths.append(filepath)
@@ -7,10 +7,11 @@ from openpype.lib import (
    run_subprocess,
    path_to_subprocess_arg,

-    should_decompress,
-    get_decompress_dir,
-    decompress
+    get_transcode_temp_directory,
+    convert_for_ffmpeg,
+    should_convert_for_ffmpeg
)

import shutil
@@ -31,57 +32,56 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):

    def process(self, instance):
        self.log.info("subset {}".format(instance.data['subset']))

        # skip crypto passes.
        if 'crypto' in instance.data['subset']:
            self.log.info("Skipping crypto passes.")
            return

-        do_decompress = False
        # ffmpeg doesn't support multipart exrs, use oiiotool if available
        if instance.data.get("multipartExr") is True:
            return

-        # Skip review when requested.
+        # Skip if review not set.
        if not instance.data.get("review", True):
            self.log.info("Skipping - no review set on instance.")
            return

-        # get representation and loop them
-        representations = instance.data["representations"]
-
-        # filter out mov and img sequences
-        representations_new = representations[:]
-
-        for repre in representations:
-            tags = repre.get("tags", [])
-            self.log.debug(repre)
-            valid = 'review' in tags or "thumb-nuke" in tags
-            if not valid:
-                continue
-
-            if not isinstance(repre['files'], (list, tuple)):
-                input_file = repre['files']
+        filtered_repres = self._get_filtered_repres(instance)
+        for repre in filtered_repres:
+            repre_files = repre["files"]
+            if not isinstance(repre_files, (list, tuple)):
+                input_file = repre_files
            else:
-                file_index = int(float(len(repre['files'])) * 0.5)
-                input_file = repre['files'][file_index]
+                file_index = int(float(len(repre_files)) * 0.5)
+                input_file = repre_files[file_index]

-            stagingdir = os.path.normpath(repre.get("stagingDir"))
+            stagingdir = os.path.normpath(repre["stagingDir"])

            # input_file = (
            #     collections[0].format('{head}{padding}{tail}') % start
            # )
            full_input_path = os.path.join(stagingdir, input_file)
            self.log.info("input {}".format(full_input_path))

-            decompressed_dir = ''
-            do_decompress = should_decompress(full_input_path)
-            if do_decompress:
-                decompressed_dir = get_decompress_dir()
-
-                decompress(
-                    decompressed_dir,
-                    full_input_path)
-                # input path changed, 'decompressed' added
-                full_input_path = os.path.join(
-                    decompressed_dir,
-                    input_file)
+            do_convert = should_convert_for_ffmpeg(full_input_path)
+            # If result is None the requirement of conversion can't be
+            # determined
+            if do_convert is None:
+                self.log.info((
+                    "Can't determine if representation requires conversion."
+                    " Skipped."
+                ))
+                continue
+
+            # Do conversion if needed
+            # - change staging dir of source representation
+            # - must be set back after output definitions processing
+            convert_dir = None
+            if do_convert:
+                convert_dir = get_transcode_temp_directory()
+                filename = os.path.basename(full_input_path)
+                convert_for_ffmpeg(
+                    full_input_path,
+                    convert_dir,
+                    None,
+                    None,
+                    self.log
+                )
+                full_input_path = os.path.join(convert_dir, filename)

            filename = os.path.splitext(input_file)[0]
            if not filename.endswith('.'):
@@ -124,29 +124,45 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
                )
            except RuntimeError as exp:
                if "Compression" in str(exp):
-                    self.log.debug("Unsupported compression on input files. " +
-                                   "Skipping!!!")
+                    self.log.debug(
+                        "Unsupported compression on input files. Skipping!!!"
+                    )
                    return
                self.log.warning("Conversion crashed", exc_info=True)
                raise

            if "representations" not in instance.data:
                instance.data["representations"] = []

-            representation = {
-                'name': 'thumbnail',
-                'ext': 'jpg',
-                'files': jpeg_file,
+            new_repre = {
+                "name": "thumbnail",
+                "ext": "jpg",
+                "files": jpeg_file,
                "stagingDir": stagingdir,
                "thumbnail": True,
-                "tags": ['thumbnail']
+                "tags": ["thumbnail"]
            }

            # adding representation
-            self.log.debug("Adding: {}".format(representation))
-            representations_new.append(representation)
+            self.log.debug("Adding: {}".format(new_repre))
+            instance.data["representations"].append(new_repre)

-            if do_decompress and os.path.exists(decompressed_dir):
-                shutil.rmtree(decompressed_dir)
+            # Cleanup temp folder
+            if convert_dir is not None and os.path.exists(convert_dir):
+                shutil.rmtree(convert_dir)

-        instance.data["representations"] = representations_new
+    def _get_filtered_repres(self, instance):
+        filtered_repres = []
+        src_repres = instance.data.get("representations") or []
+        for repre in src_repres:
+            self.log.debug(repre)
+            tags = repre.get("tags") or []
+            valid = "review" in tags or "thumb-nuke" in tags
+            if not valid:
+                continue
+
+            if not repre.get("files"):
+                self.log.info((
+                    "Representation \"{}\" don't have files. Skipping"
+                ).format(repre["name"]))
+                continue
+
+            filtered_repres.append(repre)
+        return filtered_repres
@@ -2,6 +2,7 @@ import os
import re
import copy
import json
+import shutil

from abc import ABCMeta, abstractmethod
import six
@@ -16,9 +17,10 @@ from openpype.lib import (

    path_to_subprocess_arg,

-    should_decompress,
-    get_decompress_dir,
-    decompress
+    should_convert_for_ffmpeg,
+    convert_for_ffmpeg,
+    get_transcode_temp_directory
)
import speedcopy
@@ -71,18 +73,6 @@ class ExtractReview(pyblish.api.InstancePlugin):
        if not instance.data.get("review", True):
            return

-        # ffmpeg doesn't support multipart exrs
-        if instance.data.get("multipartExr") is True:
-            instance_label = (
-                getattr(instance, "label", None)
-                or instance.data.get("label")
-                or instance.data.get("name")
-            )
-            self.log.info((
-                "Instance \"{}\" contain \"multipartExr\". Skipped."
-            ).format(instance_label))
-            return
-
        # Run processing
        self.main_process(instance)
@@ -92,7 +82,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
            if "delete" in tags and "thumbnail" not in tags:
                instance.data["representations"].remove(repre)

-    def main_process(self, instance):
+    def _get_outputs_for_instance(self, instance):
        host_name = instance.context.data["hostName"]
        task_name = os.environ["AVALON_TASK"]
        family = self.main_family_from_instance(instance)
@@ -114,24 +104,25 @@ class ExtractReview(pyblish.api.InstancePlugin):
        self.log.debug("Matching profile: \"{}\"".format(json.dumps(profile)))

        instance_families = self.families_from_instance(instance)
-        _profile_outputs = self.filter_outputs_by_families(
+        filtered_outputs = self.filter_outputs_by_families(
            profile, instance_families
        )
-        if not _profile_outputs:
+        # Store `filename_suffix` to save arguments
+        profile_outputs = []
+        for filename_suffix, definition in filtered_outputs.items():
+            definition["filename_suffix"] = filename_suffix
+            profile_outputs.append(definition)
+
+        if not filtered_outputs:
            self.log.info((
                "Skipped instance. All output definitions from selected"
                " profile does not match to instance families. \"{}\""
            ).format(str(instance_families)))
-            return
+        return profile_outputs

-        # Store `filename_suffix` to save arguments
-        profile_outputs = []
-        for filename_suffix, definition in _profile_outputs.items():
-            definition["filename_suffix"] = filename_suffix
-            profile_outputs.append(definition)
-
-        # Loop through representations
-        for repre in tuple(instance.data["representations"]):
+    def _get_outputs_per_representations(self, instance, profile_outputs):
+        outputs_per_representations = []
+        for repre in instance.data["representations"]:
            repre_name = str(repre.get("name"))
            tags = repre.get("tags") or []
            if "review" not in tags:
@@ -173,6 +164,80 @@ class ExtractReview(pyblish.api.InstancePlugin):
                    " tags. \"{}\""
                ).format(str(tags)))
                continue
+            outputs_per_representations.append((repre, outputs))
+        return outputs_per_representations
+
+    @staticmethod
+    def get_instance_label(instance):
+        return (
+            getattr(instance, "label", None)
+            or instance.data.get("label")
+            or instance.data.get("name")
+            or str(instance)
+        )
+
+    def main_process(self, instance):
+        instance_label = self.get_instance_label(instance)
+        self.log.debug("Processing instance \"{}\"".format(instance_label))
+        profile_outputs = self._get_outputs_for_instance(instance)
+        if not profile_outputs:
+            return
+
+        # Loop through representations
+        outputs_per_repres = self._get_outputs_per_representations(
+            instance, profile_outputs
+        )
+        for repre, outputs in outputs_per_repres:
+            # Check if input should be preconverted before processing
+            # Store original staging dir (its value may change)
+            src_repre_staging_dir = repre["stagingDir"]
+            # Receive filepath to first file in representation
+            first_input_path = None
+            if not self.input_is_sequence(repre):
+                first_input_path = os.path.join(
+                    src_repre_staging_dir, repre["files"]
+                )
+            else:
+                for filename in repre["files"]:
+                    first_input_path = os.path.join(
+                        src_repre_staging_dir, filename
+                    )
+                    break
+
+            # Skip if file is not set
+            if first_input_path is None:
+                self.log.warning((
+                    "Representation \"{}\" have empty files. Skipped."
+                ).format(repre["name"]))
+                continue
+
+            # Determine if representation requires pre conversion for ffmpeg
+            do_convert = should_convert_for_ffmpeg(first_input_path)
+            # If result is None the requirement of conversion can't be
+            # determined
+            if do_convert is None:
+                self.log.info((
+                    "Can't determine if representation requires conversion."
+                    " Skipped."
+                ))
+                continue
+
+            # Do conversion if needed
+            # - change staging dir of source representation
+            # - must be set back after output definitions processing
+            if do_convert:
+                new_staging_dir = get_transcode_temp_directory()
+                repre["stagingDir"] = new_staging_dir
+
+                frame_start = instance.data["frameStart"]
+                frame_end = instance.data["frameEnd"]
+                convert_for_ffmpeg(
+                    first_input_path,
+                    new_staging_dir,
+                    frame_start,
+                    frame_end,
+                    self.log
+                )
+
            for _output_def in outputs:
                output_def = copy.deepcopy(_output_def)
@@ -185,6 +250,10 @@ class ExtractReview(pyblish.api.InstancePlugin):

                # Create copy of representation
                new_repre = copy.deepcopy(repre)
+                # Make sure new representation has origin staging dir
+                # - this is because source representation may change
+                #   its staging dir because of ffmpeg conversion
+                new_repre["stagingDir"] = src_repre_staging_dir

                # Remove "delete" tag from new repre if there is
                if "delete" in new_repre["tags"]:
@@ -276,6 +345,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
                )
                instance.data["representations"].append(new_repre)

+            # Cleanup temp staging dir after processing of output definitions
+            if do_convert:
+                temp_dir = repre["stagingDir"]
+                shutil.rmtree(temp_dir)
+                # Set staging dir of source representation back to previous
+                # value
+                repre["stagingDir"] = src_repre_staging_dir
+
    def input_is_sequence(self, repre):
        """Deduce from representation data if input is sequence."""
        # TODO GLOBAL ISSUE - Find better way how to find out if input
@@ -405,35 +482,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
            value for value in _ffmpeg_audio_filters if value.strip()
        ]

-        if isinstance(new_repre['files'], list):
-            input_files_urls = [os.path.join(new_repre["stagingDir"], f) for f
-                                in new_repre['files']]
-            test_path = input_files_urls[0]
-        else:
-            test_path = os.path.join(
-                new_repre["stagingDir"], new_repre['files'])
-        do_decompress = should_decompress(test_path)
-
-        if do_decompress:
-            # change stagingDir, decompress first
-            # calculate all paths with modified directory, used on too many
-            # places
-            # will be purged by cleanup.py automatically
-            orig_staging_dir = new_repre["stagingDir"]
-            new_repre["stagingDir"] = get_decompress_dir()
-
        # Prepare input and output filepaths
        self.input_output_paths(new_repre, output_def, temp_data)

-        if do_decompress:
-            input_file = temp_data["full_input_path"].\
-                replace(new_repre["stagingDir"], orig_staging_dir)
-
-            decompress(new_repre["stagingDir"], input_file,
-                       temp_data["frame_start"],
-                       temp_data["frame_end"],
-                       self.log)
-
        # Set output frames len to 1 when output is single image
        if (
            temp_data["output_ext_is_image"]
@@ -744,13 +795,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
        "sequence_file" (if output is sequence) keys to new representation.
        """

-        staging_dir = new_repre["stagingDir"]
        repre = temp_data["origin_repre"]
+        src_staging_dir = repre["stagingDir"]
+        dst_staging_dir = new_repre["stagingDir"]

        if temp_data["input_is_sequence"]:
            collections = clique.assemble(repre["files"])[0]
            full_input_path = os.path.join(
-                staging_dir,
+                src_staging_dir,
                collections[0].format("{head}{padding}{tail}")
            )
@@ -760,12 +812,12 @@ class ExtractReview(pyblish.api.InstancePlugin):

            # Make sure to have full path to one input file
            full_input_path_single_file = os.path.join(
-                staging_dir, repre["files"][0]
+                src_staging_dir, repre["files"][0]
            )

        else:
            full_input_path = os.path.join(
-                staging_dir, repre["files"]
+                src_staging_dir, repre["files"]
            )
            filename = os.path.splitext(repre["files"])[0]
@@ -811,27 +863,27 @@ class ExtractReview(pyblish.api.InstancePlugin):

            new_repre["sequence_file"] = repr_file
            full_output_path = os.path.join(
-                staging_dir, filename_base, repr_file
+                dst_staging_dir, filename_base, repr_file
            )

        else:
            repr_file = "{}_{}.{}".format(
                filename, filename_suffix, output_ext
            )
-            full_output_path = os.path.join(staging_dir, repr_file)
+            full_output_path = os.path.join(dst_staging_dir, repr_file)
            new_repre_files = repr_file

        # Store files to representation
        new_repre["files"] = new_repre_files

        # Make sure stagingDir exists
-        staging_dir = os.path.normpath(os.path.dirname(full_output_path))
-        if not os.path.exists(staging_dir):
-            self.log.debug("Creating dir: {}".format(staging_dir))
-            os.makedirs(staging_dir)
+        dst_staging_dir = os.path.normpath(os.path.dirname(full_output_path))
+        if not os.path.exists(dst_staging_dir):
+            self.log.debug("Creating dir: {}".format(dst_staging_dir))
+            os.makedirs(dst_staging_dir)

        # Store stagingDir to representation
-        new_repre["stagingDir"] = staging_dir
+        new_repre["stagingDir"] = dst_staging_dir

        # Store paths to temp data
        temp_data["full_input_path"] = full_input_path
@@ -161,6 +161,23 @@ def _dnxhd_codec_args(stream_data, source_ffmpeg_cmd):
    return output


+def _mxf_format_args(ffprobe_data, source_ffmpeg_cmd):
+    input_format = ffprobe_data["format"]
+    format_tags = input_format.get("tags") or {}
+    product_name = format_tags.get("product_name") or ""
+    output = []
+    if "opatom" in product_name.lower():
+        output.extend(["-f", "mxf_opatom"])
+    return output
+
+
+def get_format_args(ffprobe_data, source_ffmpeg_cmd):
+    input_format = ffprobe_data.get("format") or {}
+    if input_format.get("format_name") == "mxf":
+        return _mxf_format_args(ffprobe_data, source_ffmpeg_cmd)
+    return []
+
+
def get_codec_args(ffprobe_data, source_ffmpeg_cmd):
    stream_data = ffprobe_data["streams"][0]
    codec_name = stream_data.get("codec_name")
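A sketch of calling the new helper with a hand-written ffprobe result (the dict values are illustrative, not real probe output):

```python
ffprobe_data = {
    "format": {
        "format_name": "mxf",
        "tags": {"product_name": "Avid Media Composer OpAtom"},
    },
    "streams": [{"codec_name": "dnxhd"}],
}
args = get_format_args(ffprobe_data, source_ffmpeg_cmd=None)
# -> ["-f", "mxf_opatom"], because format_name is "mxf" and the
#    product_name tag contains "opatom"; any other input yields []
```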
@@ -595,9 +612,9 @@ def burnins_from_data(
    if source_timecode is None:
        source_timecode = stream.get("tags", {}).get("timecode")

-    # Use "format" key from ffprobe data
-    # - this is used e.g. in mxf extension
    if source_timecode is None:
+        # Use "format" key from ffprobe data
+        # - this is used e.g. in mxf extension
        input_format = burnin.ffprobe_data.get("format") or {}
        source_timecode = input_format.get("timecode")
    if source_timecode is None:
@@ -692,6 +709,9 @@ def burnins_from_data(
        ffmpeg_args.append("-g 1")

    else:
+        ffmpeg_args.extend(
+            get_format_args(burnin.ffprobe_data, source_ffmpeg_cmd)
+        )
        ffmpeg_args.extend(
            get_codec_args(burnin.ffprobe_data, source_ffmpeg_cmd)
        )
@@ -243,7 +243,11 @@ class LauncherWindow(QtWidgets.QDialog):

        # Allow minimize
        self.setWindowFlags(
-            self.windowFlags() | QtCore.Qt.WindowMinimizeButtonHint
+            QtCore.Qt.Window
+            | QtCore.Qt.CustomizeWindowHint
+            | QtCore.Qt.WindowTitleHint
+            | QtCore.Qt.WindowMinimizeButtonHint
+            | QtCore.Qt.WindowCloseButtonHint
        )

        project_model = ProjectModel(self.dbcon)
@@ -4,7 +4,10 @@ from Qt import QtWidgets, QtCore
from avalon import api, io, pipeline

from openpype import style
-from openpype.tools.utils import lib
+from openpype.tools.utils import (
+    lib,
+    PlaceholderLineEdit
+)
from openpype.tools.utils.assets_widget import MultiSelectAssetsWidget

from .widgets import (
@@ -517,7 +520,7 @@ class SubsetGroupingDialog(QtWidgets.QDialog):
        self.subsets = parent._subsets_widget
        self.asset_ids = parent.data["state"]["assetIds"]

-        name = QtWidgets.QLineEdit()
+        name = PlaceholderLineEdit(self)
        name.setPlaceholderText("Remain blank to ungroup..")

        # Menu for pre-defined subset groups
|||
|
|
@ -10,6 +10,7 @@ from openpype.lib import (
|
|||
PROJECT_NAME_REGEX
|
||||
)
|
||||
from openpype.style import load_stylesheet
|
||||
from openpype.tools.utils import PlaceholderLineEdit
|
||||
from avalon.api import AvalonMongoDB
|
||||
|
||||
from Qt import QtWidgets, QtCore
|
||||
|
|
@@ -345,7 +346,7 @@ class ConfirmProjectDeletion(QtWidgets.QDialog):

        question_label = QtWidgets.QLabel("<b>Are you sure?</b>", self)

-        confirm_input = QtWidgets.QLineEdit(self)
+        confirm_input = PlaceholderLineEdit(self)
        confirm_input.setPlaceholderText("Type \"Delete\" to confirm...")

        cancel_btn = _SameSizeBtns("Cancel", self)
|||
|
|
@ -9,6 +9,7 @@ from avalon.vendor import qtawesome
|
|||
|
||||
from openpype.widgets.attribute_defs import create_widget_for_attr_def
|
||||
from openpype.tools.flickcharm import FlickCharm
|
||||
from openpype.tools.utils import PlaceholderLineEdit
|
||||
from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS
|
||||
from .models import (
|
||||
AssetsHierarchyModel,
|
||||
|
|
@@ -396,7 +397,7 @@ class AssetsDialog(QtWidgets.QDialog):
        proxy_model.setSourceModel(model)
        proxy_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)

-        filter_input = QtWidgets.QLineEdit(self)
+        filter_input = PlaceholderLineEdit(self)
        filter_input.setPlaceholderText("Filter assets..")

        asset_view = QtWidgets.QTreeView(self)
@@ -934,7 +935,7 @@ class TasksCombobox(QtWidgets.QComboBox):
        self.set_selected_items(self._origin_value)


-class VariantInputWidget(QtWidgets.QLineEdit):
+class VariantInputWidget(PlaceholderLineEdit):
    """Input widget for variant."""
    value_changed = QtCore.Signal()
@@ -4,7 +4,7 @@ from openpype import (
    resources,
    style
)

+from openpype.tools.utils import PlaceholderLineEdit
from .control import PublisherController
from .widgets import (
    BorderedLabelWidget,
@@ -131,7 +131,7 @@ class PublisherWindow(QtWidgets.QDialog):
        subset_content_layout.addWidget(subset_attributes_wrap, 7)

        # Footer
-        comment_input = QtWidgets.QLineEdit(subset_frame)
+        comment_input = PlaceholderLineEdit(subset_frame)
        comment_input.setObjectName("PublishCommentInput")
        comment_input.setPlaceholderText(
            "Attach a comment to your publish"
@@ -5,6 +5,7 @@ from .widgets import (
    ExpandingWidget
)
from openpype.tools.settings import CHILD_OFFSET
+from openpype.tools.utils import PlaceholderLineEdit


class AppVariantWidget(QtWidgets.QWidget):
@@ -45,7 +46,7 @@ class AppVariantWidget(QtWidgets.QWidget):
            content_layout.addWidget(warn_label)
            return

-        executable_input_widget = QtWidgets.QLineEdit(content_widget)
+        executable_input_widget = PlaceholderLineEdit(content_widget)
        executable_input_widget.setPlaceholderText(self.exec_placeholder)
        content_layout.addWidget(executable_input_widget)
@@ -3,6 +3,7 @@ import getpass
from Qt import QtWidgets, QtCore
from openpype.lib import is_admin_password_required
from openpype.widgets import PasswordDialog
+from openpype.tools.utils import PlaceholderLineEdit


class LocalGeneralWidgets(QtWidgets.QWidget):
@@ -11,7 +12,7 @@ class LocalGeneralWidgets(QtWidgets.QWidget):

        self._loading_local_settings = False

-        username_input = QtWidgets.QLineEdit(self)
+        username_input = PlaceholderLineEdit(self)
        username_input.setPlaceholderText(getpass.getuser())

        is_admin_input = QtWidgets.QCheckBox(self)
@@ -6,6 +6,7 @@ from Qt import QtWidgets
from pymongo.errors import ServerSelectionTimeoutError

from openpype.api import change_openpype_mongo_url
+from openpype.tools.utils import PlaceholderLineEdit


class OpenPypeMongoWidget(QtWidgets.QWidget):
@@ -25,7 +26,7 @@ class OpenPypeMongoWidget(QtWidgets.QWidget):
        mongo_url_label = QtWidgets.QLabel("OpenPype Mongo URL", self)

        # Input
-        mongo_url_input = QtWidgets.QLineEdit(self)
+        mongo_url_input = PlaceholderLineEdit(self)
        mongo_url_input.setPlaceholderText("< OpenPype Mongo URL >")
        mongo_url_input.setText(os.environ["OPENPYPE_MONGO"])
@@ -2,6 +2,7 @@ import platform
import copy
from Qt import QtWidgets, QtCore, QtGui
from openpype.tools.settings.settings import ProjectListWidget
+from openpype.tools.utils import PlaceholderLineEdit
from openpype.settings.constants import (
    PROJECT_ANATOMY_KEY,
    DEFAULT_PROJECT_KEY
@@ -45,7 +46,7 @@ class DynamicInputItem(QtCore.QObject):
        parent
    ):
        super(DynamicInputItem, self).__init__()
-        input_widget = QtWidgets.QLineEdit(parent)
+        input_widget = PlaceholderLineEdit(parent)

        settings_value = input_def.get("value")
        placeholder = input_def.get("placeholder")
@@ -11,6 +11,7 @@ from openpype.tools.utils.widgets import ImageButton
from openpype.tools.utils.lib import paint_image_with_color

from openpype.widgets.nice_checkbox import NiceCheckbox
+from openpype.tools.utils import PlaceholderLineEdit
from openpype.settings.lib import get_system_settings
from .images import (
    get_pixmap,
@@ -24,7 +25,7 @@ from .constants import (
)


-class SettingsLineEdit(QtWidgets.QLineEdit):
+class SettingsLineEdit(PlaceholderLineEdit):
    focused_in = QtCore.Signal()

    def focusInEvent(self, event):
@@ -1,8 +1,12 @@
import contextlib
from Qt import QtWidgets, QtCore
-from . import RecursiveSortFilterProxyModel, AssetModel
+
+from openpype.tools.utils import PlaceholderLineEdit

from avalon.vendor import qtawesome
from avalon import style

+from . import RecursiveSortFilterProxyModel, AssetModel
from . import TasksTemplateModel, DeselectableTreeView
from . import _iter_model_rows
@@ -165,7 +169,7 @@ class AssetWidget(QtWidgets.QWidget):
        refresh = QtWidgets.QPushButton(icon, "")
        refresh.setToolTip("Refresh items")

-        filter = QtWidgets.QLineEdit()
+        filter = PlaceholderLineEdit()
        filter.textChanged.connect(proxy.setFilterFixedString)
        filter.setPlaceholderText("Filter assets..")
@@ -10,7 +10,7 @@ from openpype.api import (
    Creator
)
from openpype.lib import TaskNotSetError
-from avalon.tools.creator.app import SubsetAllowedSymbols
+from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS


class FamilyWidget(QtWidgets.QWidget):
@@ -223,7 +223,7 @@ class FamilyWidget(QtWidgets.QWidget):
        # QUESTION should Creator care about this and here should be
        #   only validated with schema regex?
        subset_name = re.sub(
-            "[^{}]+".format(SubsetAllowedSymbols),
+            "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
            "",
            subset_name
        )
@@ -7,6 +7,7 @@ from avalon import api
from avalon.vendor import qtawesome

from openpype import style
+from openpype.tools.utils import PlaceholderLineEdit
from openpype.tools.utils.lib import (
    iter_model_rows,
    qt_app_context
@@ -44,7 +45,7 @@ class SubsetManagerWindow(QtWidgets.QDialog):
        header_widget = QtWidgets.QWidget(left_side_widget)

        # Filter input
-        filter_input = QtWidgets.QLineEdit(header_widget)
+        filter_input = PlaceholderLineEdit(header_widget)
        filter_input.setPlaceholderText("Filter subsets..")

        # Refresh button
@@ -0,0 +1,8 @@
from .widgets import (
    PlaceholderLineEdit,
)


__all__ = (
    "PlaceholderLineEdit",
)
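This new package-level export is what all of the QLineEdit swaps in this commit import. A usage sketch matching those call sites (parent_widget is assumed to be an existing Qt widget):

```python
from openpype.tools.utils import PlaceholderLineEdit

filter_input = PlaceholderLineEdit(parent_widget)
filter_input.setPlaceholderText("Filter subsets..")
```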
@@ -15,6 +15,7 @@ from openpype.tools.utils.lib import (
    schedule,
    qt_app_context
)
+from openpype.tools.utils import PlaceholderLineEdit
from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget
from openpype.tools.utils.tasks_widget import TasksWidget
from openpype.tools.utils.delegates import PrettyTimeDelegate
@@ -139,7 +140,7 @@ class NameWindow(QtWidgets.QDialog):
        preview_label = QtWidgets.QLabel("Preview filename", inputs_widget)

        # Subversion input
-        subversion_input = QtWidgets.QLineEdit(inputs_widget)
+        subversion_input = PlaceholderLineEdit(inputs_widget)
        subversion_input.setPlaceholderText("Will be part of filename.")

        # Extensions combobox
@@ -394,9 +395,9 @@ class FilesWidget(QtWidgets.QWidget):
        files_view.setColumnWidth(0, 330)

        # Filtering input
-        filter_input = QtWidgets.QLineEdit(self)
-        filter_input.textChanged.connect(proxy_model.setFilterFixedString)
+        filter_input = PlaceholderLineEdit(self)
+        filter_input.setPlaceholderText("Filter files..")
+        filter_input.textChanged.connect(proxy_model.setFilterFixedString)

        # Home Page
        # Build buttons widget for files widget
@@ -1 +1 @@
-Subproject commit 7e5efd6885330d84bb8495975bcab84df49bfa3d
+Subproject commit 9499f6517a1ff2d3bf94c5d34c0aece146734760