Auto stash before merge of "feature/1585-maya-support-for-frame-steps-and-frame-lists" and "origin/develop"

This commit is contained in:
Ondrej Samohel 2021-06-01 14:11:27 +02:00
parent c3d759a836
commit 3576cb9564
No known key found for this signature in database
GPG key ID: 02376E18990A97C6
5 changed files with 119 additions and 55 deletions

View file

@ -243,9 +243,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
"Cannot get value of {}.{}".format(
node, attribute_name))
else:
# compare values as strings to get around various
# datatypes possible in Settings and Render
# Settings
if str(value) != str(render_value):
invalid = True
cls.log.error(

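For context, a minimal sketch (hypothetical values, not taken from the validator) of why the comparison is done on string representations: a value stored in Settings as a string compared against a Maya attribute returning a number would fail a direct equality check.

value = "2"          # value defined in Settings
render_value = 2     # value queried from Render Settings

value != render_value            # True  - types differ
str(value) != str(render_value)  # False - "2" == "2"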
View file

@ -105,7 +105,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
families = ["render.farm", "prerender.farm",
"renderlayer", "imagesequence", "vrayscene"]
aov_filter = {"maya": [r".*(?:\.|_)?([Bb]eauty)(?:\.|_)?.*"],
aov_filter = {"maya": [r".*(?:\.|_)*([Bb]eauty)(?:\.|_)*.*"],
"aftereffects": [r".*"], # for everything from AE
"harmony": [r".*"], # for everything from AE
"celaction": [r".*"]}
@ -433,12 +433,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
app = os.environ.get("AVALON_APP", "")
preview = False
self.log.info("ffffff")
self.log.info(app)
if app in self.aov_filter.keys():
self.log.info("in")
for aov_pattern in self.aov_filter[app]:
self.log.info(aov_pattern)
self.log.info(aov)
if re.match(aov_pattern,
aov
):
preview = True
self.log.info("{}:{}:{}".format(aov, app, instance_data))
break
new_instance = copy(instance_data)
@ -505,18 +511,23 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
bake_render_path = instance.get("bakeRenderPath", [])
# create representation for every collected sequence
self.log.debug("-------")
for collection in collections:
self.log.debug(collection)
ext = collection.tail.lstrip(".")
preview = False
# if filtered aov name is found in filename, toggle it for
# preview video rendering
for app in self.aov_filter.keys():
self.log.debug("Checking AOV filter for app: {}".format(app))
if os.environ.get("AVALON_APP", "") == app:
for aov in self.aov_filter[app]:
self.log.debug("Matching against AOV pattern: {}".format(aov))
if re.match(
aov,
list(collection)[0]
):
self.log.info("{}:{}:{}".format(aov, app, instance))
preview = True
break

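A hedged sketch of how the collected files are grouped and matched here, using hypothetical file names:

import re
import clique

files = ["beauty.1001.exr", "beauty.1002.exr",
         "depth.1001.exr", "depth.1002.exr"]
collections = clique.assemble(files)[0]

pattern = r".*(?:\.|_)*([Bb]eauty)(?:\.|_)*.*"
for collection in collections:
    first_file = list(collection)[0]
    print(first_file, bool(re.match(pattern, first_file)))
# only the beauty sequence gets preview=True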
View file

@ -20,7 +20,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
# NOTE Import python module here to know if import was successful
import ftrack_api
session = ftrack_api.Session(auto_connect_event_hub=True)
session = ftrack_api.Session(auto_connect_event_hub=False)
session = ftrack_api.Session(auto_connect_event_hub=False)
self.log.debug("Ftrack user: \"{0}\"".format(session.api_user))
# Collect task

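A minimal standalone sketch of the same call pattern (assuming the usual FTRACK_SERVER / FTRACK_API_USER / FTRACK_API_KEY environment variables are set):

import ftrack_api

# the event hub is not needed for collecting data, so do not connect to it
session = ftrack_api.Session(auto_connect_event_hub=False)
print("Ftrack user: \"{0}\"".format(session.api_user))
session.close()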
View file

@ -193,6 +193,15 @@ class ExtractReview(pyblish.api.InstancePlugin):
)
temp_data = self.prepare_temp_data(instance, repre, output_def)
files_to_clean = []
self.log.info("Is sequence: {}".format(temp_data["input_is_sequence"]))
if temp_data["input_is_sequence"]:
self.log.info("Filling gaps in sequence.")
files_to_clean = self.fill_sequence_gaps(
temp_data["origin_repre"]["files"],
new_repre["stagingDir"],
temp_data["frame_start"],
temp_data["frame_end"])
try: # temporary until oiiotool is supported cross platform
ffmpeg_args = self._ffmpeg_arguments(
@ -203,7 +212,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
self.log.debug("Unsupported compression on input " +
"files. Skipping!!!")
return
raise
raise NotImplementedError
subprcs_cmd = " ".join(ffmpeg_args)
@ -214,6 +223,11 @@ class ExtractReview(pyblish.api.InstancePlugin):
subprcs_cmd, shell=True, logger=self.log
)
# delete files added to fill gaps
if files_to_clean:
for f in files_to_clean:
os.unlink(f)
output_name = output_def["filename_suffix"]
if temp_data["without_handles"]:
output_name += "_noHandles"
@ -606,6 +620,91 @@ class ExtractReview(pyblish.api.InstancePlugin):
return all_args
def fill_sequence_gaps(self, files, staging_dir, start_frame, end_frame):
# type: (list, str, int, int) -> list
"""Fill missing files in sequence by duplicating existing ones.
This takes the nearest preceding frame file and copies it to fill
gaps in the sequence. The last existing file found is reused to
fill the holes ahead of it.
Args:
files (list): List of representation files.
staging_dir (str): Path to staging directory.
start_frame (int): Sequence start (regardless of which files exist).
end_frame (int): Sequence end (regardless of which files exist).
Returns:
list: Added file paths. These should be deleted once processing
is done.
Raises:
AssertionError: if more than one collection is obtained.
"""
collections = clique.assemble(files)[0]
assert len(collections) == 1, "Multiple collections found."
col = collections[0]
# do nothing if sequence is complete
if list(col.indexes)[0] == start_frame and \
list(col.indexes)[-1] == end_frame and \
col.is_contiguous():
return []
holes = col.holes()
# generate ideal sequence
complete_col = clique.assemble(
[("{}{:0" + str(col.padding) + "d}{}").format(
col.head, f, col.tail
) for f in range(start_frame, end_frame + 1)]
)[0][0] # type: clique.Collection
new_files = {}
last_existing_file = None
for idx in holes.indexes:
# get previous existing file
test_file = os.path.normpath(os.path.join(
staging_dir,
("{}{:0" + str(complete_col.padding) + "d}{}").format(
complete_col.head, idx - 1, complete_col.tail)))
if os.path.isfile(test_file):
new_files[idx] = test_file
last_existing_file = test_file
else:
if not last_existing_file:
# previous file was not found (the sequence has a hole
# at its beginning); use the first available frame.
try:
last_existing_file = list(col)[0]
except IndexError:
# empty collection?
raise AssertionError(
"Invalid sequence collected")
new_files[idx] = os.path.normpath(
os.path.join(staging_dir, last_existing_file))
files_to_clean = []
if new_files:
# new_files maps each missing frame number to an existing
# file that will be copied into the gap.
for frame, file in new_files.items():
self.log.info(
"Filling gap {} with {}".format(frame, file))
hole = os.path.join(
staging_dir,
("{}{:0" + str(col.padding) + "d}{}").format(
col.head, frame, col.tail))
shutil.copy2(file, hole)
files_to_clean.append(hole)
return files_to_clean
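A hedged usage sketch of the helper from within the plugin, with a hypothetical staging directory and frame range (frames 1003 and 1004 are assumed to be missing from the rendered files on disk):

files = ["render.1001.exr", "render.1002.exr", "render.1005.exr"]
added = self.fill_sequence_gaps(
    files, "/tmp/staging", start_frame=1001, end_frame=1005)
# "/tmp/staging/render.1003.exr" and "render.1004.exr" now exist as
# copies of "render.1002.exr"; remove them once ffmpeg has finished
for path in added:
    os.unlink(path)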
def input_output_paths(self, new_repre, output_def, temp_data):
"""Deduce input nad output file paths based on entered data.
@ -624,52 +723,6 @@ class ExtractReview(pyblish.api.InstancePlugin):
if temp_data["input_is_sequence"]:
collections = clique.assemble(repre["files"])[0]
if not collections[0].is_contiguous():
# there are holes in sequence, lets get them
holes = collections[0].holes()
new_files = {}
last_existing_file = None
for idx in holes.indexes:
# get previous existing file
test_file = os.path.join(
staging_dir, "{}{:0" + holes.padding + "d}{}".format(
holes.head, idx - 1, holes.tail))
if os.path.isfile(test_file):
new_files[idx] = test_file
last_existing_file = test_file
else:
if not last_existing_file:
# previous file is not found (sequence has a hole
# at the beginning. Use first available frame
# there is.
try:
last_existing_file = list(collections[0])[0]
except IndexError:
# empty collection?
raise AssertionError(
"Invalid sequence collected")
new_files[idx] = last_existing_file
# so now new files are dict with missing frame as a key and
# existing file as a value.
files_to_clean = []
if new_files:
for frame, file in new_files.items():
self.log.info(
"Filling gap {} with {}".format(frame, file))
hole = os.path.join(
staging_dir,
"{}{:0" + holes.padding + "d}{}".format(
holes.head, frame, holes.tail))
shutil.copy2(file, hole)
files_to_clean.append(hole)
# 1) copy existing files to temp
# 2) process holes with existing frames
# 3) create new complete collection
# 4) put it into ffmpeg
full_input_path = os.path.join(
staging_dir,
collections[0].format("{head}{padding}{tail}")

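For reference, a small sketch of what the clique format call used for full_input_path produces (hypothetical file names):

import clique

collection = clique.assemble(["render.1001.exr", "render.1002.exr"])[0][0]
print(collection.format("{head}{padding}{tail}"))
# -> "render.%04d.exr", the frame-pattern path handed to ffmpeg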
View file

@ -417,21 +417,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_padding_exp = src_padding_exp
dst_start_frame = None
collection_start = list(src_collection.indexes)[0]
index_frame = index_frame_start
for i in src_collection.indexes:
# TODO 1.) do not count padding in each index iteration
# 2.) do not count dst_padding from src_padding before
# index_frame_start check
frame_number = i - collection_start
src_padding = src_padding_exp % i
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
dst_padding = src_padding_exp % i
dst_padding = src_padding_exp % frame_number
if index_frame_start is not None:
dst_padding_exp = "%0{}d".format(frame_start_padding)
dst_padding = dst_padding_exp % index_frame_start
index_frame_start += 1
dst_padding = dst_padding_exp % (index_frame_start + frame_number)
dst = "{0}{1}{2}".format(
dst_head,
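A minimal sketch of the renumbering this hunk implements, with hypothetical values (the source collection starts at 1001 and the published sequence should start at frame 1):

src_padding_exp = "%04d"
collection_start = 1001
index_frame_start = 1
frame_start_padding = 4

dst_padding_exp = "%0{}d".format(frame_start_padding)
for i in (1001, 1002, 1003):
    frame_number = i - collection_start
    src_padding = src_padding_exp % i
    dst_padding = dst_padding_exp % (index_frame_start + frame_number)
    print(src_padding, "->", dst_padding)
# 1001 -> 0001, 1002 -> 0002, 1003 -> 0003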