Merge branch 'develop' into feature/1004-deadline-better-handling-of-pype

This commit is contained in:
Milan Kolar 2021-03-05 13:34:22 +01:00
commit 1aebe4b8dc
8 changed files with 597 additions and 568 deletions

View file

@@ -0,0 +1,20 @@
from PIL import Image
def composite_images(input_image_paths, output_filepath):
"""Composite images in order from passed list.
Raises:
ValueError: When entered list is empty.
"""
if not input_image_paths:
raise ValueError("Nothing to composite.")
img_obj = None
for image_filepath in input_image_paths:
_img_obj = Image.open(image_filepath)
if img_obj is None:
img_obj = _img_obj
else:
img_obj.alpha_composite(_img_obj)
img_obj.save(output_filepath)
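For illustration, a minimal usage sketch of this helper (the paths are hypothetical; `Image.alpha_composite` requires both images to be same-size RGBA, which the PNG inputs are expected to be). The first image in the list acts as the base and every following image is composited on top of it:

    # Hypothetical paths; first image is the base of the stack.
    layer_renders = [
        "/tmp/staging/pos_2.0001.png",   # bottom of the stack
        "/tmp/staging/pos_1.0001.png",
        "/tmp/staging/pos_0.0001.png",   # top of the stack
    ]
    composite_images(layer_renders, "/tmp/staging/0001.png")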

View file

@@ -48,7 +48,10 @@ class CollectInstances(pyblish.api.ContextPlugin):
instance_data["subset"] = new_subset_name
instance = context.create_instance(**instance_data)
instance.data["layers"] = context.data["layersData"]
instance.data["layers"] = copy.deepcopy(
context.data["layersData"]
)
# Add ftrack family
instance.data["families"].append("ftrack")
@@ -70,15 +73,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
if instance is None:
continue
frame_start = context.data["frameStart"]
frame_end = frame_start
for layer in instance.data["layers"]:
_frame_end = layer["frame_end"]
if _frame_end > frame_end:
frame_end = _frame_end
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
instance.data["frameStart"] = context.data["frameStart"]
instance.data["frameEnd"] = context.data["frameEnd"]
self.log.debug("Created instance: {}\n{}".format(
instance, json.dumps(instance.data, indent=4)

View file

@@ -113,7 +113,8 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
self.log.info("Collecting scene data from workfile")
workfile_info_parts = lib.execute_george("tv_projectinfo").split(" ")
frame_start = int(workfile_info_parts.pop(-1))
# Project frame start - not used
workfile_info_parts.pop(-1)
field_order = workfile_info_parts.pop(-1)
frame_rate = float(workfile_info_parts.pop(-1))
pixel_apsect = float(workfile_info_parts.pop(-1))
@@ -121,21 +122,14 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
width = int(workfile_info_parts.pop(-1))
workfile_path = " ".join(workfile_info_parts).replace("\"", "")
# TODO This is not a proper way of getting the last frame
# - but don't know better
last_frame = frame_start
for layer in layers_data:
frame_end = layer["frame_end"]
if frame_end > last_frame:
last_frame = frame_end
frame_start, frame_end = self.collect_clip_frames()
scene_data = {
"currentFile": workfile_path,
"sceneWidth": width,
"sceneHeight": height,
"pixelAspect": pixel_apsect,
"frameStart": frame_start,
"frameEnd": last_frame,
"frameEnd": frame_end,
"fps": frame_rate,
"fieldOrder": field_order
}
@@ -143,3 +137,21 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
"Scene data: {}".format(json.dumps(scene_data, indent=4))
)
context.data.update(scene_data)
def collect_clip_frames(self):
clip_info_str = lib.execute_george("tv_clipinfo")
self.log.debug("Clip info: {}".format(clip_info_str))
clip_info_items = clip_info_str.split(" ")
# Color index - not used
clip_info_items.pop(-1)
clip_info_items.pop(-1)
mark_out = int(clip_info_items.pop(-1))
frame_end = mark_out + 1
clip_info_items.pop(-1)
mark_in = int(clip_info_items.pop(-1))
frame_start = mark_in + 1
clip_info_items.pop(-1)
return frame_start, frame_end
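The marks returned by George are zero-based while the pipeline uses one-based frame numbers, hence the `+ 1` on both values. A worked example, assuming a clip marked from 0 to 49 in TVPaint:

    mark_in, mark_out = 0, 49      # zero-based TVPaint marks
    frame_start = mark_in + 1      # -> 1
    frame_end = mark_out + 1       # -> 50
    # The clip then covers frame_end - frame_start + 1 = 50 frames.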

View file

@@ -1,9 +1,13 @@
import os
import shutil
import time
import tempfile
import multiprocessing
import pyblish.api
from avalon.tvpaint import lib
from pype.hosts.tvpaint.api.lib import composite_images
from PIL import Image, ImageDraw
class ExtractSequence(pyblish.api.Extractor):
@@ -11,47 +15,6 @@ class ExtractSequence(pyblish.api.Extractor):
hosts = ["tvpaint"]
families = ["review", "renderPass", "renderLayer"]
save_mode_to_ext = {
"avi": ".avi",
"bmp": ".bmp",
"cin": ".cin",
"deep": ".dip",
"dps": ".dps",
"dpx": ".dpx",
"flc": ".fli",
"gif": ".gif",
"ilbm": ".iff",
"jpg": ".jpg",
"jpeg": ".jpg",
"pcx": ".pcx",
"png": ".png",
"psd": ".psd",
"qt": ".qt",
"rtv": ".rtv",
"sun": ".ras",
"tiff": ".tiff",
"tga": ".tga",
"vpb": ".vpb"
}
sequential_save_mode = {
"bmp",
"dpx",
"ilbm",
"jpg",
"jpeg",
"png",
"sun",
"tiff",
"tga"
}
default_save_mode = "\"PNG\""
save_mode_for_family = {
"review": "\"PNG\"",
"renderPass": "\"PNG\"",
"renderLayer": "\"PNG\"",
}
def process(self, instance):
self.log.info(
"* Processing instance \"{}\"".format(instance.data["label"])
@@ -67,7 +30,7 @@ class ExtractSequence(pyblish.api.Extractor):
layer_names = [str(layer["name"]) for layer in filtered_layers]
if not layer_names:
self.log.info(
f"None of the layers from the instance"
"None of the layers from the instance"
" are visible. Extraction skipped."
)
return
@@ -80,34 +43,15 @@ class ExtractSequence(pyblish.api.Extractor):
len(layer_names), joined_layer_names
)
)
# This is the plugin attribute cleanup method
self._prepare_save_modes()
family_lowered = instance.data["family"].lower()
save_mode = self.save_mode_for_family.get(
family_lowered, self.default_save_mode
)
save_mode_type = self._get_save_mode_type(save_mode)
if not bool(save_mode_type in self.sequential_save_mode):
raise AssertionError((
"Plugin can export only sequential frame output"
" but save mode for family \"{}\" is not for sequence > {} <"
).format(instance.data["family"], save_mode))
frame_start = instance.data["frameStart"]
frame_end = instance.data["frameEnd"]
filename_template = self._get_filename_template(
save_mode_type, save_mode, frame_end
)
filename_template = self._get_filename_template(frame_end)
ext = os.path.splitext(filename_template)[1].replace(".", "")
self.log.debug(
"Using save mode > {} < and file template \"{}\"".format(
save_mode, filename_template
)
)
self.log.debug("Using file template \"{}\"".format(filename_template))
# Save to staging dir
output_dir = instance.data.get("stagingDir")
@@ -120,34 +64,22 @@ class ExtractSequence(pyblish.api.Extractor):
"Files will be rendered to folder: {}".format(output_dir)
)
thumbnail_filename = "thumbnail"
# Render output
output_files_by_frame = self.render(
save_mode, filename_template, output_dir,
filtered_layers, frame_start, frame_end, thumbnail_filename
)
thumbnail_fullpath = output_files_by_frame.pop(
thumbnail_filename, None
)
# Fill gaps in sequence
self.fill_missing_frames(
output_files_by_frame,
frame_start,
frame_end,
filename_template
)
if instance.data["family"] == "review":
repre_files, thumbnail_fullpath = self.render_review(
filename_template, output_dir, frame_start, frame_end
)
else:
# Render output
repre_files, thumbnail_fullpath = self.render(
filename_template, output_dir, frame_start, frame_end,
filtered_layers
)
# Fill tags and new families
tags = []
if family_lowered in ("review", "renderlayer"):
tags.append("review")
repre_files = [
os.path.basename(filepath)
for filepath in output_files_by_frame.values()
]
# Sequence of one frame
if len(repre_files) == 1:
repre_files = repre_files[0]
@@ -157,8 +89,8 @@ class ExtractSequence(pyblish.api.Extractor):
"ext": ext,
"files": repre_files,
"stagingDir": output_dir,
"frameStart": frame_start + 1,
"frameEnd": frame_end + 1,
"frameStart": frame_start,
"frameEnd": frame_end,
"tags": tags
}
self.log.debug("Creating new representation: {}".format(new_repre))
@@ -186,33 +118,7 @@ class ExtractSequence(pyblish.api.Extractor):
}
instance.data["representations"].append(thumbnail_repre)
def _prepare_save_modes(self):
"""Lower family names in keys and skip empty values."""
new_specifications = {}
for key, value in self.save_mode_for_family.items():
if value:
new_specifications[key.lower()] = value
else:
self.log.warning((
"Save mode for family \"{}\" has empty value."
" The family will use default save mode: > {} <."
).format(key, self.default_save_mode))
self.save_mode_for_family = new_specifications
def _get_save_mode_type(self, save_mode):
"""Extract type of save mode.
Helps to define output files extension.
"""
save_mode_type = (
save_mode.lower()
.split(" ")[0]
.replace("\"", "")
)
self.log.debug("Save mode type is \"{}\"".format(save_mode_type))
return save_mode_type
def _get_filename_template(self, save_mode_type, save_mode, frame_end):
def _get_filename_template(self, frame_end):
"""Get filetemplate for rendered files.
This is simple template contains `{frame}{ext}` for sequential outputs
@@ -220,145 +126,504 @@ class ExtractSequence(pyblish.api.Extractor):
temporary folder, so the filename should not matter as the integrator
changes them.
"""
ext = self.save_mode_to_ext.get(save_mode_type)
if ext is None:
raise AssertionError((
"Couldn't find file extension for TVPaint's save mode: > {} <"
).format(save_mode))
frame_padding = 4
frame_end_str_len = len(str(frame_end))
if frame_end_str_len > frame_padding:
frame_padding = frame_end_str_len
return "{{frame:0>{}}}".format(frame_padding) + ext
return "{{frame:0>{}}}".format(frame_padding) + ".png"
def render(
self, save_mode, filename_template, output_dir, layers,
first_frame, last_frame, thumbnail_filename
def render_review(
self, filename_template, output_dir, frame_start, frame_end
):
""" Export images from TVPaint.
""" Export images from TVPaint using `tv_savesequence` command.
Args:
save_mode (str): Argument for `tv_savemode` george script function.
More about save modes in the documentation.
filename_template (str): Filename template of the output. The template
should already contain the extension and may contain only the
keyword argument `{frame}` or an index argument (for the same value).
The extension in the template must match `save_mode`.
layers (list): List of layers to be exported.
output_dir (str): Directory where files will be stored.
first_frame (int): Starting frame from which export will begin.
last_frame (int): On which frame export will end.
Returns:
dict: Mapping frame to output filepath.
tuple: With 2 items, the first is a list of filenames and the second
is a path to the thumbnail.
"""
self.log.debug("Preparing data for rendering.")
first_frame_filepath = os.path.join(
output_dir,
filename_template.format(frame=frame_start)
)
mark_in = frame_start - 1
mark_out = frame_end - 1
# Add save mode arguments to function
save_mode = "tv_SaveMode {}".format(save_mode)
george_script_lines = [
"tv_SaveMode \"PNG\"",
"export_path = \"{}\"".format(
first_frame_filepath.replace("\\", "/")
),
"tv_savesequence '\"'export_path'\"' {} {}".format(
mark_in, mark_out
)
]
lib.execute_george_through_file("\n".join(george_script_lines))
output = []
first_frame_filepath = None
for frame in range(frame_start, frame_end + 1):
filename = filename_template.format(frame=frame)
output.append(filename)
if first_frame_filepath is None:
first_frame_filepath = os.path.join(output_dir, filename)
thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg")
if first_frame_filepath and os.path.exists(first_frame_filepath):
source_img = Image.open(first_frame_filepath)
thumbnail_obj = Image.new("RGB", source_img.size, (255, 255, 255))
thumbnail_obj.paste(source_img)
thumbnail_obj.save(thumbnail_filepath)
return output, thumbnail_filepath
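For reference, with frame_start=1 and frame_end=50 the joined `george_script_lines` sent to TVPaint would read roughly as follows (the staging path is a hypothetical example):

    tv_SaveMode "PNG"
    export_path = "/tmp/staging/0001.png"
    tv_savesequence '"'export_path'"' 0 49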
def render(
self, filename_template, output_dir, frame_start, frame_end, layers
):
""" Export images from TVPaint.
Args:
filename_template (str): Filename template of the output. The template
should already contain the extension and may contain only the
keyword argument `{frame}` or an index argument (for the same value).
The extension in the template must match `save_mode`.
output_dir (str): Directory where files will be stored.
first_frame (int): Starting frame from which export will begin.
last_frame (int): On which frame export will end.
layers (list): List of layers to be exported.
Returns:
tuple: With 2 items, the first is a list of filenames and the second
is a path to the thumbnail.
"""
self.log.debug("Preparing data for rendering.")
# Map layers by position
layers_by_position = {
layer["position"]: layer
for layer in layers
}
layers_by_position = {}
layer_ids = []
for layer in layers:
position = layer["position"]
layers_by_position[position] = layer
layer_ids.append(layer["layer_id"])
# Sort layer positions in reverse order
sorted_positions = list(reversed(sorted(layers_by_position.keys())))
if not sorted_positions:
return
# Create temporary layer
new_layer_id = lib.execute_george("tv_layercreate _tmp_layer")
self.log.debug("Collecting pre/post behavior of individual layers.")
behavior_by_layer_id = lib.get_layers_pre_post_behavior(layer_ids)
# Merge layers to temp layer
george_script_lines = []
# Set duplicated layer as current
george_script_lines.append("tv_layerset {}".format(new_layer_id))
mark_in_index = frame_start - 1
mark_out_index = frame_end - 1
tmp_filename_template = "pos_{pos}." + filename_template
files_by_position = {}
for position in sorted_positions:
layer = layers_by_position[position]
george_script_lines.append(
"tv_layermerge {}".format(layer["layer_id"])
)
behavior = behavior_by_layer_id[layer["layer_id"]]
files_by_frames = self._render_layer(
layer,
tmp_filename_template,
output_dir,
behavior,
mark_in_index,
mark_out_index
)
files_by_position[position] = files_by_frames
lib.execute_george_through_file("\n".join(george_script_lines))
output_filepaths = self._composite_files(
files_by_position,
mark_in_index,
mark_out_index,
filename_template,
output_dir
)
self._cleanup_tmp_files(files_by_position)
# Frames with keyframe
thumbnail_src_filepath = None
thumbnail_filepath = None
if output_filepaths:
thumbnail_src_filepath = tuple(sorted(output_filepaths))[0]
if thumbnail_src_filepath and os.path.exists(thumbnail_src_filepath):
source_img = Image.open(thumbnail_src_filepath)
thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg")
thumbnail_obj = Image.new("RGB", source_img.size, (255, 255, 255))
thumbnail_obj.paste(source_img)
thumbnail_obj.save(thumbnail_filepath)
repre_files = [
os.path.basename(path)
for path in output_filepaths
]
return repre_files, thumbnail_filepath
def _render_layer(
self,
layer,
tmp_filename_template,
output_dir,
behavior,
mark_in_index,
mark_out_index
):
layer_id = layer["layer_id"]
frame_start_index = layer["frame_start"]
frame_end_index = layer["frame_end"]
exposure_frames = lib.get_exposure_frames(
new_layer_id, first_frame, last_frame
layer_id, frame_start_index, frame_end_index
)
# TODO what if there are no exposure frames?
# - this forces the first frame to be present all the time
if first_frame not in exposure_frames:
exposure_frames.insert(0, first_frame)
if frame_start_index not in exposure_frames:
exposure_frames.append(frame_start_index)
# Restart george script lines
george_script_lines = []
george_script_lines.append(save_mode)
layer_files_by_frame = {}
george_script_lines = [
"tv_SaveMode \"PNG\""
]
layer_position = layer["position"]
all_output_files = {}
for frame in exposure_frames:
filename = filename_template.format(frame, frame=frame)
for frame_idx in exposure_frames:
filename = tmp_filename_template.format(
pos=layer_position,
frame=frame_idx
)
dst_path = "/".join([output_dir, filename])
all_output_files[frame] = os.path.normpath(dst_path)
layer_files_by_frame[frame_idx] = os.path.normpath(dst_path)
# Go to frame
george_script_lines.append("tv_layerImage {}".format(frame))
george_script_lines.append("tv_layerImage {}".format(frame_idx))
# Store image to output
george_script_lines.append("tv_saveimage \"{}\"".format(dst_path))
# Export thumbnail
if thumbnail_filename:
basename, ext = os.path.splitext(thumbnail_filename)
if not ext:
ext = ".jpg"
thumbnail_fullpath = "/".join([output_dir, basename + ext])
all_output_files[thumbnail_filename] = thumbnail_fullpath
# Force save mode to png for thumbnail
george_script_lines.append("tv_SaveMode \"JPG\"")
# Go to frame
george_script_lines.append("tv_layerImage {}".format(first_frame))
# Store image to output
george_script_lines.append(
"tv_saveimage \"{}\"".format(thumbnail_fullpath)
)
# Delete temporary layer
george_script_lines.append("tv_layerkill {}".format(new_layer_id))
self.log.debug("Rendering Exposure frames {} of layer {} ({})".format(
str(exposure_frames), layer_id, layer["name"]
))
# Let TVPaint render layer's image
lib.execute_george_through_file("\n".join(george_script_lines))
return all_output_files
# Fill frames between `frame_start_index` and `frame_end_index`
self.log.debug((
"Filling frames between first and last frame of layer ({} - {})."
).format(frame_start_index + 1, frame_end_index + 1))
def fill_missing_frames(
self, filepaths_by_frame, first_frame, last_frame, filename_template
):
"""Fill not rendered frames with previous frame.
Extractor is rendering only frames with keyframes (exposure frames) to
get output faster which means there may be gaps between frames.
This function fill the missing frames.
"""
output_dir = None
previous_frame_filepath = None
for frame in range(first_frame, last_frame + 1):
if frame in filepaths_by_frame:
previous_frame_filepath = filepaths_by_frame[frame]
_debug_filled_frames = []
prev_filepath = None
for frame_idx in range(frame_start_index, frame_end_index + 1):
if frame_idx in layer_files_by_frame:
prev_filepath = layer_files_by_frame[frame_idx]
continue
elif previous_frame_filepath is None:
self.log.warning(
"No frames to fill. Seems like nothing was exported."
if prev_filepath is None:
raise ValueError("BUG: First frame of layer was not rendered!")
_debug_filled_frames.append(frame_idx)
filename = tmp_filename_template.format(
pos=layer_position,
frame=frame_idx
)
new_filepath = "/".join([output_dir, filename])
self._copy_image(prev_filepath, new_filepath)
layer_files_by_frame[frame_idx] = new_filepath
self.log.debug("Filled frames {}".format(str(_debug_filled_frames)))
# Fill frames by pre/post behavior of layer
pre_behavior = behavior["pre"]
post_behavior = behavior["post"]
self.log.debug((
"Completing image sequence of layer by pre/post behavior."
" PRE: {} | POST: {}"
).format(pre_behavior, post_behavior))
# Pre behavior
self._fill_frame_by_pre_behavior(
layer,
pre_behavior,
mark_in_index,
layer_files_by_frame,
tmp_filename_template,
output_dir
)
self._fill_frame_by_post_behavior(
layer,
post_behavior,
mark_out_index,
layer_files_by_frame,
tmp_filename_template,
output_dir
)
return layer_files_by_frame
def _fill_frame_by_pre_behavior(
self,
layer,
pre_behavior,
mark_in_index,
layer_files_by_frame,
filename_template,
output_dir
):
layer_position = layer["position"]
frame_start_index = layer["frame_start"]
frame_end_index = layer["frame_end"]
frame_count = frame_end_index - frame_start_index + 1
if mark_in_index >= frame_start_index:
self.log.debug((
"Skipping pre-behavior."
" All frames after Mark In are rendered."
))
return
if pre_behavior == "none":
# Empty frames are handled during `_composite_files`
pass
elif pre_behavior == "hold":
# Keep the first frame for the whole time
eq_frame_filepath = layer_files_by_frame[frame_start_index]
for frame_idx in range(mark_in_index, frame_start_index):
filename = filename_template.format(
pos=layer_position,
frame=frame_idx
)
new_filepath = "/".join([output_dir, filename])
self._copy_image(eq_frame_filepath, new_filepath)
layer_files_by_frame[frame_idx] = new_filepath
elif pre_behavior == "loop":
# Loop backwards from last frame of layer
for frame_idx in reversed(range(mark_in_index, frame_start_index)):
eq_frame_idx_offset = (
(frame_end_index - frame_idx) % frame_count
)
eq_frame_idx = frame_end_index - eq_frame_idx_offset
eq_frame_filepath = layer_files_by_frame[eq_frame_idx]
filename = filename_template.format(
pos=layer_position,
frame=frame_idx
)
new_filepath = "/".join([output_dir, filename])
self._copy_image(eq_frame_filepath, new_filepath)
layer_files_by_frame[frame_idx] = new_filepath
elif pre_behavior == "pingpong":
half_seq_len = frame_count - 1
seq_len = half_seq_len * 2
for frame_idx in reversed(range(mark_in_index, frame_start_index)):
eq_frame_idx_offset = (frame_start_index - frame_idx) % seq_len
if eq_frame_idx_offset > half_seq_len:
eq_frame_idx_offset = (seq_len - eq_frame_idx_offset)
eq_frame_idx = frame_start_index + eq_frame_idx_offset
eq_frame_filepath = layer_files_by_frame[eq_frame_idx]
filename = filename_template.format(
pos=layer_position,
frame=frame_idx
)
new_filepath = "/".join([output_dir, filename])
self._copy_image(eq_frame_filepath, new_filepath)
layer_files_by_frame[frame_idx] = new_filepath
def _fill_frame_by_post_behavior(
self,
layer,
post_behavior,
mark_out_index,
layer_files_by_frame,
filename_template,
output_dir
):
layer_position = layer["position"]
frame_start_index = layer["frame_start"]
frame_end_index = layer["frame_end"]
frame_count = frame_end_index - frame_start_index + 1
if mark_out_index <= frame_end_index:
self.log.debug((
"Skipping post-behavior."
" All frames up to Mark Out are rendered."
))
return
if post_behavior == "none":
# Empty frames are handled during `_composite_files`
pass
elif post_behavior == "hold":
# Keep the last frame for the whole time
eq_frame_filepath = layer_files_by_frame[frame_end_index]
for frame_idx in range(frame_end_index + 1, mark_out_index + 1):
filename = filename_template.format(
pos=layer_position,
frame=frame_idx
)
new_filepath = "/".join([output_dir, filename])
self._copy_image(eq_frame_filepath, new_filepath)
layer_files_by_frame[frame_idx] = new_filepath
elif post_behavior == "loop":
# Repeat layer frames from the beginning
for frame_idx in range(frame_end_index + 1, mark_out_index + 1):
eq_frame_idx = frame_idx % frame_count
eq_frame_filepath = layer_files_by_frame[eq_frame_idx]
filename = filename_template.format(
pos=layer_position,
frame=frame_idx
)
new_filepath = "/".join([output_dir, filename])
self._copy_image(eq_frame_filepath, new_filepath)
layer_files_by_frame[frame_idx] = new_filepath
elif post_behavior == "pingpong":
half_seq_len = frame_count - 1
seq_len = half_seq_len * 2
for frame_idx in range(frame_end_index + 1, mark_out_index + 1):
eq_frame_idx_offset = (frame_idx - frame_end_index) % seq_len
if eq_frame_idx_offset > half_seq_len:
eq_frame_idx_offset = seq_len - eq_frame_idx_offset
eq_frame_idx = frame_end_index - eq_frame_idx_offset
eq_frame_filepath = layer_files_by_frame[eq_frame_idx]
filename = filename_template.format(
pos=layer_position,
frame=frame_idx
)
new_filepath = "/".join([output_dir, filename])
self._copy_image(eq_frame_filepath, new_filepath)
layer_files_by_frame[frame_idx] = new_filepath
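To make the pre/post behavior arithmetic concrete, a small worked sketch, assuming a layer spanning frame indexes 0-3 (frame_count = 4) with Mark Out at index 9. Note the modulo in the "loop" branch maps correctly here because the layer starts at index 0:

    frame_end_index = 3
    frame_count = 4

    # "loop": repeat the layer after its last frame
    [idx % frame_count for idx in range(4, 10)]
    # -> [0, 1, 2, 3, 0, 1]

    # "pingpong": bounce back and forth
    half_seq_len = frame_count - 1      # 3
    seq_len = half_seq_len * 2          # 6
    result = []
    for idx in range(4, 10):
        offset = (idx - frame_end_index) % seq_len
        if offset > half_seq_len:
            offset = seq_len - offset
        result.append(frame_end_index - offset)
    # result -> [2, 1, 0, 1, 2, 3]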
def _composite_files(
self, files_by_position, frame_start, frame_end,
filename_template, output_dir
):
"""Composite frames when more that one layer was exported.
This method is used when more than one layer is rendered out so and
output should be composition of each frame of rendered layers.
Missing frames are filled with transparent images.
"""
self.log.debug("Preparing files for compisiting.")
# Prepare paths to images by frames into list where are stored
# in order of compositing.
images_by_frame = {}
for frame_idx in range(frame_start, frame_end + 1):
images_by_frame[frame_idx] = []
for position in sorted(files_by_position.keys(), reverse=True):
position_data = files_by_position[position]
if frame_idx in position_data:
filepath = position_data[frame_idx]
images_by_frame[frame_idx].append(filepath)
process_count = os.cpu_count()
if process_count > 1:
process_count -= 1
processes = {}
output_filepaths = []
missing_frame_paths = []
random_frame_path = None
for frame_idx in sorted(images_by_frame.keys()):
image_filepaths = images_by_frame[frame_idx]
output_filename = filename_template.format(frame=frame_idx + 1)
output_filepath = os.path.join(output_dir, output_filename)
output_filepaths.append(output_filepath)
# Store information about missing frame and skip
if not image_filepaths:
missing_frame_paths.append(output_filepath)
continue
# Just rename the file if there is no need for compositing
if len(image_filepaths) == 1:
os.rename(image_filepaths[0], output_filepath)
# Prepare process for compositing of images
else:
processes[frame_idx] = multiprocessing.Process(
target=composite_images,
args=(image_filepaths, output_filepath)
)
# Store path of a random output image that is guaranteed to exist
# after all multiprocessing is done, as a mockup for missing frames
if random_frame_path is None:
random_frame_path = output_filepath
self.log.info(
"Running {} compositing processes - this mey take a while.".format(
len(processes)
)
)
# Wait until all compositing processes are done
running_processes = {}
while True:
for idx in tuple(running_processes.keys()):
process = running_processes[idx]
if not process.is_alive():
running_processes.pop(idx).join()
if processes and len(running_processes) != process_count:
indexes = list(processes.keys())
for _ in range(process_count - len(running_processes)):
if not indexes:
break
idx = indexes.pop(0)
running_processes[idx] = processes.pop(idx)
running_processes[idx].start()
if not running_processes and not processes:
break
if output_dir is None:
output_dir = os.path.dirname(previous_frame_filepath)
filename = filename_template.format(frame=frame)
space_filepath = os.path.normpath(
os.path.join(output_dir, filename)
)
filepaths_by_frame[frame] = space_filepath
shutil.copy(previous_frame_filepath, space_filepath)
time.sleep(0.01)
self.log.debug(
"Creating transparent images for frames without render {}.".format(
str(missing_frame_paths)
)
)
# Fill the sequence with transparent frames
transparent_filepath = None
for filepath in missing_frame_paths:
if transparent_filepath is None:
img_obj = Image.open(random_frame_path)
painter = ImageDraw.Draw(img_obj)
painter.rectangle((0, 0, *img_obj.size), fill=(0, 0, 0, 0))
img_obj.save(filepath)
transparent_filepath = filepath
else:
self._copy_image(transparent_filepath, filepath)
return output_filepaths
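The throttling loop above keeps at most `process_count` compositing processes alive at a time. The same effect could be sketched with `multiprocessing.Pool`, shown here only as a simplified equivalent, not the plugin's actual implementation:

    import multiprocessing

    def composite_all(jobs, process_count):
        # jobs: list of (input_image_paths, output_filepath) tuples,
        # matching the signature of composite_images.
        with multiprocessing.Pool(process_count) as pool:
            pool.starmap(composite_images, jobs)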
def _cleanup_tmp_files(self, files_by_position):
"""Remove temporary files that were used for compositing."""
for data in files_by_position.values():
for filepath in data.values():
if os.path.exists(filepath):
os.remove(filepath)
def _copy_image(self, src_path, dst_path):
"""Create a copy of an image.
This was added to make it easier to change the copy method.
"""
# Create hardlink of image instead of copying if possible
if hasattr(os, "link"):
os.link(src_path, dst_path)
else:
shutil.copy(src_path, dst_path)
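One caveat worth noting: `os.link` can still raise at runtime (for example when `src_path` and `dst_path` sit on different filesystems), so a more defensive sketch of this helper would fall back to a real copy:

    def _copy_image(self, src_path, dst_path):
        """Create a copy of an image, preferring a cheap hardlink."""
        try:
            # A hardlink is instant and costs no extra disk space,
            # but works only within a single filesystem.
            os.link(src_path, dst_path)
        except (AttributeError, OSError):
            shutil.copy(src_path, dst_path)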

View file

@@ -0,0 +1,16 @@
import pyblish.api
class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
"""Validate existence of renderPass layers."""
label = "Validate Layers Visibility"
order = pyblish.api.ValidatorOrder
families = ["review", "renderPass", "renderLayer"]
def process(self, instance):
for layer in instance.data["layers"]:
if layer["visible"]:
return
raise AssertionError("All layers of instance are not visible.")

View file

@@ -1,12 +1,36 @@
import sys
import collections
import six
import pyblish.api
from avalon import io
try:
from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_AUTO_SYNC
except Exception:
CUST_ATTR_AUTO_SYNC = "avalon_auto_sync"
# Copy of constant `pype.modules.ftrack.lib.avalon_sync.CUST_ATTR_AUTO_SYNC`
CUST_ATTR_AUTO_SYNC = "avalon_auto_sync"
# Copy of `get_pype_attr` from pype.modules.ftrack.lib
def get_pype_attr(session, split_hierarchical=True):
custom_attributes = []
hier_custom_attributes = []
# TODO remove deprecated "avalon" group from query
cust_attrs_query = (
"select id, entity_type, object_type_id, is_hierarchical, default"
" from CustomAttributeConfiguration"
" where group.name in (\"avalon\", \"pype\")"
)
all_avalon_attr = session.query(cust_attrs_query).all()
for cust_attr in all_avalon_attr:
if split_hierarchical and cust_attr["is_hierarchical"]:
hier_custom_attributes.append(cust_attr)
continue
custom_attributes.append(cust_attr)
if split_hierarchical:
# return tuple
return custom_attributes, hier_custom_attributes
return custom_attributes
class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
@@ -36,7 +60,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder - 0.04
label = 'Integrate Hierarchy To Ftrack'
families = ["shot"]
hosts = ["hiero", "resolve"]
hosts = ["hiero", "resolve", "standalonepublisher"]
optional = False
def process(self, context):
@@ -74,6 +98,15 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
self.auto_sync_on(project)
def import_to_ftrack(self, input_data, parent=None):
# Pre-query hierarchical custom attributes
hier_custom_attributes = get_pype_attr(self.session)[1]
hier_attr_by_key = {
attr["key"]: attr
for attr in hier_custom_attributes
}
# Get ftrack api module (as it differs per Python version)
ftrack_api = self.context.data["ftrackPythonModule"]
for entity_name in input_data:
entity_data = input_data[entity_name]
entity_type = entity_data['entity_type']
@@ -116,12 +149,33 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
i for i in self.context if i.data['asset'] in entity['name']
]
for key in custom_attributes:
assert (key in entity['custom_attributes']), (
'Missing custom attribute key: `{0}` in attrs: '
'`{1}`'.format(key, entity['custom_attributes'].keys())
)
hier_attr = hier_attr_by_key.get(key)
# Use simple method if key is not hierarchical
if not hier_attr:
assert (key in entity['custom_attributes']), (
'Missing custom attribute key: `{0}` in attrs: '
'`{1}`'.format(key, entity['custom_attributes'].keys())
)
entity['custom_attributes'][key] = custom_attributes[key]
entity['custom_attributes'][key] = custom_attributes[key]
else:
# Use ftrack operations method to set hierarchical
# attribute value.
# - this is because there may be non-hierarchical custom
# attributes with different properties
entity_key = collections.OrderedDict()
entity_key["configuration_id"] = hier_attr["id"]
entity_key["entity_id"] = entity["id"]
self.session.recorded_operations.push(
ftrack_api.operation.UpdateEntityOperation(
"ContextCustomAttributeValue",
entity_key,
"value",
ftrack_api.symbol.NOT_SET,
custom_attributes[key]
)
)
for instance in instances:
instance.data['ftrackEntity'] = entity
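For context, the recorded-operation branch above writes the hierarchical value straight into ftrack's ContextCustomAttributeValue table, keyed by attribute configuration and entity. Condensed to its essentials (with `session`, `ftrack_api`, `hier_attr` and `entity` as in the plugin, and `new_value` standing in for `custom_attributes[key]`):

    entity_key = collections.OrderedDict()
    entity_key["configuration_id"] = hier_attr["id"]  # which attribute
    entity_key["entity_id"] = entity["id"]            # which entity

    session.recorded_operations.push(
        ftrack_api.operation.UpdateEntityOperation(
            "ContextCustomAttributeValue",
            entity_key,
            "value",                      # the column being changed
            ftrack_api.symbol.NOT_SET,    # old value is unknown
            new_value
        )
    )
    session.commit()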

View file

@@ -1,331 +0,0 @@
import sys
import six
import collections
import pyblish.api
from avalon import io
from pype.modules.ftrack.lib.avalon_sync import (
CUST_ATTR_AUTO_SYNC,
get_pype_attr
)
class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
"""
Create entities in ftrack based on collected data from premiere
Example of entry data:
{
"ProjectXS": {
"entity_type": "Project",
"custom_attributes": {
"fps": 24,...
},
"tasks": [
"Compositing",
"Lighting",... *task must exist as task type in project schema*
],
"childs": {
"sq01": {
"entity_type": "Sequence",
...
}
}
}
}
"""
order = pyblish.api.IntegratorOrder - 0.04
label = 'Integrate Hierarchy To Ftrack'
families = ["shot"]
hosts = ["standalonepublisher"]
optional = False
def process(self, context):
self.context = context
if "hierarchyContext" not in self.context.data:
return
hierarchy_context = self.context.data["hierarchyContext"]
self.session = self.context.data["ftrackSession"]
project_name = self.context.data["projectEntity"]["name"]
query = 'Project where full_name is "{}"'.format(project_name)
project = self.session.query(query).one()
auto_sync_state = project[
"custom_attributes"][CUST_ATTR_AUTO_SYNC]
if not io.Session:
io.install()
self.ft_project = None
input_data = hierarchy_context
# temporarily disable ftrack project's auto-syncing
if auto_sync_state:
self.auto_sync_off(project)
try:
# import ftrack hierarchy
self.import_to_ftrack(input_data)
except Exception:
raise
finally:
if auto_sync_state:
self.auto_sync_on(project)
def import_to_ftrack(self, input_data, parent=None):
# Pre-query hierarchical custom attributes
hier_custom_attributes = get_pype_attr(self.session)[1]
hier_attr_by_key = {
attr["key"]: attr
for attr in hier_custom_attributes
}
# Get ftrack api module (as it differs per Python version)
ftrack_api = self.context.data["ftrackPythonModule"]
for entity_name in input_data:
entity_data = input_data[entity_name]
entity_type = entity_data['entity_type']
self.log.debug(entity_data)
self.log.debug(entity_type)
if entity_type.lower() == 'project':
query = 'Project where full_name is "{}"'.format(entity_name)
entity = self.session.query(query).one()
self.ft_project = entity
self.task_types = self.get_all_task_types(entity)
elif self.ft_project is None or parent is None:
raise AssertionError(
"Collected items are not in right order!"
)
# try to find if entity already exists
else:
query = (
'TypedContext where name is "{0}" and '
'project_id is "{1}"'
).format(entity_name, self.ft_project["id"])
try:
entity = self.session.query(query).one()
except Exception:
entity = None
# Create entity if not exists
if entity is None:
entity = self.create_entity(
name=entity_name,
type=entity_type,
parent=parent
)
# self.log.info('entity: {}'.format(dict(entity)))
# CUSTOM ATTRIBUTES
custom_attributes = entity_data.get('custom_attributes', [])
instances = [
i for i in self.context if i.data['asset'] in entity['name']
]
for key in custom_attributes:
hier_attr = hier_attr_by_key.get(key)
# Use simple method if key is not hierarchical
if not hier_attr:
assert (key in entity['custom_attributes']), (
'Missing custom attribute key: `{0}` in attrs: '
'`{1}`'.format(key, entity['custom_attributes'].keys())
)
entity['custom_attributes'][key] = custom_attributes[key]
else:
# Use ftrack operations method to set hierarchical
# attribute value.
# - this is because there may be non-hierarchical custom
# attributes with different properties
entity_key = collections.OrderedDict({
"configuration_id": hier_attr["id"],
"entity_id": entity["id"]
})
self.session.recorded_operations.push(
ftrack_api.operation.UpdateEntityOperation(
"ContextCustomAttributeValue",
entity_key,
"value",
ftrack_api.symbol.NOT_SET,
custom_attributes[key]
)
)
for instance in instances:
instance.data['ftrackEntity'] = entity
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# TASKS
tasks = entity_data.get('tasks', [])
existing_tasks = []
tasks_to_create = []
for child in entity['children']:
if child.entity_type.lower() == 'task':
existing_tasks.append(child['name'].lower())
# existing_tasks.append(child['type']['name'])
for task_name in tasks:
task_type = tasks[task_name]["type"]
if task_name.lower() in existing_tasks:
print("Task {} already exists".format(task_name))
continue
tasks_to_create.append((task_name, task_type))
for task_name, task_type in tasks_to_create:
self.create_task(
name=task_name,
task_type=task_type,
parent=entity
)
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Incoming links.
self.create_links(entity_data, entity)
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Create notes.
user = self.session.query(
"User where username is \"{}\"".format(self.session.api_user)
).first()
if user:
for comment in entity_data.get("comments", []):
entity.create_note(comment, user)
else:
self.log.warning(
"Was not able to query current User {}".format(
self.session.api_user
)
)
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Import children.
if 'childs' in entity_data:
self.import_to_ftrack(
entity_data['childs'], entity)
def create_links(self, entity_data, entity):
# Clear existing links.
for link in entity.get("incoming_links", []):
self.session.delete(link)
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Create new links.
for input in entity_data.get("inputs", []):
input_id = io.find_one({"_id": input})["data"]["ftrackId"]
assetbuild = self.session.get("AssetBuild", input_id)
self.log.debug(
"Creating link from {0} to {1}".format(
assetbuild["name"], entity["name"]
)
)
self.session.create(
"TypedContextLink", {"from": assetbuild, "to": entity}
)
def get_all_task_types(self, project):
tasks = {}
proj_template = project['project_schema']
temp_task_types = proj_template['_task_type_schema']['types']
for type in temp_task_types:
if type['name'] not in tasks:
tasks[type['name']] = type
return tasks
def create_task(self, name, task_type, parent):
task = self.session.create('Task', {
'name': name,
'parent': parent
})
# TODO not secured!!! - check if task_type exists
self.log.info(task_type)
self.log.info(self.task_types)
task['type'] = self.task_types[task_type]
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
return task
def create_entity(self, name, type, parent):
entity = self.session.create(type, {
'name': name,
'parent': parent
})
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
return entity
def auto_sync_off(self, project):
project["custom_attributes"][CUST_ATTR_AUTO_SYNC] = False
self.log.info("Ftrack autosync swithed off")
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
def auto_sync_on(self, project):
project["custom_attributes"][CUST_ATTR_AUTO_SYNC] = True
self.log.info("Ftrack autosync swithed on")
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)

View file

@@ -340,11 +340,8 @@ class FamilyWidget(QtWidgets.QWidget):
).distinct("name")
if versions:
versions = sorted(
[v for v in versions],
key=lambda ver: ver['name']
)
version = int(versions[-1]['name']) + 1
versions = sorted(versions)
version = int(versions[-1]) + 1
self.version_spinbox.setValue(version)
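The simplified lookup above relies on the queried version names being numeric; a plain `sorted()` over string names would order them lexicographically. A small illustration of the difference:

    sorted([1, 2, 9, 10])[-1] + 1        # -> 11, as intended
    sorted(["1", "2", "9", "10"])[-1]    # -> "9", not "10"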