Merge pull request #633 from pypeclub/bugfix/hiero-collect-review-repres

Fix error when collecting review representations from an image sequence
This commit is contained in:
Milan Kolar 2020-10-16 11:12:54 +02:00 committed by GitHub
commit c635543c85
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 398 additions and 265 deletions

View file

@ -143,8 +143,8 @@ class CollectClips(api.ContextPlugin):
"asset": asset,
"family": "clip",
"families": [],
"handleStart": projectdata.get("handleStart", 0),
"handleEnd": projectdata.get("handleEnd", 0),
"handleStart": int(projectdata.get("handleStart", 0)),
"handleEnd": int(projectdata.get("handleEnd", 0)),
"fps": context.data["fps"]
})
instance = context.create_instance(**data)

View file

@ -1,8 +1,10 @@
from pyblish import api
import os
import re
import clique
class CollectReviews(api.InstancePlugin):
class CollectReview(api.InstancePlugin):
"""Collect review from tags.
Tag is expected to have metadata:
@ -14,11 +16,13 @@ class CollectReviews(api.InstancePlugin):
# Run just before CollectSubsets
order = api.CollectorOrder + 0.1022
label = "Collect Reviews"
label = "Collect Review"
hosts = ["hiero"]
families = ["plate"]
def process(self, instance):
is_sequence = instance.data["isSequence"]
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
@ -83,7 +87,29 @@ class CollectReviews(api.InstancePlugin):
file_path = rev_inst.data.get("sourcePath")
file_dir = os.path.dirname(file_path)
file = os.path.basename(file_path)
ext = os.path.splitext(file)[-1][1:]
ext = os.path.splitext(file)[-1]
# detect if sequence
if not is_sequence:
# is video file
files = file
else:
files = list()
source_first = instance.data["sourceFirst"]
self.log.debug("_ file: {}".format(file))
spliter, padding = self.detect_sequence(file)
self.log.debug("_ spliter, padding: {}, {}".format(
spliter, padding))
base_name = file.split(spliter)[0]
collection = clique.Collection(base_name, ext, padding, set(range(
int(source_first + rev_inst.data.get("sourceInH")),
int(source_first + rev_inst.data.get("sourceOutH") + 1))))
self.log.debug("_ collection: {}".format(collection))
real_files = os.listdir(file_dir)
for item in collection:
if item not in real_files:
continue
files.append(item)
# change label
instance.data["label"] = "{0} - {1} - ({2})".format(
@ -94,7 +120,7 @@ class CollectReviews(api.InstancePlugin):
# adding representation for review mov
representation = {
"files": file,
"files": files,
"stagingDir": file_dir,
"frameStart": rev_inst.data.get("sourceIn"),
"frameEnd": rev_inst.data.get("sourceOut"),
@ -102,9 +128,9 @@ class CollectReviews(api.InstancePlugin):
"frameEndFtrack": rev_inst.data.get("sourceOutH"),
"step": 1,
"fps": rev_inst.data.get("fps"),
"name": "preview",
"tags": ["preview", "ftrackreview"],
"ext": ext
"name": "review",
"tags": ["review", "ftrackreview"],
"ext": ext[1:]
}
media_duration = instance.data.get("mediaDuration")
@ -136,7 +162,12 @@ class CollectReviews(api.InstancePlugin):
source_path = instance.data["sourcePath"]
source_file = os.path.basename(source_path)
head, ext = os.path.splitext(source_file)
spliter, padding = self.detect_sequence(source_file)
if spliter:
head, ext = source_file.split(spliter)
else:
head, ext = os.path.splitext(source_file)
# staging dir creation
staging_dir = os.path.dirname(
@ -144,30 +175,28 @@ class CollectReviews(api.InstancePlugin):
media_duration = instance.data.get("mediaDuration")
clip_duration_h = instance.data.get("clipDurationH")
self.log.debug("__ media_duration: {}".format(media_duration))
self.log.debug("__ clip_duration_h: {}".format(clip_duration_h))
if media_duration > clip_duration_h:
thumb_frame = instance.data["clipInH"] + (
(instance.data["clipOutH"] - instance.data["clipInH"]) / 2)
elif media_duration <= clip_duration_h:
thumb_frame = instance.data["sourceIn"] + (
(instance.data["sourceOut"] - instance.data["sourceIn"]) / 2)
thumb_file = "{}_{}{}".format(head, thumb_frame, ".png")
thumb_frame = int(instance.data["sourceIn"] + (
(instance.data["sourceOut"] - instance.data["sourceIn"]) / 2))
thumb_file = "{}thumbnail{}{}".format(head, thumb_frame, ".png")
thumb_path = os.path.join(staging_dir, thumb_file)
self.log.debug("__ thumb_path: {}".format(thumb_path))
self.log.debug("__ thumb_frame: {}".format(thumb_frame))
self.log.debug(
"__ sourceIn: `{}`".format(instance.data["sourceIn"]))
thumbnail = item.thumbnail(thumb_frame).save(
thumb_path,
format='png'
)
self.log.debug(
"__ sourceIn: `{}`".format(instance.data["sourceIn"]))
self.log.debug(
"__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))
self.log.debug("__ thumbnail: {}".format(thumbnail))
thumb_representation = {
'files': thumb_file,
'stagingDir': staging_dir,
@ -205,3 +234,26 @@ class CollectReviews(api.InstancePlugin):
instance.data["versionData"] = version_data
instance.data["source"] = instance.data["sourcePath"]
def detect_sequence(self, file):
    """Identify an image-sequence numbering pattern inside a file name.

    Recognizes three marker styles: explicit frame numbers
    (``file.0001.ext``), printf-style tokens (``file.%02d.ext``) and
    hash padding (``file.####.ext``).

    Args:
        file (str): file name to inspect.

    Returns:
        tuple: ``(pattern, padding)`` where ``pattern`` is the matched
            marker string and ``padding`` its digit count, or
            ``(None, None)`` when no sequence marker is present.
    """
    match = re.search(
        r"(#+)|(%\d+d)|(?<=[^a-zA-Z0-9])(\d+)(?=\.\w+$)", file)
    if not match:
        return None, None

    # exactly one alternation group can match; take the non-empty one
    token = next(group for group in match.groups() if group)
    if "%" in token:
        # printf token carries its padding as the embedded number
        padding = int(re.findall(r"\d+", token)[-1])
    else:
        # hash or digit run: padding is simply its length
        padding = len(token)
    return token, padding

View file

@ -0,0 +1,325 @@
import os
import sys
import six
import errno
from pyblish import api
import pype
import clique
from avalon.vendor import filelink
class ExtractReviewCutUp(pype.api.Extractor):
    """Cut up clips from long video file.

    Trims (or pads with black/silence) review media so it matches the
    instance frame range. Image sequences are handled by hardlinking the
    needed frames; single video files are cut with ffmpeg.

    NOTE(review): indentation below is reconstructed from a flattened
    diff rendering — confirm nesting against the repository file.
    """

    order = api.ExtractorOrder
    # order = api.CollectorOrder + 0.1023
    label = "Extract Review CutUp"
    hosts = ["hiero"]
    families = ["review"]

    # presets
    tags_addition = []

    def process(self, instance):
        """Produce cut-up representations for tagged review media.

        Only representations tagged ``_cut-bigger`` or ``_cut-smaller``
        are processed; each produces a new ``cut_up_preview``
        representation appended to ``instance.data["representations"]``.

        Args:
            instance (pyblish.api.Instance): published instance carrying
                representation, resolution and frame-range data.
        """
        inst_data = instance.data
        asset = inst_data['asset']

        # get representation and loop them
        representations = inst_data["representations"]

        # check if sequence
        is_sequence = inst_data["isSequence"]

        # get resolution default
        resolution_width = inst_data["resolutionWidth"]
        resolution_height = inst_data["resolutionHeight"]

        # frame range data
        media_duration = inst_data["mediaDuration"]

        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
        ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")

        # filter out mov and img sequences
        representations_new = representations[:]
        for repre in representations:
            input_args = list()
            output_args = list()

            tags = repre.get("tags", [])

            # check if supported tags are in representation for activation
            filter_tag = False
            for tag in ["_cut-bigger", "_cut-smaller"]:
                if tag in tags:
                    filter_tag = True
                    break
            if not filter_tag:
                continue

            self.log.debug("__ repre: {}".format(repre))

            files = repre.get("files")
            staging_dir = repre.get("stagingDir")
            fps = repre.get("fps")
            ext = repre.get("ext")

            # make paths
            full_output_dir = os.path.join(
                staging_dir, "cuts")

            if is_sequence:
                # image sequence: hardlink the wanted frames instead of
                # running ffmpeg
                new_files = list()

                # frame range delivery included handles
                frame_start = (
                    inst_data["frameStart"] - inst_data["handleStart"])
                frame_end = (
                    inst_data["frameEnd"] + inst_data["handleEnd"])
                self.log.debug("_ frame_start: {}".format(frame_start))
                self.log.debug("_ frame_end: {}".format(frame_end))

                # make collection from input files list
                collections, remainder = clique.assemble(files)
                # NOTE(review): assumes exactly one collection is found;
                # `remainder` is ignored — confirm inputs are clean
                collection = collections.pop()
                self.log.debug("_ collection: {}".format(collection))

                # name components
                head = collection.format("{head}")
                padding = collection.format("{padding}")
                tail = collection.format("{tail}")
                self.log.debug("_ head: {}".format(head))
                self.log.debug("_ padding: {}".format(padding))
                self.log.debug("_ tail: {}".format(tail))

                # make destination file with instance data
                # frame start and end range
                index = 0
                for image in collection:
                    # renumber frames so delivery starts at frame_start
                    dst_file_num = frame_start + index
                    dst_file_name = head + str(padding % dst_file_num) + tail
                    src = os.path.join(staging_dir, image)
                    dst = os.path.join(full_output_dir, dst_file_name)
                    self.log.info("Creating temp hardlinks: {}".format(dst))
                    self.hardlink_file(src, dst)
                    new_files.append(dst_file_name)
                    index += 1

                self.log.debug("_ new_files: {}".format(new_files))
            else:
                # ffmpeg when single file
                new_files = "{}_{}".format(asset, files)

                # frame range
                frame_start = repre.get("frameStart")
                frame_end = repre.get("frameEnd")

                full_input_path = os.path.join(
                    staging_dir, files)

                # create output dir if missing (short-circuit idiom)
                os.path.isdir(full_output_dir) or os.makedirs(full_output_dir)

                full_output_path = os.path.join(
                    full_output_dir, new_files)

                self.log.debug(
                    "__ full_input_path: {}".format(full_input_path))
                self.log.debug(
                    "__ full_output_path: {}".format(full_output_path))

                # check if audio stream is in input video file
                ffprob_cmd = (
                    "{ffprobe_path} -i \"{full_input_path}\" -show_streams "
                    "-select_streams a -loglevel error"
                ).format(**locals())
                self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
                audio_check_output = pype.api.subprocess(ffprob_cmd)
                self.log.debug(
                    "audio_check_output: {}".format(audio_check_output))

                # translate frame to sec
                start_sec = float(frame_start) / fps
                duration_sec = float(frame_end - frame_start + 1) / fps

                empty_add = None

                # check if not missing frames at start
                if (start_sec < 0) or (media_duration < frame_end):
                    # for later switching off `-c:v copy` output arg
                    empty_add = True

                    # init empty variables
                    video_empty_start = video_layer_start = ""
                    audio_empty_start = audio_layer_start = ""
                    video_empty_end = video_layer_end = ""
                    audio_empty_end = audio_layer_end = ""
                    audio_input = audio_output = ""
                    v_inp_idx = 0
                    concat_n = 1

                    # try to get video native resolution data
                    try:
                        resolution_output = pype.api.subprocess((
                            "{ffprobe_path} -i \"{full_input_path}\" -v error "
                            "-select_streams v:0 -show_entries "
                            "stream=width,height -of csv=s=x:p=0"
                        ).format(**locals()))

                        x, y = resolution_output.split("x")
                        resolution_width = int(x)
                        resolution_height = int(y)
                    except Exception as _ex:
                        # fall back to instance resolution on probe failure
                        self.log.warning(
                            "Video native resolution is untracable: {}".format(
                                _ex))

                    if audio_check_output:
                        # adding input for empty audio
                        input_args.append("-f lavfi -i anullsrc")

                        # define audio empty concat variables
                        audio_input = "[1:a]"
                        audio_output = ":a=1"
                        v_inp_idx = 1

                    # adding input for video black frame
                    input_args.append((
                        "-f lavfi -i \"color=c=black:"
                        "s={resolution_width}x{resolution_height}:r={fps}\""
                    ).format(**locals()))

                    if (start_sec < 0):
                        # recalculate input video timing
                        empty_start_dur = abs(start_sec)
                        start_sec = 0
                        duration_sec = float(frame_end - (
                            frame_start + (empty_start_dur * fps)) + 1) / fps

                        # define starting empty video concat variables
                        video_empty_start = (
                            "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];"  # noqa
                        ).format(**locals())
                        video_layer_start = "[gv0]"

                        if audio_check_output:
                            # define starting empty audio concat variables
                            audio_empty_start = (
                                "[0]atrim=duration={empty_start_dur}[ga0];"
                            ).format(**locals())
                            audio_layer_start = "[ga0]"

                        # alter concat number of clips
                        concat_n += 1

                    # check if not missing frames at the end
                    if (media_duration < frame_end):
                        # recalculate timing
                        empty_end_dur = float(
                            frame_end - media_duration + 1) / fps
                        duration_sec = float(
                            media_duration - frame_start) / fps

                        # define ending empty video concat variables
                        video_empty_end = (
                            "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
                        ).format(**locals())
                        video_layer_end = "[gv1]"

                        if audio_check_output:
                            # define ending empty audio concat variables
                            audio_empty_end = (
                                "[0]atrim=duration={empty_end_dur}[ga1];"
                            ).format(**locals())
                            # NOTE(review): the filter above defines label
                            # [ga1] but [ga0] is referenced here — looks
                            # like a typo that would break ffmpeg when only
                            # tail padding is needed; confirm and fix.
                            audio_layer_end = "[ga0]"

                        # alter concat number of clips
                        concat_n += 1

                    # concatenating black frames together with the clip
                    output_args.append((
                        "-filter_complex \""
                        "{audio_empty_start}"
                        "{video_empty_start}"
                        "{audio_empty_end}"
                        "{video_empty_end}"
                        "{video_layer_start}{audio_layer_start}[1:v]{audio_input}"  # noqa
                        "{video_layer_end}{audio_layer_end}"
                        "concat=n={concat_n}:v=1{audio_output}\""
                    ).format(**locals()))

                # append ffmpeg input video clip
                input_args.append("-ss {:0.2f}".format(start_sec))
                input_args.append("-t {:0.2f}".format(duration_sec))
                input_args.append("-i \"{}\"".format(full_input_path))

                # add copy audio video codec if only shortening clip
                if ("_cut-bigger" in tags) and (not empty_add):
                    output_args.append("-c:v copy")

                # make sure it is having no frame to frame compression
                output_args.append("-intra")

                # output filename
                output_args.append("-y \"{}\"".format(full_output_path))

                mov_args = [
                    ffmpeg_path,
                    " ".join(input_args),
                    " ".join(output_args)
                ]
                subprcs_cmd = " ".join(mov_args)

                # run subprocess
                self.log.debug("Executing: {}".format(subprcs_cmd))
                output = pype.api.subprocess(subprcs_cmd)
                self.log.debug("Output: {}".format(output))

            repre_new = {
                "files": new_files,
                "stagingDir": full_output_dir,
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "frameStartFtrack": frame_start,
                "frameEndFtrack": frame_end,
                "step": 1,
                "fps": fps,
                "name": "cut_up_preview",
                "tags": ["review"] + self.tags_addition,
                "ext": ext,
                "anatomy_template": "publish"
            }

            representations_new.append(repre_new)

        # drop source representations marked for deletion
        # NOTE(review): removing items from the list being iterated can
        # skip elements — iterate a copy instead; confirm intended.
        for repre in representations_new:
            if ("delete" in repre.get("tags", [])) and (
                    "cut_up_preview" not in repre["name"]):
                representations_new.remove(repre)

        self.log.debug(
            "Representations: {}".format(representations_new))

        instance.data["representations"] = representations_new

    def hardlink_file(self, src, dst):
        """Hardlink `src` to `dst`, creating the destination folder.

        Pre-existing folders and links are tolerated (EEXIST is ignored);
        any other OSError is logged and re-raised with its traceback.

        Args:
            src (str): existing source file path.
            dst (str): destination path for the hardlink.
        """
        dirname = os.path.dirname(dst)

        # make sure the destination folder exist
        try:
            os.makedirs(dirname)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                self.log.critical("An unexpected error occurred.")
                six.reraise(*sys.exc_info())

        # create hardlinked file
        try:
            filelink.create(src, dst, filelink.HARDLINK)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                self.log.critical("An unexpected error occurred.")
                six.reraise(*sys.exc_info())

View file

@ -1,244 +0,0 @@
import os
from pyblish import api
import pype
class ExtractReviewCutUpVideo(pype.api.Extractor):
    """Cut up clips from long video file.

    Older, video-only variant: trims (or pads with black/silence) a single
    review video so it matches the requested frame range, via ffmpeg.

    NOTE(review): indentation below is reconstructed from a flattened
    diff rendering — confirm nesting against the repository file.
    """

    order = api.ExtractorOrder
    # order = api.CollectorOrder + 0.1023
    label = "Extract Review CutUp Video"
    hosts = ["hiero"]
    families = ["review"]

    # presets
    tags_addition = []

    def process(self, instance):
        """Produce cut-up video representations for tagged review media.

        Only representations tagged ``_cut-bigger`` or ``_cut-smaller``
        are processed; each produces a new ``cut_up_preview``
        representation appended to ``instance.data["representations"]``.

        Args:
            instance (pyblish.api.Instance): published instance carrying
                representation, resolution and frame-range data.
        """
        inst_data = instance.data
        asset = inst_data['asset']

        # get representation and loop them
        representations = inst_data["representations"]

        # get resolution default
        resolution_width = inst_data["resolutionWidth"]
        resolution_height = inst_data["resolutionHeight"]

        # frame range data
        media_duration = inst_data["mediaDuration"]

        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
        ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")

        # filter out mov and img sequences
        representations_new = representations[:]
        for repre in representations:
            input_args = list()
            output_args = list()

            tags = repre.get("tags", [])

            # check if supported tags are in representation for activation
            filter_tag = False
            for tag in ["_cut-bigger", "_cut-smaller"]:
                if tag in tags:
                    filter_tag = True
                    break
            if not filter_tag:
                continue

            self.log.debug("__ repre: {}".format(repre))

            file = repre.get("files")
            staging_dir = repre.get("stagingDir")
            frame_start = repre.get("frameStart")
            frame_end = repre.get("frameEnd")
            fps = repre.get("fps")
            ext = repre.get("ext")

            new_file_name = "{}_{}".format(asset, file)

            full_input_path = os.path.join(
                staging_dir, file)

            full_output_dir = os.path.join(
                staging_dir, "cuts")

            # create output dir if missing (short-circuit idiom)
            os.path.isdir(full_output_dir) or os.makedirs(full_output_dir)

            full_output_path = os.path.join(
                full_output_dir, new_file_name)

            self.log.debug("__ full_input_path: {}".format(full_input_path))
            self.log.debug("__ full_output_path: {}".format(full_output_path))

            # check if audio stream is in input video file
            ffprob_cmd = (
                "{ffprobe_path} -i \"{full_input_path}\" -show_streams "
                "-select_streams a -loglevel error"
            ).format(**locals())
            self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
            audio_check_output = pype.api.subprocess(ffprob_cmd)
            self.log.debug("audio_check_output: {}".format(audio_check_output))

            # translate frame to sec
            start_sec = float(frame_start) / fps
            duration_sec = float(frame_end - frame_start + 1) / fps

            empty_add = None

            # check if not missing frames at start
            if (start_sec < 0) or (media_duration < frame_end):
                # for later switching off `-c:v copy` output arg
                empty_add = True

                # init empty variables
                video_empty_start = video_layer_start = ""
                audio_empty_start = audio_layer_start = ""
                video_empty_end = video_layer_end = ""
                audio_empty_end = audio_layer_end = ""
                audio_input = audio_output = ""
                v_inp_idx = 0
                concat_n = 1

                # try to get video native resolution data
                try:
                    resolution_output = pype.api.subprocess((
                        "{ffprobe_path} -i \"{full_input_path}\" -v error "
                        "-select_streams v:0 -show_entries "
                        "stream=width,height -of csv=s=x:p=0"
                    ).format(**locals()))

                    x, y = resolution_output.split("x")
                    resolution_width = int(x)
                    resolution_height = int(y)
                except Exception as E:
                    # fall back to instance resolution on probe failure
                    self.log.warning(
                        "Video native resolution is untracable: {}".format(E))

                if audio_check_output:
                    # adding input for empty audio
                    input_args.append("-f lavfi -i anullsrc")

                    # define audio empty concat variables
                    audio_input = "[1:a]"
                    audio_output = ":a=1"
                    v_inp_idx = 1

                # adding input for video black frame
                input_args.append((
                    "-f lavfi -i \"color=c=black:"
                    "s={resolution_width}x{resolution_height}:r={fps}\""
                ).format(**locals()))

                if (start_sec < 0):
                    # recalculate input video timing
                    empty_start_dur = abs(start_sec)
                    start_sec = 0
                    duration_sec = float(frame_end - (
                        frame_start + (empty_start_dur * fps)) + 1) / fps

                    # define starting empty video concat variables
                    video_empty_start = (
                        "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];"
                    ).format(**locals())
                    video_layer_start = "[gv0]"

                    if audio_check_output:
                        # define starting empty audio concat variables
                        audio_empty_start = (
                            "[0]atrim=duration={empty_start_dur}[ga0];"
                        ).format(**locals())
                        audio_layer_start = "[ga0]"

                    # alter concat number of clips
                    concat_n += 1

                # check if not missing frames at the end
                if (media_duration < frame_end):
                    # recalculate timing
                    empty_end_dur = float(frame_end - media_duration + 1) / fps
                    duration_sec = float(media_duration - frame_start) / fps

                    # define ending empty video concat variables
                    video_empty_end = (
                        "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
                    ).format(**locals())
                    video_layer_end = "[gv1]"

                    if audio_check_output:
                        # define ending empty audio concat variables
                        audio_empty_end = (
                            "[0]atrim=duration={empty_end_dur}[ga1];"
                        ).format(**locals())
                        # NOTE(review): the filter above defines label
                        # [ga1] but [ga0] is referenced here — looks like
                        # a typo that would break ffmpeg when only tail
                        # padding is needed; confirm and fix.
                        audio_layer_end = "[ga0]"

                    # alter concat number of clips
                    concat_n += 1

                # concatenating black frames together with the clip
                output_args.append((
                    "-filter_complex \""
                    "{audio_empty_start}"
                    "{video_empty_start}"
                    "{audio_empty_end}"
                    "{video_empty_end}"
                    "{video_layer_start}{audio_layer_start}[1:v]{audio_input}"
                    "{video_layer_end}{audio_layer_end}"
                    "concat=n={concat_n}:v=1{audio_output}\""
                ).format(**locals()))

            # append ffmpeg input video clip
            input_args.append("-ss {:0.2f}".format(start_sec))
            input_args.append("-t {:0.2f}".format(duration_sec))
            input_args.append("-i \"{}\"".format(full_input_path))

            # add copy audio video codec if only shortening clip
            if ("_cut-bigger" in tags) and (not empty_add):
                output_args.append("-c:v copy")

            # make sure it is having no frame to frame compression
            output_args.append("-intra")

            # output filename
            output_args.append("-y \"{}\"".format(full_output_path))

            mov_args = [
                ffmpeg_path,
                " ".join(input_args),
                " ".join(output_args)
            ]
            subprcs_cmd = " ".join(mov_args)

            # run subprocess
            self.log.debug("Executing: {}".format(subprcs_cmd))
            output = pype.api.subprocess(subprcs_cmd)
            self.log.debug("Output: {}".format(output))

            repre_new = {
                "files": new_file_name,
                "stagingDir": full_output_dir,
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "frameStartFtrack": frame_start,
                "frameEndFtrack": frame_end,
                "step": 1,
                "fps": fps,
                "name": "cut_up_preview",
                "tags": ["review"] + self.tags_addition,
                "ext": ext,
                "anatomy_template": "publish"
            }

            representations_new.append(repre_new)

        # drop source representations marked for deletion
        # NOTE(review): removing items from the list being iterated can
        # skip elements — iterate a copy instead; confirm intended.
        for repre in representations_new:
            if ("delete" in repre.get("tags", [])) and (
                    "cut_up_preview" not in repre["name"]):
                representations_new.remove(repre)

        self.log.debug(
            "Representations: {}".format(representations_new))

        instance.data["representations"] = representations_new