Merge branch 'develop' into feature/PYPE-754_review_burnin_filtering

iLLiCiTiT 2020-05-20 16:04:47 +02:00
commit 6cc864d9b6
17 changed files with 562 additions and 96 deletions

View file

@@ -22,7 +22,7 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
ftrack_log.setLevel(logging.WARNING)
# Collect session
session = ftrack_api.Session()
session = ftrack_api.Session(auto_connect_event_hub=True)
self.log.debug("Ftrack user: \"{0}\"".format(session.api_user))
context.data["ftrackSession"] = session
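For context, a minimal sketch of what `auto_connect_event_hub=True` changes, assuming FTRACK_SERVER, FTRACK_API_USER and FTRACK_API_KEY are set in the environment (the topic name is hypothetical):

import ftrack_api
import ftrack_api.event.base

# connects the event hub alongside the API session
session = ftrack_api.Session(auto_connect_event_hub=True)

# published events now reach remote listeners immediately
session.event_hub.publish(
    ftrack_api.event.base.Event(topic="pype.demo", data={})
)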

View file

@@ -19,7 +19,7 @@ class ExtractBurnin(pype.api.Extractor):
label = "Extract burnins"
order = pyblish.api.ExtractorOrder + 0.03
families = ["review", "burnin"]
hosts = ["nuke", "maya", "shell", "premiere"]
hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"]
optional = True
positions = [

View file

@@ -22,7 +22,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
label = "Extract Review"
order = pyblish.api.ExtractorOrder + 0.02
families = ["review"]
hosts = ["nuke", "maya", "shell", "premiere"]
hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"]
# Supported extensions
image_exts = ["exr", "jpg", "jpeg", "png", "dpx"]
@@ -1095,6 +1095,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
def legacy_process(self, instance):
self.log.warning("Legacy review presets are used.")
def process(self, instance):
output_profiles = self.outputs or {}
inst_data = instance.data
@@ -1247,7 +1248,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_end + handle_end
if isinstance(repre["files"], list):
if frame_start_handle != repre.get("detectedStart", frame_start_handle):
if frame_start_handle != repre.get(
"detectedStart", frame_start_handle):
frame_start_handle = repre.get("detectedStart")
# exclude handle if no handles defined

View file

@@ -301,6 +301,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
sequence_repre = isinstance(files, list)
repre_context = None
if sequence_repre:
self.log.debug(
"files: {}".format(files))
src_collections, remainder = clique.assemble(files)
self.log.debug(
"src_tail_collections: {}".format(str(src_collections)))
@@ -347,6 +349,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
anatomy.templates["render"].get("padding")
)
)
index_frame_start = int(repre.get("frameStart"))
# exception for slate workflow
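For reference, a small sketch of what clique.assemble (used above to group the representation's files) returns for a mixed file list; file names are hypothetical:

import clique

files = ["render.1001.exr", "render.1002.exr", "render.1003.exr", "notes.txt"]
src_collections, remainder = clique.assemble(files)

for collection in src_collections:
    # head/tail surround the frame number; indexes holds the frames
    print(collection.head, collection.tail, sorted(collection.indexes))
    # prints: render. .exr [1001, 1002, 1003]

print(remainder)  # ["notes.txt"]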

View file

@@ -291,7 +291,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK",
"PYPE_USERNAME"
"PYPE_USERNAME",
"PYPE_DEV"
]
environment = dict({key: os.environ[key] for key in keys

View file

@@ -115,7 +115,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
# Add version data to instance
version_data = {
"colorspace": node["colorspace"].value(),
"colorspace": node["colorspace"].value(),
}
instance.data["family"] = "write"
@@ -150,6 +150,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"deadlinePriority": deadlinePriority
})
if "render" in families:
instance.data["family"] = "render2d"
if "render" not in families:
instance.data["families"].insert(0, "render")
if "prerender" in families:
instance.data.update({
"family": "prerender",

View file

@@ -183,7 +183,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"BatchName": responce_data["Props"]["Batch"],
"JobDependency0": responce_data["_id"],
"ChunkSize": 99999999
})
})
# Include critical environment variables with submission
keys = [
@@ -195,7 +195,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"FTRACK_SERVER",
"PYBLISHPLUGINPATH",
"NUKE_PATH",
"TOOL_ENV"
"TOOL_ENV",
"PYPE_DEV"
]
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
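Both Deadline submitters build the job environment the same way; a standalone sketch of the pattern, where session_data is a stand-in for avalon's api.Session mapping:

import os

keys = ["AVALON_PROJECT", "AVALON_ASSET", "PYPE_USERNAME", "PYPE_DEV"]
session_data = {"AVALON_PROJECT": "demo"}  # stand-in for api.Session

# forward only the variables that are actually set, then overlay session data
environment = dict(
    {key: os.environ[key] for key in keys if key in os.environ},
    **session_data
)
print(environment)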

View file

@@ -0,0 +1,90 @@
import pyblish.api
import opentimelineio.opentime as otio_ot
class CollectClipTimecodes(pyblish.api.InstancePlugin):
"""Collect time with OpenTimelineIO:
source_h(In,Out)[timecode, sec]
timeline(In,Out)[timecode, sec]
"""
order = pyblish.api.CollectorOrder + 0.101
label = "Collect Timecodes"
hosts = ["nukestudio"]
def process(self, instance):
data = dict()
self.log.debug("__ instance.data: {}".format(instance.data))
# Timeline data.
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
source_in_h = instance.data.get(
"sourceInH", instance.data["sourceIn"] - handle_start)
source_out_h = instance.data.get(
"sourceOutH", instance.data["sourceOut"] + handle_end)
timeline_in = instance.data["clipIn"]
timeline_out = instance.data["clipOut"]
# set frame start with tag or take it from timeline
frame_start = instance.data.get("startingFrame")
if not frame_start:
frame_start = timeline_in
source = instance.data.get("source")
otio_data = dict()
self.log.debug("__ source: `{}`".format(source))
rate_fps = instance.context.data["fps"]
otio_in_h_ratio = otio_ot.RationalTime(
value=(source.timecodeStart() + source_in_h),
rate=rate_fps)
otio_out_h_ratio = otio_ot.RationalTime(
value=(source.timecodeStart() + source_out_h),
rate=rate_fps)
otio_timeline_in_ratio = otio_ot.RationalTime(
value=int(
instance.data.get("timelineTimecodeStart", 0)) + timeline_in,
rate=rate_fps)
otio_timeline_out_ratio = otio_ot.RationalTime(
value=int(
instance.data.get("timelineTimecodeStart", 0)) + timeline_out,
rate=rate_fps)
otio_data.update({
"otioClipInHTimecode": otio_ot.to_timecode(otio_in_h_ratio),
"otioClipOutHTimecode": otio_ot.to_timecode(otio_out_h_ratio),
"otioClipInHSec": otio_ot.to_seconds(otio_in_h_ratio),
"otioClipOutHSec": otio_ot.to_seconds(otio_out_h_ratio),
"otioTimelineInTimecode": otio_ot.to_timecode(
otio_timeline_in_ratio),
"otioTimelineOutTimecode": otio_ot.to_timecode(
otio_timeline_out_ratio),
"otioTimelineInSec": otio_ot.to_seconds(otio_timeline_in_ratio),
"otioTimelineOutSec": otio_ot.to_seconds(otio_timeline_out_ratio)
})
data.update({
"otioData": otio_data,
"sourceTimecodeIn": otio_ot.to_timecode(otio_in_h_ratio),
"sourceTimecodeOut": otio_ot.to_timecode(otio_out_h_ratio)
})
instance.data.update(data)
self.log.debug("data: {}".format(instance.data))

View file

@@ -0,0 +1,21 @@
import pyblish.api
class CollectClipResolution(pyblish.api.InstancePlugin):
"""Collect clip geometry resolution"""
order = pyblish.api.CollectorOrder + 0.101
label = "Collect Clip Resoluton"
hosts = ["nukestudio"]
def process(self, instance):
sequence = instance.context.data['activeSequence']
resolution_width = int(sequence.format().width())
resolution_height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
instance.data.update({
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect
})

View file

@@ -47,11 +47,42 @@ class CollectClips(api.ContextPlugin):
track = item.parent()
source = item.source().mediaSource()
source_path = source.firstpath()
clip_in = int(item.timelineIn())
clip_out = int(item.timelineOut())
file_head = source.filenameHead()
file_info = next((f for f in source.fileinfos()), None)
source_first_frame = file_info.startFrame()
source_first_frame = int(file_info.startFrame())
is_sequence = False
self.log.debug(
"__ assets_shared: {}".format(
context.data["assetsShared"]))
# Check for clips with the same range
# this is for testing if any vertically neighbouring
# clips have already been processed
clip_matching_with_range = next(
(k for k, v in context.data["assetsShared"].items()
if (v.get("_clipIn", 0) == clip_in)
and (v.get("_clipOut", 0) == clip_out)
), False)
# check if clip name is the same in matched
# vertically neighbouring clip
# if it is, it is correct, so reset the variable to False
# and do not raise the wrong-name exception
if asset in str(clip_matching_with_range):
clip_matching_with_range = False
# raise wrong-name exception if one was found
assert (not clip_matching_with_range), (
"matching clip: {asset}"
" timeline range ({clip_in}:{clip_out})"
" conflicting with {clip_matching_with_range}"
" >> rename any of clips to be the same as the other <<"
).format(
**locals())
if not source.singleFile():
self.log.info("Single file")
is_sequence = True
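The range-collision check above reduces to a plain next() over the shared dict; a self-contained sketch with hypothetical values:

assets_shared = {
    "sh010": {"_clipIn": 100, "_clipOut": 150},
    "sh020": {"_clipIn": 200, "_clipOut": 260},
}
clip_in, clip_out = 100, 150

clip_matching_with_range = next(
    (k for k, v in assets_shared.items()
     if v.get("_clipIn", 0) == clip_in
     and v.get("_clipOut", 0) == clip_out),
    False)
print(clip_matching_with_range)  # "sh010" -- a vertically stacked clip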
@@ -89,32 +120,31 @@ class CollectClips(api.ContextPlugin):
)
data.update({
"name": "{0}_{1}".format(track.name(), item.name()),
"item": item,
"source": source,
"timecodeStart": str(source.timecodeStart()),
"timelineTimecodeStart": str(sequence.timecodeStart()),
"sourcePath": source_path,
"sourceFileHead": file_head,
"isSequence": is_sequence,
"track": track.name(),
"trackIndex": track_index,
"sourceFirst": source_first_frame,
"effects": effects,
"sourceIn": int(item.sourceIn()),
"sourceOut": int(item.sourceOut()),
"mediaDuration": (int(item.sourceOut()) -
int(item.sourceIn())) + 1,
"clipIn": int(item.timelineIn()),
"clipOut": int(item.timelineOut()),
"clipDuration": (
int(item.timelineOut()) - int(
item.timelineIn())) + 1,
"asset": asset,
"family": "clip",
"families": [],
"handleStart": projectdata.get("handleStart", 0),
"handleEnd": projectdata.get("handleEnd", 0)})
"name": "{0}_{1}".format(track.name(), item.name()),
"item": item,
"source": source,
"timecodeStart": str(source.timecodeStart()),
"timelineTimecodeStart": str(sequence.timecodeStart()),
"sourcePath": source_path,
"sourceFileHead": file_head,
"isSequence": is_sequence,
"track": track.name(),
"trackIndex": track_index,
"sourceFirst": source_first_frame,
"effects": effects,
"sourceIn": int(item.sourceIn()),
"sourceOut": int(item.sourceOut()),
"mediaDuration": int(source.duration()),
"clipIn": clip_in,
"clipOut": clip_out,
"clipDuration": (
int(item.timelineOut()) - int(
item.timelineIn())) + 1,
"asset": asset,
"family": "clip",
"families": [],
"handleStart": projectdata.get("handleStart", 0),
"handleEnd": projectdata.get("handleEnd", 0)})
instance = context.create_instance(**data)
@@ -122,7 +152,10 @@ class CollectClips(api.ContextPlugin):
self.log.info("Created instance.data: {}".format(instance.data))
self.log.debug(">> effects: {}".format(instance.data["effects"]))
context.data["assetsShared"][asset] = dict()
context.data["assetsShared"][asset] = {
"_clipIn": clip_in,
"_clipOut": clip_out
}
# from now we are collecting only subtrackitems on
# track with no video items

View file

@@ -35,14 +35,15 @@ class CollectClipFrameRanges(pyblish.api.InstancePlugin):
frame_end = frame_start + (timeline_out - timeline_in)
data.update(
{
"sourceInH": source_in_h,
"sourceOutH": source_out_h,
"frameStart": frame_start,
"frameEnd": frame_end,
"clipInH": timeline_in_h,
"clipOutH": timeline_out_h
data.update({
"sourceInH": source_in_h,
"sourceOutH": source_out_h,
"frameStart": frame_start,
"frameEnd": frame_end,
"clipInH": timeline_in_h,
"clipOutH": timeline_out_h,
"clipDurationH": instance.data.get(
"clipDuration") + handle_start + handle_end
}
)
self.log.debug("__ data: {}".format(data))

View file

@@ -1,5 +1,6 @@
from pyblish import api
class CollectFramerate(api.ContextPlugin):
"""Collect framerate from selected sequence."""
@@ -9,4 +10,13 @@ class CollectFramerate(api.ContextPlugin):
def process(self, context):
sequence = context.data["activeSequence"]
context.data["fps"] = sequence.framerate().toFloat()
context.data["fps"] = self.get_rate(sequence)
def get_rate(self, sequence):
num, den = sequence.framerate().toRational()
rate = float(num) / float(den)
if rate.is_integer():
return rate
return round(rate, 3)
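A quick standalone check of the rounding behaviour (same logic as get_rate above):

def get_rate(num, den):
    rate = float(num) / float(den)
    if rate.is_integer():
        return rate
    return round(rate, 3)

print(get_rate(24000, 1001))  # 23.976 (NTSC film)
print(get_rate(25, 1))        # 25.0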

View file

@@ -37,11 +37,13 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
assets_shared = context.data.get("assetsShared")
tags = instance.data.get("tags", None)
clip = instance.data["item"]
asset = instance.data.get("asset")
asset = instance.data["asset"]
sequence = context.data['activeSequence']
width = int(sequence.format().width())
height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
resolution_width = instance.data["resolutionWidth"]
resolution_height = instance.data["resolutionHeight"]
pixel_aspect = instance.data["pixelAspect"]
clip_in = instance.data["clipIn"]
clip_out = instance.data["clipOut"]
fps = context.data["fps"]
# build data for inner nukestudio project property
@@ -72,6 +74,31 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
# and finding only hierarchical tag
if "hierarchy" in t_type.lower():
# Check for clips with the same range
# this is for testing if any vertically neighbouring
# clips have already been processed
match = next((
k for k, v in assets_shared.items()
if (v["_clipIn"] == clip_in)
and (v["_clipOut"] == clip_out)
), False)
if match:
self.log.debug(
"__ assets_shared[match]: {}".format(
assets_shared[match]))
# check if hierarchy key is present in matched
# vertically neighbouring clip
if not assets_shared[match].get("hierarchy"):
match = False
# raise exception if multiple hierarchy tags were found
assert not match, (
"Two clips above each other with"
" hierarchy tag are not allowed"
" >> keep hierarchy tag only in one of them <<"
)
d_metadata = dict()
parents = list()
@@ -82,7 +109,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
if "shot" in template.lower():
instance.data["asset"] = [
t for t in template.split('/')][-1]
template = "/".join([t for t in template.split('/')][0:-1])
template = "/".join(
[t for t in template.split('/')][0:-1])
# take template from Tag.note and break it into parts
template_split = template.split("/")
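How the shot name and parent path fall out of the tag's template (the template value here is hypothetical):

template = "{project[name]}/shots/sq01/sh010"

asset = template.split("/")[-1]                 # "sh010"
template = "/".join(template.split("/")[0:-1])  # "{project[name]}/shots/sq01"
template_split = template.split("/")
print(asset, template_split)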
@@ -149,8 +177,12 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
instance.data["hierarchy"] = hierarchy
instance.data["parents"] = parents
self.log.info(
"clip: {asset}[{clip_in}:{clip_out}]".format(
**locals()))
# adding to asset shared dict
self.log.debug("__ assets_shared: {}".format(assets_shared))
self.log.debug(
"__ assets_shared: {}".format(assets_shared))
if assets_shared.get(asset):
self.log.debug("Adding to shared assets: `{}`".format(
asset))
@@ -162,11 +194,11 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"asset": asset,
"hierarchy": hierarchy,
"parents": parents,
"resolutionWidth": width,
"resolutionHeight": height,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect,
"fps": fps,
"tasks": instance.data["tasks"]
"tasks": instance.data["tasks"]
})
# adding frame start if any on instance
@@ -175,8 +207,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
asset_shared.update({
"startingFrame": start_frame
})
self.log.debug(
"assets_shared: {assets_shared}".format(**locals()))
class CollectHierarchyContext(pyblish.api.ContextPlugin):
'''Collecting Hierarchy from instances and building

View file

@@ -64,15 +64,15 @@ class CollectPlates(api.InstancePlugin):
# adding SourceResolution if Tag was present
if instance.data.get("sourceResolution") and instance.data.get("main"):
item = instance.data["item"]
width = int(item.source().mediaSource().width())
height = int(item.source().mediaSource().height())
resolution_width = int(item.source().mediaSource().width())
resolution_height = int(item.source().mediaSource().height())
pixel_aspect = int(item.source().mediaSource().pixelAspect())
self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format(
width, height, pixel_aspect))
resolution_width, resolution_height, pixel_aspect))
data.update({
"width": width,
"height": height,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect
})
@@ -102,14 +102,6 @@ class CollectPlatesData(api.InstancePlugin):
instance.data["representations"] = list()
version_data = dict()
context = instance.context
anatomy = context.data.get("anatomy", None)
padding = int(
anatomy.templates["render"].get(
"frame_padding",
anatomy.templates["render"].get("padding")
)
)
name = instance.data["subset"]
source_path = instance.data["sourcePath"]
@@ -154,6 +146,7 @@ class CollectPlatesData(api.InstancePlugin):
source_first_frame = instance.data.get("sourceFirst")
source_file_head = instance.data.get("sourceFileHead")
self.log.debug("source_first_frame: `{}`".format(source_first_frame))
if instance.data.get("isSequence", False):
self.log.info("Is sequence of files")
@@ -190,8 +183,7 @@ class CollectPlatesData(api.InstancePlugin):
"frameEnd": instance.data["sourceOut"] - instance.data["sourceIn"] + 1,
'step': 1,
'fps': instance.context.data["fps"],
'preview': True,
'thumbnail': False,
'tags': ["preview"],
'name': "preview",
'ext': "mov",
}
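The old preview/thumbnail booleans are replaced by a tags list here; a sketch of the resulting representation dict (file name and staging dir are hypothetical):

representation = {
    "files": "plate_v001.mov",
    "stagingDir": "/tmp/staging",
    "frameStart": 0,
    "frameEnd": 100,
    "step": 1,
    "fps": 25.0,
    "name": "preview",
    "ext": "mov",
    "tags": ["preview"],  # downstream plugins now key off tags
}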

View file

@@ -36,9 +36,10 @@ class CollectReviews(api.InstancePlugin):
return
if not track:
self.log.debug(
"Skipping \"{}\" because tag is not having `track` in metadata".format(instance)
)
self.log.debug((
"Skipping \"{}\" because tag does not have "
"`track` in metadata"
).format(instance))
return
# add to representations
@@ -68,18 +69,17 @@ class CollectReviews(api.InstancePlugin):
rev_inst.data["name"]))
if rev_inst is None:
raise RuntimeError(
"TrackItem from track name `{}` has to be also selected".format(
track)
)
raise RuntimeError((
"TrackItem from track name `{}` has to "
"also be selected"
).format(track))
instance.data["families"].append("review")
file_path = rev_inst.data.get("sourcePath")
file_dir = os.path.dirname(file_path)
file = os.path.basename(file_path)
ext = os.path.splitext(file)[-1][1:]
handleStart = rev_inst.data.get("handleStart")
handleEnd = rev_inst.data.get("handleEnd")
# change label
instance.data["label"] = "{0} - {1} - ({2}) - review".format(
@@ -94,15 +94,35 @@ class CollectReviews(api.InstancePlugin):
"stagingDir": file_dir,
"frameStart": rev_inst.data.get("sourceIn"),
"frameEnd": rev_inst.data.get("sourceOut"),
"frameStartFtrack": rev_inst.data.get("sourceIn") - handleStart,
"frameEndFtrack": rev_inst.data.get("sourceOut") + handleEnd,
"frameStartFtrack": rev_inst.data.get("sourceInH"),
"frameEndFtrack": rev_inst.data.get("sourceOutH"),
"step": 1,
"fps": rev_inst.data.get("fps"),
"preview": True,
"thumbnail": False,
"name": "preview",
"tags": ["preview"],
"ext": ext
}
media_duration = instance.data.get("mediaDuration")
clip_duration_h = instance.data.get("clipDurationH")
if media_duration > clip_duration_h:
self.log.debug("Media duration higher: {}".format(
(media_duration - clip_duration_h)))
representation.update({
"frameStart": instance.data.get("sourceInH"),
"frameEnd": instance.data.get("sourceOutH"),
"tags": ["_cut-bigger", "delete"]
})
elif media_duration < clip_duration_h:
self.log.debug("Media duration lower: {}".format(
(media_duration - clip_duration_h)))
representation.update({
"frameStart": instance.data.get("sourceInH"),
"frameEnd": instance.data.get("sourceOutH"),
"tags": ["_cut-smaller", "delete"]
})
instance.data["representations"].append(representation)
self.log.debug("Added representation: {}".format(representation))
@@ -122,15 +142,18 @@ class CollectReviews(api.InstancePlugin):
thumb_path = os.path.join(staging_dir, thumb_file)
self.log.debug("__ thumb_path: {}".format(thumb_path))
thumb_frame = instance.data["sourceIn"] + ((instance.data["sourceOut"] - instance.data["sourceIn"])/2)
thumb_frame = instance.data["sourceIn"] + (
(instance.data["sourceOut"] - instance.data["sourceIn"]) / 2)
self.log.debug("__ thumb_frame: {}".format(thumb_frame))
thumbnail = item.thumbnail(thumb_frame).save(
thumb_path,
format='png'
)
self.log.debug("__ sourceIn: `{}`".format(instance.data["sourceIn"]))
self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))
self.log.debug(
"__ sourceIn: `{}`".format(instance.data["sourceIn"]))
self.log.debug(
"__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))
self.log.debug("__ thumbnail: {}".format(thumbnail))

View file

@@ -0,0 +1,245 @@
import os
from pyblish import api
import pype
class ExtractReviewCutUpVideo(pype.api.Extractor):
"""Cut up clips from long video file"""
order = api.ExtractorOrder
# order = api.CollectorOrder + 0.1023
label = "Extract Review CutUp Video"
hosts = ["nukestudio"]
families = ["review"]
# presets
tags_addition = []
def process(self, instance):
inst_data = instance.data
asset = inst_data['asset']
# get representation and loop them
representations = inst_data["representations"]
# get resolution default
resolution_width = inst_data["resolutionWidth"]
resolution_height = inst_data["resolutionHeight"]
# frame range data
media_duration = inst_data["mediaDuration"]
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
input_args = list()
output_args = list()
tags = repre.get("tags", [])
# check if supported tags are in representation for activation
filter_tag = False
for tag in ["_cut-bigger", "_cut-smaller"]:
if tag in tags:
filter_tag = True
break
if not filter_tag:
continue
self.log.debug("__ repre: {}".format(repre))
file = repre.get("files")
staging_dir = repre.get("stagingDir")
frame_start = repre.get("frameStart")
frame_end = repre.get("frameEnd")
fps = repre.get("fps")
ext = repre.get("ext")
new_file_name = "{}_{}".format(asset, file)
full_input_path = os.path.join(
staging_dir, file)
full_output_dir = os.path.join(
staging_dir, "cuts")
os.path.isdir(full_output_dir) or os.makedirs(full_output_dir)
full_output_path = os.path.join(
full_output_dir, new_file_name)
self.log.debug("__ full_input_path: {}".format(full_input_path))
self.log.debug("__ full_output_path: {}".format(full_output_path))
# check if audio stream is in input video file
ffprob_cmd = (
"{ffprobe_path} -i {full_input_path} -show_streams "
"-select_streams a -loglevel error"
).format(**locals())
self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
audio_check_output = pype.api.subprocess(ffprob_cmd)
self.log.debug("audio_check_output: {}".format(audio_check_output))
# translate frame to sec
start_sec = float(frame_start) / fps
duration_sec = float(frame_end - frame_start + 1) / fps
empty_add = None
# check if frames are missing at start or end
if (start_sec < 0) or (media_duration < frame_end):
# for later switching off `-c:v copy` output arg
empty_add = True
# init empty variables
video_empty_start = video_layer_start = ""
audio_empty_start = audio_layer_start = ""
video_empty_end = video_layer_end = ""
audio_empty_end = audio_layer_end = ""
audio_input = audio_output = ""
v_inp_idx = 0
concat_n = 1
# try to get video native resolution data
try:
resolution_output = pype.api.subprocess((
"{ffprobe_path} -i {full_input_path} -v error "
"-select_streams v:0 -show_entries "
"stream=width,height -of csv=s=x:p=0"
).format(**locals()))
x, y = resolution_output.split("x")
resolution_width = int(x)
resolution_height = int(y)
except Exception as E:
self.log.warning(
"Video native resolution is untracable: {}".format(E))
if audio_check_output:
# adding input for empty audio
input_args.append("-f lavfi -i anullsrc")
# define audio empty concat variables
audio_input = "[1:a]"
audio_output = ":a=1"
v_inp_idx = 1
# adding input for video black frame
input_args.append((
"-f lavfi -i \"color=c=black:"
"s={resolution_width}x{resolution_height}:r={fps}\""
).format(**locals()))
if (start_sec < 0):
# recalculate input video timing
empty_start_dur = abs(start_sec)
start_sec = 0
duration_sec = float(frame_end - (
frame_start + (empty_start_dur * fps)) + 1) / fps
# define starting empty video concat variables
video_empty_start = (
"[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];"
).format(**locals())
video_layer_start = "[gv0]"
if audio_check_output:
# define starting empty audio concat variables
audio_empty_start = (
"[0]atrim=duration={empty_start_dur}[ga0];"
).format(**locals())
audio_layer_start = "[ga0]"
# alter concat number of clips
concat_n += 1
# check if frames are missing at the end
if (media_duration < frame_end):
# recalculate timing
empty_end_dur = float(frame_end - media_duration + 1) / fps
duration_sec = float(media_duration - frame_start) / fps
# define ending empty video concat variables
video_empty_end = (
"[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
).format(**locals())
video_layer_end = "[gv1]"
if audio_check_output:
# define ending empty audio concat variables
audio_empty_end = (
"[0]atrim=duration={empty_end_dur}[ga1];"
).format(**locals())
audio_layer_end = "[ga0]"
# alter concat number of clips
concat_n += 1
# concatenating black frames together
output_args.append((
"-filter_complex \""
"{audio_empty_start}"
"{video_empty_start}"
"{audio_empty_end}"
"{video_empty_end}"
"{video_layer_start}{audio_layer_start}[1:v]{audio_input}"
"{video_layer_end}{audio_layer_end}"
"concat=n={concat_n}:v=1{audio_output}\""
).format(**locals()))
# append ffmpeg input video clip
input_args.append("-ss {:0.2f}".format(start_sec))
input_args.append("-t {:0.2f}".format(duration_sec))
input_args.append("-i {}".format(full_input_path))
# copy the video codec only when merely shortening the clip
if ("_cut-bigger" in tags) and (not empty_add):
output_args.append("-c:v copy")
# make sure there is no inter-frame compression
output_args.append("-intra")
# output filename
output_args.append("-y")
output_args.append(full_output_path)
mov_args = [
ffmpeg_path,
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
repre_new = {
"files": new_file_name,
"stagingDir": full_output_dir,
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartFtrack": frame_start,
"frameEndFtrack": frame_end,
"step": 1,
"fps": fps,
"name": "cut_up_preview",
"tags": ["review", "delete"] + self.tags_addition,
"ext": ext,
"anatomy_template": "publish"
}
representations_new.append(repre_new)
# iterate over a copy so removing items does not skip any
for repre in list(representations_new):
if ("delete" in repre.get("tags", [])) and (
"cut_up_preview" not in repre["name"]):
representations_new.remove(repre)
self.log.debug(
"Representations: {}".format(representations_new))
instance.data["representations"] = representations_new