feat(global, resolve): publishing with otio wip

This commit is contained in:
Jakub Jezek 2020-12-08 16:03:33 +01:00
parent e22aceab4b
commit 042a4e643b
No known key found for this signature in database
GPG key ID: C4B96E101D2A47F3
8 changed files with 526 additions and 61 deletions

View file

@@ -12,6 +12,14 @@ def timecode_to_frames(timecode, framerate):
)
def frames_to_timecode(frames, framerate):
return '{0:02d}:{1:02d}:{2:02d}:{3:02d}'.format(
int(frames / (3600 * framerate)),
int(frames / (60 * framerate) % 60),
int(frames / framerate % 60),
int(frames % framerate))
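# Quick sanity check for the two converters above (illustrative values,
# assuming an integer frame rate):
#   frames_to_timecode(90061, 24) -> '01:02:32:13'
#   timecode_to_frames('01:02:32:13', 24) -> 90061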
def get_reformated_path(path, padded=True):
"""
Return fixed Python expression path

View file

@@ -61,7 +61,8 @@ from .ffmpeg_utils import (
from .editorial import (
is_overlapping_otio_ranges,
convert_otio_range_to_frame_range,
otio_range_to_frame_range,
otio_range_with_handles,
convert_to_padded_path
)
@@ -111,6 +112,7 @@ __all__ = [
"_subprocess",
"is_overlapping_otio_ranges",
"convert_otio_range_to_frame_range",
"otio_range_to_frame_range",
"otio_range_with_handles",
"convert_to_padded_path"
]

View file

@@ -1,8 +1,9 @@
import re
from opentimelineio.opentime import to_frames
from opentimelineio.opentime import (
to_frames, RationalTime, TimeRange)
def convert_otio_range_to_frame_range(otio_range):
def otio_range_to_frame_range(otio_range):
start = to_frames(
otio_range.start_time, otio_range.start_time.rate)
end = start + to_frames(
@@ -10,9 +11,23 @@ def convert_otio_range_to_frame_range(otio_range):
return start, end
def otio_range_with_handles(otio_range, instance):
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
handles_duration = handle_start + handle_end
fps = float(otio_range.start_time.rate)
start = to_frames(otio_range.start_time, fps)
duration = to_frames(otio_range.duration, fps)
return TimeRange(
start_time=RationalTime((start - handle_start), fps),
duration=RationalTime((duration + handles_duration), fps)
)
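# Worked example (illustrative numbers): a clip spanning timeline frames
# 100-148 at 24 fps, with handleStart=10 and handleEnd=10 on the instance,
# resolves to TimeRange(RationalTime(90, 24), RationalTime(69, 24)), i.e.
# the range grows by one handle on each side.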
def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False):
test_start, test_end = convert_otio_range_to_frame_range(test_otio_range)
main_start, main_end = convert_otio_range_to_frame_range(main_otio_range)
test_start, test_end = otio_range_to_frame_range(test_otio_range)
main_start, main_end = otio_range_to_frame_range(main_otio_range)
covering_exp = bool(
(test_start <= main_start) and (test_end >= main_end)
)
@@ -20,10 +35,10 @@ def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False):
(test_start >= main_start) and (test_end <= main_end)
)
overlaying_right_exp = bool(
(test_start < main_end) and (test_end >= main_end)
(test_start <= main_end) and (test_end >= main_end)
)
overlaying_left_exp = bool(
(test_end > main_start) and (test_start <= main_start)
(test_end >= main_start) and (test_start <= main_start)
)
if not strict:
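A minimal sketch of the relaxed overlap test above, assuming ranges built with opentimelineio (values are illustrative):

from opentimelineio.opentime import RationalTime, TimeRange

main = TimeRange(RationalTime(100, 24), RationalTime(48, 24))  # frames 100-148
test = TimeRange(RationalTime(140, 24), RationalTime(48, 24))  # frames 140-188

# `test` only overlaps `main` on the right edge, so the relaxed check
# passes where a strict (covering) check would not
assert is_overlapping_otio_ranges(test, main, strict=False)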

View file

@@ -0,0 +1,72 @@
"""
Requires:
otioTimeline -> context data attribute
review -> instance data attribute
masterLayer -> instance data attribute
otioClipRange -> instance data attribute
"""
# import os
import opentimelineio as otio
import pyblish.api
import pype.lib
from pprint import pformat
class CollectOtioFrameRanges(pyblish.api.InstancePlugin):
"""Get otio ranges from the otio clip.
Adds timeline and source ranges to instance data."""
label = "Collect OTIO Frame Ranges"
order = pyblish.api.CollectorOrder - 0.58
families = ["clip"]
hosts = ["resolve"]
def process(self, instance):
# get basic variables
otio_clip = instance.data["otioClip"]
workfile_start = instance.data["workfileFrameStart"]
# get ranges
otio_tl_range = otio_clip.range_in_parent()
self.log.debug(otio_tl_range)
otio_src_range = otio_clip.source_range
otio_available_range = otio_clip.available_range()
self.log.debug(otio_available_range)
otio_tl_range_handles = pype.lib.otio_range_with_handles(
otio_tl_range, instance)
self.log.debug(otio_tl_range_handles)
otio_src_range_handles = pype.lib.otio_range_with_handles(
otio_src_range, instance)
# get source available start frame
src_starting_from = otio.opentime.to_frames(
otio_available_range.start_time,
otio_available_range.start_time.rate)
# convert to frames
range_convert = pype.lib.otio_range_to_frame_range
tl_start, tl_end = range_convert(otio_tl_range)
tl_start_h, tl_end_h = range_convert(otio_tl_range_handles)
src_start, src_end = range_convert(otio_src_range)
src_start_h, src_end_h = range_convert(otio_src_range_handles)
frame_start = workfile_start
frame_end = frame_start + otio.opentime.to_frames(
otio_tl_range.duration, otio_tl_range.duration.rate) - 1
data = {
"frameStart": frame_start,
"frameEnd": frame_end,
"clipStart": tl_start,
"clipEnd": tl_end,
"clipStartH": tl_start_h,
"clipEndH": tl_end_h,
"sourceStart": src_starting_from + src_start,
"sourceEnd": src_starting_from + src_end,
"sourceStartH": src_starting_from + src_start_h,
"sourceEndH": src_starting_from + src_end_h,
}
instance.data.update(data)
self.log.debug(
"_ data: {}".format(pformat(data)))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))

View file

@@ -5,21 +5,23 @@ Requires:
masterLayer -> instance data attribute
otioClipRange -> instance data attribute
"""
import os
# import os
import opentimelineio as otio
import pyblish.api
import pype.lib
from pprint import pformat
class CollectOtioReview(pyblish.api.InstancePlugin):
"""Get matching otio clips from the defined review layer"""
label = "Collect OTIO review"
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder - 0.57
families = ["clip"]
hosts = ["resolve"]
def process(self, instance):
otio_review_clips = list()
# get basic variables
review_track_name = instance.data["review"]
master_layer = instance.data["masterLayer"]
@@ -36,53 +38,18 @@ class CollectOtioReview(pyblish.api.InstancePlugin):
if track_name not in review_track_name:
continue
if isinstance(_otio_clip, otio.schema.Clip):
test_start, test_end = pype.lib.otio_range_to_frame_range(
parent_range)
main_start, main_end = pype.lib.otio_range_to_frame_range(
otio_clip_range)
if pype.lib.is_overlapping_otio_ranges(
parent_range, otio_clip_range, strict=False):
self.create_representation(
_otio_clip, otio_clip_range, instance)
# add found clips to list
otio_review_clips.append(_otio_clip)
def create_representation(self, otio_clip, to_otio_range, instance):
to_tl_start, to_tl_end = pype.lib.convert_otio_range_to_frame_range(
to_otio_range)
tl_start, tl_end = pype.lib.convert_otio_range_to_frame_range(
otio_clip.range_in_parent())
source_start, source_end = pype.lib.convert_otio_range_to_frame_range(
otio_clip.source_range)
media_reference = otio_clip.media_reference
metadata = media_reference.metadata
mr_start, mr_end = pype.lib.convert_otio_range_to_frame_range(
media_reference.available_range)
path = media_reference.target_url
reference_frame_start = (mr_start + source_start) + (
to_tl_start - tl_start)
reference_frame_end = (mr_start + source_end) - (
tl_end - to_tl_end)
instance.data["otioReviewClip"] = otio_review_clips
self.log.debug(
"_ otio_review_clips: {}".format(otio_review_clips))
base_name = os.path.basename(path)
staging_dir = os.path.dirname(path)
ext = os.path.splitext(base_name)[1][1:]
if metadata.get("isSequence"):
files = list()
padding = metadata["padding"]
base_name = pype.lib.convert_to_padded_path(base_name, padding)
for index in range(
reference_frame_start, (reference_frame_end + 1)):
file_name = base_name % index
path_test = os.path.join(staging_dir, file_name)
if os.path.exists(path_test):
files.append(file_name)
self.log.debug(files)
else:
files = base_name
representation = {
"ext": ext,
"name": ext,
"files": files,
"frameStart": reference_frame_start,
"frameEnd": reference_frame_end,
"stagingDir": staging_dir
}
self.log.debug(representation)
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))

View file

@@ -0,0 +1,403 @@
import os
import sys
import six
import errno
from pyblish import api
import pype
import clique
from avalon.vendor import filelink
class ExtractOTIOReview(pype.api.Extractor):
"""Extract OTIO timeline into one concuted video file"""
# order = api.ExtractorOrder
order = api.CollectorOrder + 0.1023
label = "Extract OTIO review"
hosts = ["resolve"]
families = ["review_otio"]
# presets
tags_addition = []
def process(self, instance):
# self.create_representation(
# _otio_clip, otio_clip_range, instance)
""""
Expecting (instance.data):
otioClip (otio.schema.clip): clip from otio timeline
otioReviewClips (list): list with instances of otio.schema.clip
or otio.schema.gap
Process description:
Comparing `otioClip` parent range with `otioReviewClip` parent range will result in frame range witch is the trimmed cut. In case more otio clips or otio gaps are found in otioReviewClips then ffmpeg will generate multiple clips and those are then concuted together to one video file or image sequence. Resulting files are then added to instance as representation ready for review family plugins.
""""
inst_data = instance.data
asset = inst_data['asset']
item = inst_data['item']
event_number = int(item.eventNumber())
# get representation and loop them
representations = inst_data["representations"]
# check if sequence
is_sequence = inst_data["isSequence"]
# get resolution default
resolution_width = inst_data["resolutionWidth"]
resolution_height = inst_data["resolutionHeight"]
# frame range data
media_duration = inst_data["mediaDuration"]
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
input_args = list()
output_args = list()
tags = repre.get("tags", [])
# check if supported tags are in representation for activation
filter_tag = False
for tag in ["_cut-bigger", "_cut-smaller"]:
if tag in tags:
filter_tag = True
break
if not filter_tag:
continue
self.log.debug("__ repre: {}".format(repre))
files = repre.get("files")
staging_dir = repre.get("stagingDir")
fps = repre.get("fps")
ext = repre.get("ext")
# make paths
full_output_dir = os.path.join(
staging_dir, "cuts")
if is_sequence:
new_files = list()
# delivered frame range includes handles
frame_start = (
inst_data["frameStart"] - inst_data["handleStart"])
frame_end = (
inst_data["frameEnd"] + inst_data["handleEnd"])
self.log.debug("_ frame_start: {}".format(frame_start))
self.log.debug("_ frame_end: {}".format(frame_end))
# make collection from input files list
collections, remainder = clique.assemble(files)
collection = collections.pop()
self.log.debug("_ collection: {}".format(collection))
# name components
head = collection.format("{head}")
padding = collection.format("{padding}")
tail = collection.format("{tail}")
self.log.debug("_ head: {}".format(head))
self.log.debug("_ padding: {}".format(padding))
self.log.debug("_ tail: {}".format(tail))
# make destination file with instance data
# frame start and end range
index = 0
for image in collection:
dst_file_num = frame_start + index
dst_file_name = "".join([
str(event_number),
head,
str(padding % dst_file_num),
tail
])
src = os.path.join(staging_dir, image)
dst = os.path.join(full_output_dir, dst_file_name)
self.log.info("Creating temp hardlinks: {}".format(dst))
self.hardlink_file(src, dst)
new_files.append(dst_file_name)
index += 1
self.log.debug("_ new_files: {}".format(new_files))
else:
# ffmpeg when single file
new_files = "{}_{}".format(asset, files)
# frame range
frame_start = repre.get("frameStart")
frame_end = repre.get("frameEnd")
full_input_path = os.path.join(
staging_dir, files)
os.path.isdir(full_output_dir) or os.makedirs(full_output_dir)
full_output_path = os.path.join(
full_output_dir, new_files)
self.log.debug(
"__ full_input_path: {}".format(full_input_path))
self.log.debug(
"__ full_output_path: {}".format(full_output_path))
# check if audio stream is in input video file
ffprobe_cmd = (
"\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams"
" -select_streams a -loglevel error"
).format(**locals())
self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
audio_check_output = pype.api.subprocess(ffprob_cmd)
self.log.debug(
"audio_check_output: {}".format(audio_check_output))
# Fix one frame difference
""" TODO: this is just work-around for issue:
https://github.com/pypeclub/pype/issues/659
"""
frame_duration_extend = 1
if audio_check_output:
frame_duration_extend = 0
# translate frame to sec
start_sec = float(frame_start) / fps
duration_sec = float(
(frame_end - frame_start) + frame_duration_extend) / fps
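# e.g. (illustrative): fps=24, frame_start=-12, frame_end=36 and no audio
# stream gives start_sec = -0.5 and duration_sec = (48 + 1) / 24 ~ 2.04,
# so the negative start triggers the black-leader branch below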
empty_add = None
# check if frames are missing at the start or the end
if (start_sec < 0) or (media_duration < frame_end):
# for later switching off the `-c:v copy` output arg
empty_add = True
# init empty variables
video_empty_start = video_layer_start = ""
audio_empty_start = audio_layer_start = ""
video_empty_end = video_layer_end = ""
audio_empty_end = audio_layer_end = ""
audio_input = audio_output = ""
v_inp_idx = 0
concat_n = 1
# try to get video native resolution data
try:
resolution_output = pype.api.subprocess((
"\"{ffprobe_path}\" -i \"{full_input_path}\""
" -v error "
"-select_streams v:0 -show_entries "
"stream=width,height -of csv=s=x:p=0"
).format(**locals()))
x, y = resolution_output.split("x")
resolution_width = int(x)
resolution_height = int(y)
except Exception as _ex:
self.log.warning(
"Video native resolution is untracable: {}".format(
_ex))
if audio_check_output:
# adding input for empty audio
input_args.append("-f lavfi -i anullsrc")
# define audio empty concat variables
audio_input = "[1:a]"
audio_output = ":a=1"
v_inp_idx = 1
# adding input for video black frame
input_args.append((
"-f lavfi -i \"color=c=black:"
"s={resolution_width}x{resolution_height}:r={fps}\""
).format(**locals()))
if (start_sec < 0):
# recalculate input video timing
empty_start_dur = abs(start_sec)
start_sec = 0
duration_sec = float(frame_end - (
frame_start + (empty_start_dur * fps)) + 1) / fps
# define starting empty video concat variables
video_empty_start = (
"[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];" # noqa
).format(**locals())
video_layer_start = "[gv0]"
if audio_check_output:
# define starting empty audio concat variables
audio_empty_start = (
"[0]atrim=duration={empty_start_dur}[ga0];"
).format(**locals())
audio_layer_start = "[ga0]"
# alter concat number of clips
concat_n += 1
# check if frames are missing at the end
if (media_duration < frame_end):
# recalculate timing
empty_end_dur = float(
frame_end - media_duration + 1) / fps
duration_sec = float(
media_duration - frame_start) / fps
# define ending empty video concat variables
video_empty_end = (
"[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
).format(**locals())
video_layer_end = "[gv1]"
if audio_check_output:
# define ending empty audio concat variables
audio_empty_end = (
"[0]atrim=duration={empty_end_dur}[ga1];"
).format(**locals())
audio_layer_end = "[ga0]"
# alter concat number of clips
concat_n += 1
# concatenate the black frames together with the clip
output_args.append((
"-filter_complex \""
"{audio_empty_start}"
"{video_empty_start}"
"{audio_empty_end}"
"{video_empty_end}"
"{video_layer_start}{audio_layer_start}[1:v]{audio_input}" # noqa
"{video_layer_end}{audio_layer_end}"
"concat=n={concat_n}:v=1{audio_output}\""
).format(**locals()))
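# in the simplest case (no audio, head frames missing) the assembled
# option would look roughly like (illustrative duration):
#   -filter_complex "[0]trim=duration=0.5[gv0];[gv0][1:v]concat=n=2:v=1"
# where input 0 is the black color source and input 1 the trimmed clip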
# append ffmpeg input video clip
input_args.append("-ss {:0.2f}".format(start_sec))
input_args.append("-t {:0.2f}".format(duration_sec))
input_args.append("-i \"{}\"".format(full_input_path))
# copy the video codec if we are only shortening the clip
if ("_cut-bigger" in tags) and (not empty_add):
output_args.append("-c:v copy")
# make sure there is no inter-frame compression
output_args.append("-intra")
# output filename
output_args.append("-y \"{}\"".format(full_output_path))
mov_args = [
"\"{}\"".format(ffmpeg_path),
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
repre_new = {
"files": new_files,
"stagingDir": full_output_dir,
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartFtrack": frame_start,
"frameEndFtrack": frame_end,
"step": 1,
"fps": fps,
"name": "cut_up_preview",
"tags": ["review"] + self.tags_addition,
"ext": ext,
"anatomy_template": "publish"
}
representations_new.append(repre_new)
# filter out representations tagged for deletion; removing items from
# the list while iterating over it would skip elements
representations_new = [
repre for repre in representations_new
if "delete" not in repre.get("tags", [])
or "cut_up_preview" in repre["name"]
]
self.log.debug(
"Representations: {}".format(representations_new))
instance.data["representations"] = representations_new
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
# make sure the destination folder exist
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
six.reraise(*sys.exc_info())
# create hardlined file
try:
filelink.create(src, dst, filelink.HARDLINK)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
six.reraise(*sys.exc_info())
def create_representation(self, otio_clip, to_otio_range, instance):
to_tl_start, to_tl_end = pype.lib.otio_range_to_frame_range(
to_otio_range)
tl_start, tl_end = pype.lib.otio_range_to_frame_range(
otio_clip.range_in_parent())
source_start, source_end = pype.lib.otio_range_to_frame_range(
otio_clip.source_range)
media_reference = otio_clip.media_reference
metadata = media_reference.metadata
mr_start, mr_end = pype.lib.otio_range_to_frame_range(
media_reference.available_range)
path = media_reference.target_url
reference_frame_start = (mr_start + source_start) + (
to_tl_start - tl_start)
reference_frame_end = (mr_start + source_end) - (
tl_end - to_tl_end)
base_name = os.path.basename(path)
staging_dir = os.path.dirname(path)
ext = os.path.splitext(base_name)[1][1:]
if metadata.get("isSequence"):
files = list()
padding = metadata["padding"]
base_name = pype.lib.convert_to_padded_path(base_name, padding)
for index in range(
reference_frame_start, (reference_frame_end + 1)):
file_name = base_name % index
path_test = os.path.join(staging_dir, file_name)
if os.path.exists(path_test):
files.append(file_name)
self.log.debug(files)
else:
files = base_name
representation = {
"ext": ext,
"name": ext,
"files": files,
"frameStart": reference_frame_start,
"frameEnd": reference_frame_end,
"stagingDir": staging_dir
}
self.log.debug(representation)
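As a side note, the EEXIST handling in `hardlink_file` above could be collapsed on Python 3; a minimal sketch, assuming `avalon.vendor.filelink` keeps the `create(src, dst, HARDLINK)` signature used in this commit:

import errno
import os

from avalon.vendor import filelink


def hardlink_file(src, dst):
    # exist_ok replaces the manual EEXIST branch for the directory
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    try:
        filelink.create(src, dst, filelink.HARDLINK)
    except OSError as exc:
        # an already existing link is fine, anything else should surface
        if exc.errno != errno.EEXIST:
            raise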

View file

@@ -8,7 +8,7 @@ from pprint import pformat
class CollectInstances(pyblish.api.ContextPlugin):
"""Collect all Track items selection."""
order = pyblish.api.CollectorOrder - 0.5
order = pyblish.api.CollectorOrder - 0.59
label = "Collect Instances"
hosts = ["resolve"]
@@ -64,9 +64,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
"asset": asset,
"item": track_item,
"families": families,
"publish": resolve.get_publish_attribute(track_item),
# tags
"tags": tag_data,
"publish": resolve.get_publish_attribute(track_item)
})
# otio clip data

View file

@@ -13,7 +13,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
label = "Collect Workfile"
order = pyblish.api.CollectorOrder - 0.501
order = pyblish.api.CollectorOrder - 0.6
def process(self, context):
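For reference, the collector ordering this commit establishes across the touched plugins (earliest first):

CollectWorkfile           CollectorOrder - 0.6
CollectInstances          CollectorOrder - 0.59
CollectOtioFrameRanges    CollectorOrder - 0.58
CollectOtioReview         CollectorOrder - 0.57
ExtractOTIOReview         CollectorOrder + 0.1023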