mirror of
https://github.com/ynput/ayon-core.git
synced 2025-12-26 13:52:15 +01:00
feat(global): finalized collect and extract otio review plugin
adding docstrings and comments
This commit is contained in:
parent
9497fc621e
commit
8c94caf330
2 changed files with 238 additions and 55 deletions
|
|
@ -1,19 +1,21 @@
|
|||
"""
|
||||
Requires:
|
||||
otioTimeline -> context data attribute
|
||||
review -> instance data attribute
|
||||
masterLayer -> instance data attribute
|
||||
otioClipRange -> instance data attribute
|
||||
instance -> review
|
||||
instance -> masterLayer
|
||||
instance -> otioClip
|
||||
context -> otioTimeline
|
||||
|
||||
Provides:
|
||||
instance -> otioReviewClips
|
||||
"""
|
||||
# import os
|
||||
|
||||
import opentimelineio as otio
|
||||
import pyblish.api
|
||||
import pype.lib
|
||||
from pprint import pformat
|
||||
|
||||
|
||||
class CollectOcioReview(pyblish.api.InstancePlugin):
|
||||
"""Get matching otio from defined review layer"""
|
||||
"""Get matching otio track from defined review layer"""
|
||||
|
||||
label = "Collect OTIO review"
|
||||
order = pyblish.api.CollectorOrder - 0.57
|
||||
|
|
@ -27,8 +29,14 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
|
|||
master_layer = instance.data["masterLayer"]
|
||||
otio_timeline = instance.context.data["otioTimeline"]
|
||||
otio_clip = instance.data["otioClip"]
|
||||
|
||||
# generate range in parent
|
||||
otio_tl_range = otio_clip.range_in_parent()
|
||||
|
||||
# calculate real timeline end needed for the clip
|
||||
clip_end_frame = int(
|
||||
otio_tl_range.start_time.value + otio_tl_range.duration.value)
|
||||
|
||||
# skip if master layer is False
|
||||
if not master_layer:
|
||||
return
|
||||
|
|
@ -36,10 +44,43 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
|
|||
for track in otio_timeline.tracks:
|
||||
if review_track_name not in track.name:
|
||||
continue
|
||||
|
||||
# process correct track
|
||||
otio_gap = None
|
||||
|
||||
# get track parent range
|
||||
track_rip = track.range_in_parent()
|
||||
|
||||
# calculate real track end frame
|
||||
track_end_frame = int(
|
||||
track_rip.start_time.value + track_rip.duration.value)
|
||||
|
||||
# check if the end of track is not lower than clip requirement
|
||||
if clip_end_frame > track_end_frame:
|
||||
# calculate difference duration
|
||||
gap_duration = clip_end_frame - track_end_frame
|
||||
# create rational time range for gap
|
||||
otio_gap_range = otio.opentime.TimeRange(
|
||||
start_time=otio.opentime.RationalTime(
|
||||
float(0),
|
||||
track_rip.start_time.rate
|
||||
),
|
||||
duration=otio.opentime.RationalTime(
|
||||
float(gap_duration),
|
||||
track_rip.start_time.rate
|
||||
)
|
||||
)
|
||||
# create gap
|
||||
otio_gap = otio.schema.Gap(source_range=otio_gap_range)
|
||||
|
||||
# trim available clips from defined track as reviewable source
|
||||
otio_review_clips = otio.algorithms.track_trimmed_to_range(
|
||||
track,
|
||||
otio_tl_range
|
||||
)
|
||||
# add gap at the end if track end is shorter than needed
|
||||
if otio_gap:
|
||||
otio_review_clips.append(otio_gap)
|
||||
|
||||
instance.data["otioReviewClips"] = otio_review_clips
|
||||
self.log.debug(
|
||||
|
|
|
|||
|
|
@ -1,30 +1,41 @@
|
|||
import os
|
||||
import sys
|
||||
import six
|
||||
import errno
|
||||
"""
|
||||
Requires:
|
||||
instance -> handleStart
|
||||
instance -> handleEnd
|
||||
instance -> otioClip
|
||||
instance -> otioReviewClips
|
||||
|
||||
Optional:
|
||||
instance -> workfileFrameStart
|
||||
instance -> resolutionWidth
|
||||
instance -> resolutionHeight
|
||||
|
||||
Provides:
|
||||
instance -> otioReviewClips
|
||||
"""
|
||||
|
||||
import os
|
||||
import clique
|
||||
import shutil
|
||||
import opentimelineio as otio
|
||||
from pyblish import api
|
||||
import pype
|
||||
|
||||
|
||||
class ExtractOTIOReview(pype.api.Extractor):
|
||||
""" Extract OTIO timeline into one concuted video file.
|
||||
"""
|
||||
Extract OTIO timeline into one concatenated image sequence file.
|
||||
|
||||
Expecting (instance.data):
|
||||
otioClip (otio.schema.clip): clip from otio timeline
|
||||
otioReviewClips (list): list with instances of otio.schema.clip
|
||||
or otio.schema.gap
|
||||
The `otioReviewClip` is holding trimmed range of clips relative to
|
||||
the `otioClip`. Handles are added during looping by available list
|
||||
of Gap and clips in the track. Handle start (head) is added before
|
||||
first Gap or Clip and Handle end (tail) is added at the end of last
|
||||
Clip or Gap. In case there is missing source material after the
|
||||
handles addition Gap will be added. At the end all Gaps are converted
|
||||
to black frames and available material is converted to image sequence
|
||||
frames. At the end representation is created and added to the instance.
|
||||
|
||||
At the moment only image sequence output is supported
|
||||
|
||||
Process description:
|
||||
Comparing `otioClip` parent range with `otioReviewClip` parent range
|
||||
will result in frame range which is the trimmed cut. In case more otio
|
||||
clips or otio gaps are found in otioReviewClips then ffmpeg will
|
||||
generate multiple clips and those are then concatenated together to one
|
||||
video file or image sequence. Resulting files are then added to
|
||||
instance as representation ready for review family plugins.
|
||||
"""
|
||||
|
||||
# order = api.ExtractorOrder
|
||||
|
|
@ -40,23 +51,26 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
output_ext = ".jpg"
|
||||
|
||||
def process(self, instance):
|
||||
# TODO: convert resulting image sequence to mp4
|
||||
# TODO: add audio output to the mp4 if audio in review is on.
|
||||
|
||||
# get otio clip and other time info from instance clip
|
||||
handle_start = instance.data["handleStart"]
|
||||
handle_end = instance.data["handleEnd"]
|
||||
otio_review_clips = instance.data["otioReviewClips"]
|
||||
|
||||
# add plugin wide attributes
|
||||
self.representation_files = list()
|
||||
self.used_frames = list()
|
||||
self.workfile_start = int(instance.data.get(
|
||||
"workfileFrameStart", 1001))
|
||||
self.padding = len(str(self.workfile_start))
|
||||
self.used_frames.append(self.workfile_start)
|
||||
self.log.debug(f"_ self.used_frames-0: {self.used_frames}")
|
||||
self.to_width = instance.data.get(
|
||||
"resolutionWidth") or self.to_width
|
||||
self.to_height = instance.data.get(
|
||||
"resolutionHeight") or self.to_height
|
||||
|
||||
# get otio clip and other time info from instance clip
|
||||
handle_start = instance.data["handleStart"]
|
||||
handle_end = instance.data["handleEnd"]
|
||||
otio_review_clips = instance.data["otioReviewClips"]
|
||||
|
||||
# skip instance if no reviewable data available
|
||||
if (not isinstance(otio_review_clips[0], otio.schema.Clip)) \
|
||||
and (len(otio_review_clips) == 1):
|
||||
|
|
@ -68,7 +82,9 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
if not instance.data.get("representations"):
|
||||
instance.data["representations"] = list()
|
||||
|
||||
# loop available clips in otio track
|
||||
for index, r_otio_cl in enumerate(otio_review_clips):
|
||||
# get frame range values
|
||||
src_range = r_otio_cl.source_range
|
||||
start = src_range.start_time.value
|
||||
duration = src_range.duration.value
|
||||
|
|
@ -110,16 +126,22 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
|
||||
# render segment
|
||||
self._render_seqment(
|
||||
sequence=[dir_path, collection]
|
||||
)
|
||||
sequence=[dir_path, collection])
|
||||
# generate used frames
|
||||
self._generate_used_frames(
|
||||
len(collection.indexes))
|
||||
else:
|
||||
# render video file to sequence
|
||||
self._render_seqment(
|
||||
video=[path, available_range]
|
||||
)
|
||||
video=[path, available_range])
|
||||
# generate used frames
|
||||
self._generate_used_frames(
|
||||
available_range.duration.value)
|
||||
|
||||
else:
|
||||
self._render_seqment(gap=duration)
|
||||
# generate used frames
|
||||
self._generate_used_frames(duration)
|
||||
|
||||
# creating and registering representation
|
||||
representation = self._create_representation(start, duration)
|
||||
|
|
@ -127,6 +149,17 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
self.log.info(f"Adding representation: {representation}")
|
||||
|
||||
def _create_representation(self, start, duration):
|
||||
"""
|
||||
Creating representation data.
|
||||
|
||||
Args:
|
||||
start (int): start frame
|
||||
duration (int): duration frames
|
||||
|
||||
Returns:
|
||||
dict: representation data
|
||||
"""
|
||||
|
||||
end = start + duration
|
||||
|
||||
# create default representation data
|
||||
|
|
@ -158,6 +191,21 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
return representation_data
|
||||
|
||||
def _trim_available_range(self, avl_range, start, duration, fps):
|
||||
"""
|
||||
Trim available media range to source range.
|
||||
|
||||
If missing media range is detected it will convert it into
|
||||
black frames gaps.
|
||||
|
||||
Args:
|
||||
avl_range (otio.time.TimeRange): media available time range
|
||||
start (int): start frame
|
||||
duration (int): duration frames
|
||||
fps (float): frame rate
|
||||
|
||||
Returns:
|
||||
otio.time.TimeRange: trimmed available range
|
||||
"""
|
||||
avl_start = int(avl_range.start_time.value)
|
||||
src_start = int(avl_start + start)
|
||||
avl_durtation = int(avl_range.duration.value - start)
|
||||
|
|
@ -169,22 +217,24 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
|
||||
# create gap data to disk
|
||||
self._render_seqment(gap=gap_duration)
|
||||
self.log.debug(f"_ self.used_frames-1: {self.used_frames}")
|
||||
# generate used frames
|
||||
self._generate_used_frames(gap_duration)
|
||||
|
||||
# fix start and end to correct values
|
||||
start = 0
|
||||
duration -= len(gap_duration)
|
||||
|
||||
# if media duration is shorter than clip requirement
|
||||
if duration > avl_durtation:
|
||||
# TODO: this will render missing frame before not at the end of footage. need to fix this so the rendered frames will be numbered after the footage.
|
||||
# calculate gap
|
||||
gap_start = int(src_start + avl_durtation)
|
||||
gap_end = int(src_start + duration)
|
||||
gap_duration = gap_start - gap_end
|
||||
gap_duration = gap_end - gap_start
|
||||
|
||||
# create gap data to disk
|
||||
self._render_seqment(gap=gap_duration)
|
||||
self.log.debug(f"_ self.used_frames-2: {self.used_frames}")
|
||||
self._render_seqment(gap=gap_duration, end_offset=avl_durtation)
|
||||
# generate used frames
|
||||
self._generate_used_frames(gap_duration, end_offset=avl_durtation)
|
||||
|
||||
# fix duration length
|
||||
duration = avl_durtation
|
||||
|
|
@ -194,19 +244,37 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
avl_range, self._range_from_frames(start, duration, fps)
|
||||
)
|
||||
|
||||
def _render_seqment(self, sequence=None, video=None, gap=None):
|
||||
def _render_seqment(self, sequence=None,
|
||||
video=None, gap=None, end_offset=None):
|
||||
"""
|
||||
Render seqment into image sequence frames.
|
||||
|
||||
Using ffmpeg to convert compatible video and image source
|
||||
to defined image sequence format.
|
||||
|
||||
Args:
|
||||
sequence (list): input dir path string, collection object in list
|
||||
video (list)[optional]: video_path string, otio_range in list
|
||||
gap (int)[optional]: gap duration
|
||||
end_offset (int)[optional]: offset gap frame start in frames
|
||||
|
||||
Returns:
|
||||
otio.time.TimeRange: trimmed available range
|
||||
"""
|
||||
# get rendering app path
|
||||
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
|
||||
|
||||
# create path and frame start to destination
|
||||
output_path, out_frame_start = self._add_ffmpeg_output()
|
||||
output_path, out_frame_start = self._get_ffmpeg_output()
|
||||
|
||||
if end_offset:
|
||||
out_frame_start += end_offset
|
||||
|
||||
# start command list
|
||||
command = [ffmpeg_path]
|
||||
|
||||
if sequence:
|
||||
input_dir, collection = sequence
|
||||
frame_duration = len(collection.indexes)
|
||||
in_frame_start = min(collection.indexes)
|
||||
|
||||
# converting image sequence to image sequence
|
||||
|
|
@ -235,9 +303,8 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
])
|
||||
|
||||
elif gap:
|
||||
frame_duration = gap
|
||||
sec_duration = self._frames_to_secons(
|
||||
frame_duration, self.actual_fps)
|
||||
gap, self.actual_fps)
|
||||
|
||||
# form command for rendering gap files
|
||||
command.extend([
|
||||
|
|
@ -258,20 +325,49 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
output = pype.api.subprocess(" ".join(command), shell=True)
|
||||
self.log.debug("Output: {}".format(output))
|
||||
|
||||
# generate used frames
|
||||
self._generate_used_frames(frame_duration)
|
||||
def _generate_used_frames(self, duration, end_offset=None):
|
||||
"""
|
||||
Generating used frames into plugin argument `used_frames`.
|
||||
|
||||
The argument `used_frames` is used for checking next available
|
||||
frame to start with during rendering sequence segments.
|
||||
|
||||
Args:
|
||||
duration (int): duration of frames needed to be generated
|
||||
end_offset (int)[optional]: in case frames need to be offseted
|
||||
|
||||
"""
|
||||
|
||||
def _generate_used_frames(self, duration):
|
||||
padding = "{{:0{}d}}".format(self.padding)
|
||||
for _i in range(1, (int(duration) + 1)):
|
||||
if self.used_frames[-1] == self.workfile_start:
|
||||
seq_number = padding.format(self.used_frames[-1])
|
||||
self.workfile_start -= 1
|
||||
else:
|
||||
seq_number = padding.format(self.used_frames[-1] + 1)
|
||||
self.used_frames.append(int(seq_number))
|
||||
if end_offset:
|
||||
new_frames = list()
|
||||
start_frame = self.used_frames[-1]
|
||||
for index in range((end_offset + 1),
|
||||
(int(end_offset + duration) + 1)):
|
||||
seq_number = padding.format(start_frame + index)
|
||||
self.log.debug(
|
||||
f"index: `{index}` | seq_number: `{seq_number}`")
|
||||
new_frames.append(int(seq_number))
|
||||
new_frames += self.used_frames
|
||||
self.used_frames = new_frames
|
||||
else:
|
||||
for _i in range(1, (int(duration) + 1)):
|
||||
if self.used_frames[-1] == self.workfile_start:
|
||||
seq_number = padding.format(self.used_frames[-1])
|
||||
self.workfile_start -= 1
|
||||
else:
|
||||
seq_number = padding.format(self.used_frames[-1] + 1)
|
||||
self.used_frames.append(int(seq_number))
|
||||
|
||||
def _add_ffmpeg_output(self):
|
||||
def _get_ffmpeg_output(self):
|
||||
"""
|
||||
Returning ffmpeg output command arguments.
|
||||
|
||||
Returns:
|
||||
str: output_path is path for image sequence output
|
||||
int: out_frame_start is starting sequence frame
|
||||
|
||||
"""
|
||||
output_file = "{}{}{}".format(
|
||||
self.temp_file_head,
|
||||
"%0{}d".format(self.padding),
|
||||
|
|
@ -289,11 +385,34 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
|
||||
@staticmethod
|
||||
def _frames_to_secons(frames, framerate):
|
||||
"""
|
||||
Returning seconds.
|
||||
|
||||
Args:
|
||||
frames (int): frame
|
||||
framerate (float): frame rate
|
||||
|
||||
Returns:
|
||||
float: second value
|
||||
|
||||
"""
|
||||
rt = otio.opentime.from_frames(frames, framerate)
|
||||
return otio.opentime.to_seconds(rt)
|
||||
|
||||
@staticmethod
|
||||
def _make_sequence_collection(path, otio_range, metadata):
|
||||
"""
|
||||
Make collection from path otio range and otio metadata.
|
||||
|
||||
Args:
|
||||
path (str): path to image sequence with `%d`
|
||||
otio_range (otio.opentime.TimeRange): range to be used
|
||||
metadata (dict): data where padding value can be found
|
||||
|
||||
Returns:
|
||||
list: dir_path (str): path to sequence, collection object
|
||||
|
||||
"""
|
||||
if "%" not in path:
|
||||
return None
|
||||
file_name = os.path.basename(path)
|
||||
|
|
@ -308,6 +427,17 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
|
||||
@staticmethod
|
||||
def _trim_media_range(media_range, source_range):
|
||||
"""
|
||||
Trim input media range with clip source range.
|
||||
|
||||
Args:
|
||||
media_range (otio.opentime.TimeRange): available range of media
|
||||
source_range (otio.opentime.TimeRange): clip required range
|
||||
|
||||
Returns:
|
||||
otio.opentime.TimeRange: trimmed media range
|
||||
|
||||
"""
|
||||
rw_media_start = otio.opentime.RationalTime(
|
||||
media_range.start_time.value + source_range.start_time.value,
|
||||
media_range.start_time.rate
|
||||
|
|
@ -321,6 +451,18 @@ class ExtractOTIOReview(pype.api.Extractor):
|
|||
|
||||
@staticmethod
|
||||
def _range_from_frames(start, duration, fps):
|
||||
"""
|
||||
Returns otio time range.
|
||||
|
||||
Args:
|
||||
start (int): frame start
|
||||
duration (int): frame duration
|
||||
fps (float): frame range
|
||||
|
||||
Returns:
|
||||
otio.opentime.TimeRange: crated range
|
||||
|
||||
"""
|
||||
return otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(start, fps),
|
||||
otio.opentime.RationalTime(duration, fps)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue