Merge pull request #3684 from pypeclub/bugfix/OP-3654_Flame-re-timing-produces-frame-range-discrepancy-

Flame: retiming now works in clip publishing
Jakub Ježek 2022-08-23 15:51:41 +02:00 committed by GitHub
commit 481eded31d
6 changed files with 311 additions and 52 deletions

View file

@@ -30,7 +30,8 @@ from .lib import (
maintained_temp_file_path,
get_clip_segment,
get_batch_group_from_desktop,
MediaInfoFile
MediaInfoFile,
TimeEffectMetadata
)
from .utils import (
setup,
@@ -107,6 +108,7 @@ __all__ = [
"get_clip_segment",
"get_batch_group_from_desktop",
"MediaInfoFile",
"TimeEffectMetadata",
# pipeline
"install",

View file

@@ -5,10 +5,11 @@ import json
import pickle
import clique
import tempfile
import traceback
import itertools
import contextlib
import xml.etree.cElementTree as cET
from copy import deepcopy
from copy import deepcopy, copy
from xml.etree import ElementTree as ET
from pprint import pformat
from .constants import (
@@ -266,7 +267,7 @@ def get_current_sequence(selection):
def rescan_hooks():
import flame
try:
flame.execute_shortcut('Rescan Python Hooks')
flame.execute_shortcut("Rescan Python Hooks")
except Exception:
pass
@@ -1082,21 +1083,21 @@ class MediaInfoFile(object):
xml_data (ET.Element): clip data
"""
try:
for out_track in xml_data.iter('track'):
for out_feed in out_track.iter('feed'):
for out_track in xml_data.iter("track"):
for out_feed in out_track.iter("feed"):
# start frame
out_feed_nb_ticks_obj = out_feed.find(
'startTimecode/nbTicks')
"startTimecode/nbTicks")
self.start_frame = out_feed_nb_ticks_obj.text
# fps
out_feed_fps_obj = out_feed.find(
'startTimecode/rate')
"startTimecode/rate")
self.fps = out_feed_fps_obj.text
# drop frame mode
out_feed_drop_mode_obj = out_feed.find(
'startTimecode/dropMode')
"startTimecode/dropMode")
self.drop_mode = out_feed_drop_mode_obj.text
break
except Exception as msg:
@@ -1118,8 +1119,153 @@
tree = cET.ElementTree(xml_element_data)
tree.write(
fpath, xml_declaration=True,
method='xml', encoding='UTF-8'
method="xml", encoding="UTF-8"
)
except IOError as error:
raise IOError(
"Not able to write data to file: {}".format(error))
class TimeEffectMetadata(object):
log = log
_data = {}
_retime_modes = {
0: "speed",
1: "timewarp",
2: "duration"
}
def __init__(self, segment, logger=None):
if logger:
self.log = logger
self._data = self._get_metadata(segment)
@property
def data(self):
""" Returns timewarp effect data
Returns:
dict: retime data
"""
return self._data
def _get_metadata(self, segment):
effects = segment.effects or []
for effect in effects:
if effect.type == "Timewarp":
with maintained_temp_file_path(".timewarp_node") as tmp_path:
self.log.info("Temp File: {}".format(tmp_path))
effect.save_setup(tmp_path)
return self._get_attributes_from_xml(tmp_path)
return {}
def _get_attributes_from_xml(self, tmp_path):
with open(tmp_path, "r") as tw_setup_file:
tw_setup_string = tw_setup_file.read()
tw_setup_file.close()
tw_setup_xml = ET.fromstring(tw_setup_string)
tw_setup = self._dictify(tw_setup_xml)
# pprint(tw_setup)
try:
tw_setup_state = tw_setup["Setup"]["State"][0]
mode = int(
tw_setup_state["TW_RetimerMode"][0]["_text"]
)
r_data = {
"type": self._retime_modes[mode],
"effectStart": int(
tw_setup["Setup"]["Base"][0]["Range"][0]["Start"]),
"effectEnd": int(
tw_setup["Setup"]["Base"][0]["Range"][0]["End"])
}
if mode == 0: # speed
r_data[self._retime_modes[mode]] = float(
tw_setup_state["TW_Speed"]
[0]["Channel"][0]["Value"][0]["_text"]
) / 100
elif mode == 1: # timewarp
print("timing")
r_data[self._retime_modes[mode]] = self._get_anim_keys(
tw_setup_state["TW_Timing"]
)
elif mode == 2: # duration
r_data[self._retime_modes[mode]] = {
"start": {
"source": int(
tw_setup_state["TW_DurationTiming"][0]["Channel"]
[0]["KFrames"][0]["Key"][0]["Value"][0]["_text"]
),
"timeline": int(
tw_setup_state["TW_DurationTiming"][0]["Channel"]
[0]["KFrames"][0]["Key"][0]["Frame"][0]["_text"]
)
},
"end": {
"source": int(
tw_setup_state["TW_DurationTiming"][0]["Channel"]
[0]["KFrames"][0]["Key"][1]["Value"][0]["_text"]
),
"timeline": int(
tw_setup_state["TW_DurationTiming"][0]["Channel"]
[0]["KFrames"][0]["Key"][1]["Frame"][0]["_text"]
)
}
}
except Exception:
lines = traceback.format_exception(*sys.exc_info())
self.log.error("\n".join(lines))
return
return r_data
def _get_anim_keys(self, setup_cat, index=None):
return_data = {
"extrapolation": (
setup_cat[0]["Channel"][0]["Extrap"][0]["_text"]
),
"animKeys": []
}
for key in setup_cat[0]["Channel"][0]["KFrames"][0]["Key"]:
if index and int(key["Index"]) != index:
continue
key_data = {
"source": float(key["Value"][0]["_text"]),
"timeline": float(key["Frame"][0]["_text"]),
"index": int(key["Index"]),
"curveMode": key["CurveMode"][0]["_text"],
"curveOrder": key["CurveOrder"][0]["_text"]
}
if key.get("TangentMode"):
key_data["tangentMode"] = key["TangentMode"][0]["_text"]
return_data["animKeys"].append(key_data)
return return_data
def _dictify(self, xml_, root=True):
""" Convert xml object to dictionary
Args:
xml_ (xml.etree.ElementTree.Element): xml data
root (bool, optional): whether this element is the XML root. Defaults to True.
Returns:
dict: dictionary representation of the XML element
"""
if root:
return {xml_.tag: self._dictify(xml_, False)}
d = copy(xml_.attrib)
if xml_.text:
d["_text"] = xml_.text
for x in xml_.findall("./*"):
if x.tag not in d:
d[x.tag] = []
d[x.tag].append(self._dictify(x, False))
return d
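# Illustrative sketch (not part of this commit): rough usage of the new class and
# the shape of its `.data` property. The segment object and all values below are
# assumed for the example only.
#
#   tw_meta = TimeEffectMetadata(segment, logger=log).data
#
#   # speed mode (TW_RetimerMode == 0): single scalar, e.g. 50 % -> 0.5
#   # {"type": "speed", "effectStart": 1001, "effectEnd": 1100, "speed": 0.5}
#
#   # timewarp mode (TW_RetimerMode == 1): animated keys read from TW_Timing
#   # {"type": "timewarp", "effectStart": 1001, "effectEnd": 1100,
#   #  "timewarp": {"extrapolation": "constant", "animKeys": [
#   #      {"source": 1001.0, "timeline": 1001.0, "index": 1,
#   #       "curveMode": "hermite", "curveOrder": "cubic"}]}}
#
#   # duration mode (TW_RetimerMode == 2): source/timeline frame for both ends
#   # {"type": "duration", "effectStart": 1001, "effectEnd": 1100,
#   #  "duration": {"start": {"source": 1001, "timeline": 1001},
#   #               "end": {"source": 1050, "timeline": 1100}}}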

View file

@@ -275,7 +275,7 @@ def create_otio_reference(clip_data, fps=None):
def create_otio_clip(clip_data):
from openpype.hosts.flame.api import MediaInfoFile
from openpype.hosts.flame.api import MediaInfoFile, TimeEffectMetadata
segment = clip_data["PySegment"]
@@ -284,14 +284,31 @@ def create_otio_clip(clip_data):
media_timecode_start = media_info.start_frame
media_fps = media_info.fps
# Timewarp metadata
tw_data = TimeEffectMetadata(segment, logger=log).data
log.debug("__ tw_data: {}".format(tw_data))
# define first frame
first_frame = media_timecode_start or utils.get_frame_from_filename(
clip_data["fpath"]) or 0
file_first_frame = utils.get_frame_from_filename(
clip_data["fpath"])
if file_first_frame:
file_first_frame = int(file_first_frame)
first_frame = media_timecode_start or file_first_frame or 0
_clip_source_in = int(clip_data["source_in"])
_clip_source_out = int(clip_data["source_out"])
_clip_record_in = clip_data["record_in"]
_clip_record_out = clip_data["record_out"]
_clip_record_duration = int(clip_data["record_duration"])
log.debug("_ file_first_frame: {}".format(file_first_frame))
log.debug("_ first_frame: {}".format(first_frame))
log.debug("_ _clip_source_in: {}".format(_clip_source_in))
log.debug("_ _clip_source_out: {}".format(_clip_source_out))
log.debug("_ _clip_record_in: {}".format(_clip_record_in))
log.debug("_ _clip_record_out: {}".format(_clip_record_out))
# first check whether the timing is reversed
speed = 1
if clip_data["source_in"] > clip_data["source_out"]:
@@ -302,16 +319,28 @@ def create_otio_clip(clip_data):
source_in = _clip_source_in - int(first_frame)
source_out = _clip_source_out - int(first_frame)
log.debug("_ source_in: {}".format(source_in))
log.debug("_ source_out: {}".format(source_out))
if file_first_frame:
log.debug("_ file_source_in: {}".format(
file_first_frame + source_in))
log.debug("_ file_source_in: {}".format(
file_first_frame + source_out))
source_duration = (source_out - source_in + 1)
# secondly check for any change of speed
if source_duration != _clip_record_duration:
retime_speed = float(source_duration) / float(_clip_record_duration)
log.debug("_ retime_speed: {}".format(retime_speed))
log.debug("_ calculated speed: {}".format(retime_speed))
speed *= retime_speed
log.debug("_ source_in: {}".format(source_in))
log.debug("_ source_out: {}".format(source_out))
# get speed from metadata if available
if tw_data.get("speed"):
speed = tw_data["speed"]
log.debug("_ metadata speed: {}".format(speed))
log.debug("_ speed: {}".format(speed))
log.debug("_ source_duration: {}".format(source_duration))
log.debug("_ _clip_record_duration: {}".format(_clip_record_duration))

View file

@@ -8,6 +8,9 @@ import pyblish.api
import openpype.api
from openpype.hosts.flame import api as opfapi
from openpype.hosts.flame.api import MediaInfoFile
from openpype.pipeline.editorial import (
get_media_range_with_retimes
)
import flame
@@ -47,7 +50,6 @@ class ExtractSubsetResources(openpype.api.Extractor):
export_presets_mapping = {}
def process(self, instance):
if not self.keep_original_representation:
# remove previous representations if not needed
instance.data["representations"] = []
@@ -67,18 +69,60 @@ class ExtractSubsetResources(openpype.api.Extractor):
# get media source first frame
source_first_frame = instance.data["sourceFirstFrame"]
self.log.debug("_ frame_start: {}".format(frame_start))
self.log.debug("_ source_first_frame: {}".format(source_first_frame))
# get timeline in/out of segment
clip_in = instance.data["clipIn"]
clip_out = instance.data["clipOut"]
# get retimed attributes
retimed_data = self._get_retimed_attributes(instance)
# get individual keys
r_handle_start = retimed_data["handle_start"]
r_handle_end = retimed_data["handle_end"]
r_source_dur = retimed_data["source_duration"]
r_speed = retimed_data["speed"]
# get handles value - take only the max from both
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
handles = max(handle_start, handle_end)
include_handles = instance.data.get("includeHandles")
# get media source range with handles
source_start_handles = instance.data["sourceStartH"]
source_end_handles = instance.data["sourceEndH"]
# retime if needed
if r_speed != 1.0:
source_start_handles = (
instance.data["sourceStart"] - r_handle_start)
source_end_handles = (
source_start_handles
+ (r_source_dur - 1)
+ r_handle_start
+ r_handle_end
)
# get frame range with handles for representation range
frame_start_handle = frame_start - handle_start
repre_frame_start = frame_start_handle
if include_handles:
if r_speed == 1.0:
frame_start_handle = frame_start
else:
frame_start_handle = (
frame_start - handle_start) + r_handle_start
self.log.debug("_ frame_start_handle: {}".format(
frame_start_handle))
self.log.debug("_ repre_frame_start: {}".format(
repre_frame_start))
# calculate duration with handles
source_duration_handles = (
source_end_handles - source_start_handles) + 1
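# Illustrative worked example (not part of this commit), with assumed values
# sourceStart=1001, r_handle_start=10, r_handle_end=10, r_source_dur=50:
#   source_start_handles    = 1001 - 10 = 991
#   source_end_handles      = 991 + (50 - 1) + 10 + 10 = 1060
#   source_duration_handles = (1060 - 991) + 1 = 70
# i.e. the exported range covers the retimed source length plus both handles.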
# create staging dir path
staging_dir = self.staging_dir(instance)
@@ -93,6 +137,28 @@ class ExtractSubsetResources(openpype.api.Extractor):
}
export_presets.update(self.export_presets_mapping)
if not instance.data.get("versionData"):
instance.data["versionData"] = {}
# set version data if there is any retime
version_data = retimed_data.get("version_data")
self.log.debug("_ version_data: {}".format(version_data))
if version_data:
instance.data["versionData"].update(version_data)
if r_speed != 1.0:
instance.data["versionData"].update({
"frameStart": frame_start_handle,
"frameEnd": (
(frame_start_handle + source_duration_handles - 1)
- (r_handle_start + r_handle_end)
)
})
self.log.debug("_ i_version_data: {}".format(
instance.data["versionData"]
))
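# Illustrative arithmetic (not part of this commit), continuing the assumed
# numbers above with frame_start=1001 and handle_start=10, so that
# frame_start_handle=991, source_duration_handles=70, r_handle_start=r_handle_end=10:
#   versionData["frameStart"] = 991
#   versionData["frameEnd"]   = (991 + 70 - 1) - (10 + 10) = 1040
# These overrides are written only when r_speed differs from 1.0.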
# loop all preset names and
for unique_name, preset_config in export_presets.items():
modify_xml_data = {}
@@ -115,20 +181,10 @@ class ExtractSubsetResources(openpype.api.Extractor):
)
)
# get frame range with handles for representation range
frame_start_handle = frame_start - handle_start
# calculate duration with handles
source_duration_handles = (
source_end_handles - source_start_handles)
# define in/out marks
in_mark = (source_start_handles - source_first_frame) + 1
out_mark = in_mark + source_duration_handles
exporting_clip = None
name_patern_xml = "<name>_{}.".format(
unique_name)
if export_type == "Sequence Publish":
# change export clip to sequence
exporting_clip = flame.duplicate(sequence_clip)
@@ -142,19 +198,25 @@ class ExtractSubsetResources(openpype.api.Extractor):
"<segment name>_<shot name>_{}.").format(
unique_name)
# change in/out marks to timeline in/out
# only for h264 with baked retime
in_mark = clip_in
out_mark = clip_out
out_mark = clip_out + 1
modify_xml_data.update({
"exportHandles": True,
"nbHandles": handles
})
else:
in_mark = (source_start_handles - source_first_frame) + 1
out_mark = in_mark + source_duration_handles
exporting_clip = self.import_clip(clip_path)
exporting_clip.name.set_value("{}_{}".format(
asset_name, segment_name))
# add xml tag modifications
modify_xml_data.update({
"exportHandles": True,
"nbHandles": handles,
"startFrame": frame_start,
# enum position, the lowest option starts from 0
"frameIndex": 0,
"startFrame": repre_frame_start,
"namePattern": name_patern_xml
})
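# Illustrative arithmetic (not part of this commit) for the two branches above,
# with assumed values clip_in=10, clip_out=60, source_first_frame=901,
# source_start_handles=991, source_duration_handles=70:
#   Sequence Publish:  in_mark = 10,                    out_mark = 60 + 1 = 61
#   clip export:       in_mark = (991 - 901) + 1 = 91,  out_mark = 91 + 70 = 161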
@@ -162,6 +224,9 @@ class ExtractSubsetResources(openpype.api.Extractor):
# add any xml overrides collected from segment.comment
modify_xml_data.update(instance.data["xml_overrides"])
self.log.debug("_ in_mark: {}".format(in_mark))
self.log.debug("_ out_mark: {}".format(out_mark))
export_kwargs = {}
# validate xml preset file is filled
if preset_file == "":
@@ -196,9 +261,8 @@ class ExtractSubsetResources(openpype.api.Extractor):
"namePattern": "__thumbnail"
})
thumb_frame_number = int(in_mark + (
source_duration_handles / 2))
(out_mark - in_mark + 1) / 2))
self.log.debug("__ in_mark: {}".format(in_mark))
self.log.debug("__ thumb_frame_number: {}".format(
thumb_frame_number
))
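# Illustrative arithmetic (not part of this commit), with the assumed marks
# from the example above (in_mark=91, out_mark=161):
#   thumb_frame_number = int(91 + (161 - 91 + 1) / 2) = int(126.5) = 126
# i.e. the thumbnail is grabbed from roughly the middle of the exported range.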
@@ -210,9 +274,6 @@ class ExtractSubsetResources(openpype.api.Extractor):
"out_mark": out_mark
})
self.log.debug("__ modify_xml_data: {}".format(
pformat(modify_xml_data)
))
preset_path = opfapi.modify_preset_file(
preset_orig_xml_path, staging_dir, modify_xml_data)
@@ -281,9 +342,9 @@ class ExtractSubsetResources(openpype.api.Extractor):
# add frame range
if preset_config["representation_add_range"]:
representation_data.update({
"frameStart": frame_start_handle,
"frameStart": repre_frame_start,
"frameEnd": (
frame_start_handle + source_duration_handles),
repre_frame_start + source_duration_handles) - 1,
"fps": instance.data["fps"]
})
@@ -300,8 +361,32 @@ class ExtractSubsetResources(openpype.api.Extractor):
# at the end remove the duplicated clip
flame.delete(exporting_clip)
self.log.debug("All representations: {}".format(
pformat(instance.data["representations"])))
def _get_retimed_attributes(self, instance):
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
# get basic variables
otio_clip = instance.data["otioClip"]
# get available range trimmed with processed retimes
retimed_attributes = get_media_range_with_retimes(
otio_clip, handle_start, handle_end)
self.log.debug(
">> retimed_attributes: {}".format(retimed_attributes))
r_media_in = int(retimed_attributes["mediaIn"])
r_media_out = int(retimed_attributes["mediaOut"])
version_data = retimed_attributes.get("versionData")
return {
"version_data": version_data,
"handle_start": int(retimed_attributes["handleStart"]),
"handle_end": int(retimed_attributes["handleEnd"]),
"source_duration": (
(r_media_out - r_media_in) + 1
),
"speed": float(retimed_attributes["speed"])
}
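# Illustrative sketch (not part of this commit): a possible return value for a
# clip slowed to 50 %, assuming get_media_range_with_retimes() reports
# mediaIn=1001, mediaOut=1100 and handles of 10 frames:
#   {
#       "version_data": {"retime": True, "speed": 0.5, "timewarps": [],
#                        "handleStart": 10, "handleEnd": 10},
#       "handle_start": 10,
#       "handle_end": 10,
#       "source_duration": (1100 - 1001) + 1,   # 100 retimed source frames
#       "speed": 0.5
#   }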
def _should_skip(self, preset_config, clip_path, unique_name):
# get activating attributes
@@ -313,8 +398,6 @@ class ExtractSubsetResources(openpype.api.Extractor):
unique_name, activated_preset, filter_path_regex
)
)
self.log.debug(
"__ clip_path: `{}`".format(clip_path))
# skip if the preset is not activated
if not activated_preset:

View file

@@ -263,16 +263,17 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
"retime": True,
"speed": time_scalar,
"timewarps": time_warp_nodes,
"handleStart": round(handle_start),
"handleEnd": round(handle_end)
"handleStart": int(round(handle_start)),
"handleEnd": int(round(handle_end))
}
}
returning_dict = {
"mediaIn": media_in_trimmed,
"mediaOut": media_out_trimmed,
"handleStart": round(handle_start),
"handleEnd": round(handle_end)
"handleStart": int(round(handle_start)),
"handleEnd": int(round(handle_end)),
"speed": time_scalar
}
# add version data only if retime
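# Illustrative sketch (not part of this commit): shape of the dict returned above
# for a clip retimed to 2x speed, assuming media_in_trimmed=1001,
# media_out_trimmed=1100 and fractional handles of 7.6 frames:
#   {
#       "mediaIn": 1001,
#       "mediaOut": 1100,
#       "handleStart": 8,   # int(round(7.6))
#       "handleEnd": 8,
#       "speed": 2.0
#   }
# A "versionData" entry (see the block above) is added only when the clip is retimed.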

View file

@@ -121,10 +121,8 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
otio.schema.ImageSequenceReference
):
is_sequence = True
else:
# for OpenTimelineIO 0.12 and older
if metadata.get("padding"):
is_sequence = True
elif metadata.get("padding"):
is_sequence = True
self.log.info(
"frame_start-frame_end: {}-{}".format(frame_start, frame_end))