mirror of
https://github.com/ynput/ayon-core.git
synced 2025-12-24 21:04:40 +01:00
Merge pull request #1545 from pypeclub/feature/1377-hiero-publish-with-retiming
commit f0d03ebb55
25 changed files with 517 additions and 1426 deletions
@@ -62,6 +62,76 @@ def _get_metadata(item):
     return {}


+def create_time_effects(otio_clip, track_item):
+    # get all subtrack items
+    subTrackItems = flatten(track_item.parent().subTrackItems())
+    speed = track_item.playbackSpeed()
+
+    otio_effect = None
+    # retime on track item
+    if speed != 1.:
+        # make effect
+        otio_effect = otio.schema.LinearTimeWarp()
+        otio_effect.name = "Speed"
+        otio_effect.time_scalar = speed
+        otio_effect.metadata = {}
+
+    # freeze frame effect
+    if speed == 0.:
+        otio_effect = otio.schema.FreezeFrame()
+        otio_effect.name = "FreezeFrame"
+        otio_effect.metadata = {}
+
+    if otio_effect:
+        # add otio effect to clip effects
+        otio_clip.effects.append(otio_effect)
+
+    # loop through and get all TimeWarps
+    for effect in subTrackItems:
+        if ((track_item not in effect.linkedItems())
+                and (len(effect.linkedItems()) > 0)):
+            continue
+        # skip all effects which are not TimeWarp or are disabled
+        if "TimeWarp" not in effect.name():
+            continue
+
+        if not effect.isEnabled():
+            continue
+
+        node = effect.node()
+        name = node["name"].value()
+
+        # solve effect class as effect name
+        _name = effect.name()
+        if "_" in _name:
+            effect_name = re.sub(r"(?:_)[_0-9]+", "", _name)  # more numbers
+        else:
+            effect_name = re.sub(r"\d+", "", _name)  # one number
+
+        metadata = {}
+        # add knob to metadata
+        for knob in ["lookup", "length"]:
+            value = node[knob].value()
+            animated = node[knob].isAnimated()
+            if animated:
+                value = [
+                    ((node[knob].getValueAt(i)) - i)
+                    for i in range(
+                        track_item.timelineIn(), track_item.timelineOut() + 1)
+                ]
+
+            metadata[knob] = value
+
+        # make effect
+        otio_effect = otio.schema.TimeEffect()
+        otio_effect.name = name
+        otio_effect.effect_name = effect_name
+        otio_effect.metadata = metadata
+
+        # add otio effect to clip effects
+        otio_clip.effects.append(otio_effect)
+
+
 def create_otio_reference(clip):
     metadata = _get_metadata(clip)
     media_source = clip.mediaSource()
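A note on the hunk above: the mapping follows OTIO's effect schema, where a constant retime becomes a `LinearTimeWarp` with `time_scalar` set to the playback speed, and a zero speed becomes a `FreezeFrame` (a `LinearTimeWarp` subclass with `time_scalar` fixed at 0). A minimal sketch of the same decision outside Hiero, assuming only the `opentimelineio` package and a hypothetical clip name:

    import opentimelineio as otio

    def time_effect_for_speed(speed):
        # speed 1.0 needs no effect at all
        if speed == 1.0:
            return None
        # zero speed means a freeze frame
        if speed == 0.0:
            return otio.schema.FreezeFrame(name="FreezeFrame")
        # any other constant speed is a linear time warp
        return otio.schema.LinearTimeWarp(name="Speed", time_scalar=speed)

    clip = otio.schema.Clip(name="shot010_plate")  # hypothetical clip
    effect = time_effect_for_speed(2.0)
    if effect:
        clip.effects.append(effect)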
@@ -197,8 +267,12 @@ def create_otio_markers(otio_item, item):

 def create_otio_clip(track_item):
     clip = track_item.source()
-    source_in = track_item.sourceIn()
-    duration = track_item.sourceDuration()
     speed = track_item.playbackSpeed()
+    # flip if speed is negative
+    source_in = track_item.sourceIn() if speed > 0 else track_item.sourceOut()
+
+    duration = int(track_item.duration())
+
     fps = utils.get_rate(track_item) or self.project_fps
     name = track_item.name()

@@ -220,6 +294,11 @@ def create_otio_clip(track_item):
     create_otio_markers(otio_clip, track_item)
     create_otio_markers(otio_clip, track_item.source())

+    # only if video
+    if not clip.mediaSource().hasAudio():
+        # Add effects to clips
+        create_time_effects(otio_clip, track_item)
+
     return otio_clip
@@ -1,121 +0,0 @@
from pyblish import api
import hiero
import math


class CollectCalculateRetime(api.InstancePlugin):
    """Calculate Retiming of selected track items."""

    order = api.CollectorOrder + 0.02
    label = "Collect Calculate Retiming"
    hosts = ["hiero"]
    families = ['retime']

    def process(self, instance):
        margin_in = instance.data["retimeMarginIn"]
        margin_out = instance.data["retimeMarginOut"]
        self.log.debug("margin_in: '{0}', margin_out: '{1}'".format(
            margin_in, margin_out))

        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        track_item = instance.data["item"]

        # define basic clip frame range variables
        timeline_in = int(track_item.timelineIn())
        timeline_out = int(track_item.timelineOut())
        source_in = int(track_item.sourceIn())
        source_out = int(track_item.sourceOut())
        speed = track_item.playbackSpeed()
        self.log.debug(
            "_BEFORE: \n timeline_in: `{0}`,\n timeline_out: `{1}`,"
            "\n source_in: `{2}`,\n source_out: `{3}`,\n speed: `{4}`,"
            "\n handle_start: `{5}`,\n handle_end: `{6}`".format(
                timeline_in,
                timeline_out,
                source_in,
                source_out,
                speed,
                handle_start,
                handle_end
            ))

        # loop within subtrack items
        source_in_change = 0
        source_out_change = 0
        for s_track_item in track_item.linkedItems():
            if isinstance(s_track_item, hiero.core.EffectTrackItem) \
                    and "TimeWarp" in s_track_item.node().Class():

                # adding timewarp attribute to instance
                if not instance.data.get("timeWarpNodes", None):
                    instance.data["timeWarpNodes"] = list()

                # ignore item if not enabled
                if s_track_item.isEnabled():
                    node = s_track_item.node()
                    name = node["name"].value()
                    look_up = node["lookup"].value()
                    animated = node["lookup"].isAnimated()
                    if animated:
                        look_up = [
                            ((node["lookup"].getValueAt(i)) - i)
                            for i in range(
                                (timeline_in - handle_start),
                                (timeline_out + handle_end) + 1)
                        ]
                        # calculate difference
                        diff_in = (node["lookup"].getValueAt(
                            timeline_in)) - timeline_in
                        diff_out = (node["lookup"].getValueAt(
                            timeline_out)) - timeline_out

                        # calculate source
                        source_in_change += diff_in
                        source_out_change += diff_out

                        # calculate speed
                        speed_in = (node["lookup"].getValueAt(timeline_in) / (
                            float(timeline_in) * .01)) * .01
                        speed_out = (node["lookup"].getValueAt(timeline_out) / (
                            float(timeline_out) * .01)) * .01

                        # calculate handles
                        handle_start = int(
                            math.ceil(
                                (handle_start * speed_in * 1000) / 1000.0)
                        )

                        handle_end = int(
                            math.ceil(
                                (handle_end * speed_out * 1000) / 1000.0)
                        )
                        self.log.debug(
                            ("diff_in, diff_out", diff_in, diff_out))
                        self.log.debug(
                            ("source_in_change, source_out_change",
                             source_in_change, source_out_change))

                    instance.data["timeWarpNodes"].append({"Class": "TimeWarp",
                                                           "name": name,
                                                           "lookup": look_up})

        self.log.debug((source_in_change, source_out_change))
        # recalculate handles by the speed
        handle_start *= speed
        handle_end *= speed
        self.log.debug("speed: handle_start: '{0}', handle_end: '{1}'".format(
            handle_start, handle_end))

        source_in += int(source_in_change)
        source_out += int(source_out_change * speed)
        handle_start += margin_in
        handle_end += margin_out
        self.log.debug("margin: handle_start: '{0}', handle_end: '{1}'".format(
            handle_start, handle_end))

        # add all data to Instance
        instance.data["sourceIn"] = source_in
        instance.data["sourceOut"] = source_out
        instance.data["sourceInH"] = int(source_in - math.ceil(
            (handle_start * 1000) / 1000.0))
        instance.data["sourceOutH"] = int(source_out + math.ceil(
            (handle_end * 1000) / 1000.0))
        instance.data["speed"] = speed

        self.log.debug("timeWarpNodes: {}".format(
            instance.data["timeWarpNodes"]))
        self.log.debug("sourceIn: {}".format(instance.data["sourceIn"]))
        self.log.debug("sourceOut: {}".format(instance.data["sourceOut"]))
        self.log.debug("speed: {}".format(instance.data["speed"]))
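The handle arithmetic in the deleted plugin above is dense: each handle is scaled by the local TimeWarp speed at the clip boundary and rounded up (the `* 1000) / 1000.0` is a no-op around `math.ceil`, kept from the original idiom), and later multiplied by the item's constant playback speed. A worked sketch of just that arithmetic, with hypothetical numbers and no Hiero required:

    import math

    # hypothetical inputs: 10-frame handles, a TimeWarp running at
    # 150% at the head and 50% at the tail, item playback speed 2.0
    handle_start, handle_end = 10, 10
    speed_in, speed_out = 1.5, 0.5
    speed = 2.0

    # scale each handle by the local timewarp speed, rounding up
    handle_start = int(math.ceil((handle_start * speed_in * 1000) / 1000.0))
    handle_end = int(math.ceil((handle_end * speed_out * 1000) / 1000.0))
    print(handle_start, handle_end)  # 15 5

    # then scale by the constant playback speed of the track item
    handle_start *= speed
    handle_end *= speed
    print(handle_start, handle_end)  # 30.0 10.0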
@@ -1,23 +0,0 @@
from pyblish import api


class CollectFramerate(api.ContextPlugin):
    """Collect framerate from selected sequence."""

    order = api.CollectorOrder + 0.001
    label = "Collect Framerate"
    hosts = ["hiero"]

    def process(self, context):
        sequence = context.data["activeSequence"]
        context.data["fps"] = self.get_rate(sequence)
        self.log.info("Framerate is collected: {}".format(context.data["fps"]))

    def get_rate(self, sequence):
        num, den = sequence.framerate().toRational()
        rate = float(num) / float(den)

        if rate.is_integer():
            return rate

        return round(rate, 3)
@@ -1,30 +0,0 @@
from pyblish import api


class CollectClipMetadata(api.InstancePlugin):
    """Collect Metadata from selected track items."""

    order = api.CollectorOrder + 0.01
    label = "Collect Metadata"
    hosts = ["hiero"]

    def process(self, instance):
        item = instance.data["item"]
        ti_metadata = self.metadata_to_string(dict(item.metadata()))
        ms_metadata = self.metadata_to_string(
            dict(item.source().mediaSource().metadata()))

        instance.data["clipMetadata"] = ti_metadata
        instance.data["mediaSourceMetadata"] = ms_metadata

        self.log.info(instance.data["clipMetadata"])
        self.log.info(instance.data["mediaSourceMetadata"])
        return

    def metadata_to_string(self, metadata):
        data = dict()
        for k, v in metadata.items():
            if v not in ["-", ""]:
                data[str(k)] = v

        return data
@@ -1,90 +0,0 @@
import pyblish.api
import opentimelineio.opentime as otio_ot


class CollectClipTimecodes(pyblish.api.InstancePlugin):
    """Collect time with OpenTimelineIO:
    source_h(In,Out)[timecode, sec]
    timeline(In,Out)[timecode, sec]
    """

    order = pyblish.api.CollectorOrder + 0.101
    label = "Collect Timecodes"
    hosts = ["hiero"]

    def process(self, instance):

        data = dict()
        self.log.debug("__ instance.data: {}".format(instance.data))
        # Timeline data.
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        source_in_h = instance.data("sourceInH",
                                    instance.data("sourceIn") - handle_start)
        source_out_h = instance.data("sourceOutH",
                                     instance.data("sourceOut") + handle_end)

        timeline_in = instance.data["clipIn"]
        timeline_out = instance.data["clipOut"]

        # set frame start with tag or take it from timeline
        frame_start = instance.data.get("startingFrame")

        if not frame_start:
            frame_start = timeline_in

        source = instance.data.get("source")

        otio_data = dict()
        self.log.debug("__ source: `{}`".format(source))

        rate_fps = instance.context.data["fps"]

        otio_in_h_ratio = otio_ot.RationalTime(
            value=(source.timecodeStart() + (
                source_in_h + (source_out_h - source_in_h))),
            rate=rate_fps)

        otio_out_h_ratio = otio_ot.RationalTime(
            value=(source.timecodeStart() + source_in_h),
            rate=rate_fps)

        otio_timeline_in_ratio = otio_ot.RationalTime(
            value=int(
                instance.data.get("timelineTimecodeStart", 0)) + timeline_in,
            rate=rate_fps)

        otio_timeline_out_ratio = otio_ot.RationalTime(
            value=int(
                instance.data.get("timelineTimecodeStart", 0)) + timeline_out,
            rate=rate_fps)

        otio_data.update({
            "otioClipInHTimecode": otio_ot.to_timecode(otio_in_h_ratio),
            "otioClipOutHTimecode": otio_ot.to_timecode(otio_out_h_ratio),
            "otioClipInHSec": otio_ot.to_seconds(otio_in_h_ratio),
            "otioClipOutHSec": otio_ot.to_seconds(otio_out_h_ratio),
            "otioTimelineInTimecode": otio_ot.to_timecode(
                otio_timeline_in_ratio),
            "otioTimelineOutTimecode": otio_ot.to_timecode(
                otio_timeline_out_ratio),
            "otioTimelineInSec": otio_ot.to_seconds(otio_timeline_in_ratio),
            "otioTimelineOutSec": otio_ot.to_seconds(otio_timeline_out_ratio)
        })

        data.update({
            "otioData": otio_data,
            "sourceTimecodeIn": otio_ot.to_timecode(otio_in_h_ratio),
            "sourceTimecodeOut": otio_ot.to_timecode(otio_out_h_ratio)
        })
        instance.data.update(data)
        self.log.debug("data: {}".format(instance.data))
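The deleted collector above leans on `opentimelineio.opentime` for frame-to-timecode and frame-to-seconds conversion. A minimal sketch of those calls, assuming a 24 fps rate and a hypothetical timecode start of 01:00:00:00 (86400 frames):

    import opentimelineio.opentime as otio_ot

    rate_fps = 24.0
    frame = 86400 + 12  # hypothetical: timecode start plus 12 frames

    t = otio_ot.RationalTime(value=frame, rate=rate_fps)
    print(otio_ot.to_timecode(t))  # 01:00:00:12
    print(otio_ot.to_seconds(t))   # 3600.5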
@@ -6,7 +6,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
     """Collect soft effects instances."""

     order = pyblish.api.CollectorOrder - 0.579
-    label = "Pre-collect Clip Effects Instances"
+    label = "Precollect Clip Effects Instances"
     families = ["clip"]

     def process(self, instance):
@@ -40,6 +40,12 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
             if review and review_track_index == _track_index:
                 continue
             for sitem in sub_track_items:
                 effect = None
+                # make sure this subtrack item is related to the track item
+                if ((track_item not in sitem.linkedItems())
+                        and (len(sitem.linkedItems()) > 0)):
+                    continue
+
                 if not (track_index <= _track_index):
                     continue

@@ -162,7 +168,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
                 # grab animation including handles
                 knob_anim = [node[knob].getValueAt(i)
                              for i in range(
-                                 self.clip_in_h, self.clip_in_h + 1)]
+                                 self.clip_in_h, self.clip_out_h + 1)]

                 node_serialized[knob] = knob_anim
             else:
@@ -133,6 +133,13 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
             # create audio subset instance
             self.create_audio_instance(context, **data)

+            # add colorspace data
+            instance.data.update({
+                "versionData": {
+                    "colorspace": track_item.sourceMediaColourTransform(),
+                }
+            })
+
             # add audioReview attribute to plate instance data
             # if reviewTrack is on
             if tag_data.get("reviewTrack") is not None:
@@ -304,9 +311,10 @@ class PrecollectInstances(pyblish.api.ContextPlugin):

     @staticmethod
     def create_otio_time_range_from_timeline_item_data(track_item):
+        speed = track_item.playbackSpeed()
         timeline = phiero.get_current_sequence()
         frame_start = int(track_item.timelineIn())
-        frame_duration = int(track_item.sourceDuration())
+        frame_duration = int(track_item.sourceDuration() / speed)
         fps = timeline.framerate().toFloat()

         return hiero_export.create_otio_time_range(
@@ -376,6 +384,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
         subtracks = []
         subTrackItems = flatten(clip.parent().subTrackItems())
         for item in subTrackItems:
+            if "TimeWarp" in item.name():
+                continue
             # skip all annotations
             if isinstance(item, hiero.core.Annotation):
                 continue
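The `frame_duration` change in the second hunk above divides the source duration by the playback speed because a retimed clip consumes source frames at a different rate than it occupies timeline frames. A quick check with hypothetical numbers:

    # hypothetical: 48 source frames played at 2x cover 24 timeline frames
    source_duration = 48
    speed = 2.0
    frame_duration = int(source_duration / speed)
    print(frame_duration)  # 24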
@@ -1,70 +0,0 @@
import pyblish.api


class CollectFrameRanges(pyblish.api.InstancePlugin):
    """ Collect all frame ranges.
    """

    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect Frame Ranges"
    hosts = ["hiero"]
    families = ["clip", "effect"]

    def process(self, instance):

        data = dict()
        track_item = instance.data["item"]

        # handles
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        # source frame ranges
        source_in = int(track_item.sourceIn())
        source_out = int(track_item.sourceOut())
        source_in_h = int(source_in - handle_start)
        source_out_h = int(source_out + handle_end)

        # timeline frame ranges
        clip_in = int(track_item.timelineIn())
        clip_out = int(track_item.timelineOut())
        clip_in_h = clip_in - handle_start
        clip_out_h = clip_out + handle_end

        # durations
        clip_duration = (clip_out - clip_in) + 1
        clip_duration_h = clip_duration + (handle_start + handle_end)

        # set frame start with tag or take it from timeline `startingFrame`
        frame_start = instance.data.get("workfileFrameStart")

        if not frame_start:
            frame_start = clip_in

        frame_end = frame_start + (clip_out - clip_in)

        data.update({
            # media source frame range
            "sourceIn": source_in,
            "sourceOut": source_out,
            "sourceInH": source_in_h,
            "sourceOutH": source_out_h,

            # timeline frame range
            "clipIn": clip_in,
            "clipOut": clip_out,
            "clipInH": clip_in_h,
            "clipOutH": clip_out_h,

            # workfile frame range
            "frameStart": frame_start,
            "frameEnd": frame_end,

            "clipDuration": clip_duration,
            "clipDurationH": clip_duration_h,

            "fps": instance.context.data["fps"]
        })
        self.log.info("Frame range data for instance `{}` are: {}".format(
            instance, data))
        instance.data.update(data)
@@ -1,116 +0,0 @@
import pyblish.api
import avalon.api as avalon


class CollectHierarchy(pyblish.api.ContextPlugin):
    """Collecting hierarchy from `parents`.

    present in `clip` family instances coming from the request json data file

    It will add `hierarchical_context` into each instance for integrate
    plugins to be able to create needed parents for the context if they
    don't exist yet
    """

    label = "Collect Hierarchy"
    order = pyblish.api.CollectorOrder
    families = ["clip"]

    def process(self, context):
        temp_context = {}
        project_name = avalon.Session["AVALON_PROJECT"]
        final_context = {}
        final_context[project_name] = {}
        final_context[project_name]['entity_type'] = 'Project'

        for instance in context:
            self.log.info("Processing instance: `{}` ...".format(instance))

            # shot data dict
            shot_data = {}
            families = instance.data.get("families")

            # filter out all inappropriate instances
            if not instance.data["publish"]:
                continue
            if not families:
                continue
            # exclude other families than self.families with intersection
            if not set(self.families).intersection(families):
                continue

            # exclude if heroTrack is not True
            if not instance.data.get("heroTrack"):
                continue

            # update families to include `shot` for hierarchy integration
            instance.data["families"] = families + ["shot"]

            # get asset build data if any available
            shot_data["inputs"] = [
                x["_id"] for x in instance.data.get("assetbuilds", [])
            ]

            # suppose that all instances are Shots
            shot_data['entity_type'] = 'Shot'
            shot_data['tasks'] = instance.data.get("tasks") or []
            shot_data["comments"] = instance.data.get("comments", [])

            shot_data['custom_attributes'] = {
                "handleStart": instance.data["handleStart"],
                "handleEnd": instance.data["handleEnd"],
                "frameStart": instance.data["frameStart"],
                "frameEnd": instance.data["frameEnd"],
                "clipIn": instance.data["clipIn"],
                "clipOut": instance.data["clipOut"],
                'fps': instance.context.data["fps"],
                "resolutionWidth": instance.data["resolutionWidth"],
                "resolutionHeight": instance.data["resolutionHeight"],
                "pixelAspect": instance.data["pixelAspect"]
            }

            actual = {instance.data["asset"]: shot_data}

            for parent in reversed(instance.data["parents"]):
                next_dict = {}
                parent_name = parent["entity_name"]
                next_dict[parent_name] = {}
                next_dict[parent_name]["entity_type"] = parent[
                    "entity_type"].capitalize()
                next_dict[parent_name]["childs"] = actual
                actual = next_dict

            temp_context = self._update_dict(temp_context, actual)

        # skip if nothing for hierarchy available
        if not temp_context:
            return

        final_context[project_name]['childs'] = temp_context

        # adding hierarchy context to context
        context.data["hierarchyContext"] = final_context
        self.log.debug("context.data[hierarchyContext] is: {}".format(
            context.data["hierarchyContext"]))

    def _update_dict(self, parent_dict, child_dict):
        """
        Nesting each children into its parent.

        Args:
            parent_dict (dict): parent dict which should be nested with children
            child_dict (dict): children dict which should be ingested
        """

        for key in parent_dict:
            if key in child_dict and isinstance(parent_dict[key], dict):
                child_dict[key] = self._update_dict(
                    parent_dict[key], child_dict[key]
                )
            else:
                if parent_dict.get(key) and child_dict.get(key):
                    continue
                else:
                    child_dict[key] = parent_dict[key]

        return child_dict
@@ -1,169 +0,0 @@
from pyblish import api
import os
import re
import clique


class CollectPlates(api.InstancePlugin):
    """Collect plate representations.
    """

    # Run just before CollectSubsets
    order = api.CollectorOrder + 0.1020
    label = "Collect Plates"
    hosts = ["hiero"]
    families = ["plate"]

    def process(self, instance):
        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        self.main_clip = instance.data["item"]
        # get plate source attributes
        source_media = instance.data["sourceMedia"]
        source_path = instance.data["sourcePath"]
        source_first = instance.data["sourceFirst"]
        frame_start = instance.data["frameStart"]
        frame_end = instance.data["frameEnd"]
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]
        source_in = instance.data["sourceIn"]
        source_out = instance.data["sourceOut"]
        source_in_h = instance.data["sourceInH"]
        source_out_h = instance.data["sourceOutH"]

        # define if review media is sequence
        is_sequence = bool(not source_media.singleFile())
        self.log.debug("is_sequence: {}".format(is_sequence))

        file_dir = os.path.dirname(source_path)
        file = os.path.basename(source_path)
        ext = os.path.splitext(file)[-1]

        # detect if sequence
        if not is_sequence:
            # is video file
            files = file
        else:
            files = list()
            spliter, padding = self.detect_sequence(file)
            self.log.debug("_ spliter, padding: {}, {}".format(
                spliter, padding))
            base_name = file.split(spliter)[0]

            # define collection and calculate frame range
            collection = clique.Collection(
                base_name,
                ext,
                padding,
                set(range(
                    int(source_first + source_in_h),
                    int(source_first + source_out_h) + 1
                ))
            )
            self.log.debug("_ collection: {}".format(collection))

            real_files = os.listdir(file_dir)
            self.log.debug("_ real_files: {}".format(real_files))

            # collect frames to repre files list
            self.handle_start_exclude = list()
            self.handle_end_exclude = list()
            for findex, item in enumerate(collection):
                if item not in real_files:
                    self.log.debug("_ item: {}".format(item))
                    test_index = findex + int(source_first + source_in_h)
                    test_start = int(source_first + source_in)
                    test_end = int(source_first + source_out)
                    if (test_index < test_start):
                        self.handle_start_exclude.append(test_index)
                    elif (test_index > test_end):
                        self.handle_end_exclude.append(test_index)
                    continue
                files.append(item)

        # change label
        instance.data["label"] = "{0} - ({1})".format(
            instance.data["label"], ext
        )

        self.log.debug("Instance review: {}".format(instance.data["name"]))

        # adding representation for review mov
        representation = {
            "files": files,
            "stagingDir": file_dir,
            "frameStart": frame_start - handle_start,
            "frameEnd": frame_end + handle_end,
            "name": ext[1:],
            "ext": ext[1:]
        }

        instance.data["representations"].append(representation)
        self.version_data(instance)

        self.log.debug(
            "Added representations: {}".format(
                instance.data["representations"]))

        self.log.debug(
            "instance.data: {}".format(instance.data))

    def version_data(self, instance):
        transfer_data = [
            "handleStart", "handleEnd", "sourceIn", "sourceOut",
            "frameStart", "frameEnd", "sourceInH", "sourceOutH",
            "clipIn", "clipOut", "clipInH", "clipOutH", "asset",
            "track"
        ]

        version_data = dict()
        # pass data to version
        version_data.update({k: instance.data[k] for k in transfer_data})

        if 'version' in instance.data:
            version_data["version"] = instance.data["version"]

        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        if self.handle_start_exclude:
            handle_start -= len(self.handle_start_exclude)

        if self.handle_end_exclude:
            handle_end -= len(self.handle_end_exclude)

        # add to data of representation
        version_data.update({
            "colorspace": self.main_clip.sourceMediaColourTransform(),
            "families": instance.data["families"],
            "subset": instance.data["subset"],
            "fps": instance.data["fps"],
            "handleStart": handle_start,
            "handleEnd": handle_end
        })
        instance.data["versionData"] = version_data

    def detect_sequence(self, file):
        """ Get identifying pattern for image sequence

        Can find file.0001.ext, file.%02d.ext, file.####.ext

        Return:
            string: any matching sequence pattern
            int: padding of sequence numbering
        """
        foundall = re.findall(
            r"(#+)|(%\d+d)|(?<=[^a-zA-Z0-9])(\d+)(?=\.\w+$)", file)
        if foundall:
            found = sorted(list(set(foundall[0])))[-1]

            if "%" in found:
                padding = int(re.findall(r"\d+", found)[-1])
            else:
                padding = len(found)

            return found, padding
        else:
            return None, None
@@ -1,261 +0,0 @@
from pyblish import api
import os
import clique
from openpype.hosts.hiero.api import (
    is_overlapping, get_sequence_pattern_and_padding)


class CollectReview(api.InstancePlugin):
    """Collect review representation.
    """

    # Run just before CollectSubsets
    order = api.CollectorOrder + 0.1022
    label = "Collect Review"
    hosts = ["hiero"]
    families = ["review"]

    def get_review_item(self, instance):
        """
        Get review clip track item from review track name

        Args:
            instance (obj): publishing instance

        Returns:
            hiero.core.TrackItem: corresponding track item

        Raises:
            Exception: description

        """
        review_track = instance.data.get("reviewTrack")
        video_tracks = instance.context.data["videoTracks"]
        for track in video_tracks:
            if review_track not in track.name():
                continue
            for item in track.items():
                self.log.debug(item)
                if is_overlapping(item, self.main_clip):
                    self.log.debug("Winner is: {}".format(item))
                    break

        # validate the clip is fully covered by the review clip
        assert is_overlapping(
            item, self.main_clip, strict=True), (
            "Review clip not covering fully "
            "the clip `{}`").format(self.main_clip.name())

        return item

    def process(self, instance):
        tags = ["review", "ftrackreview"]

        # get reviewable item from `review` instance.data attribute
        self.main_clip = instance.data.get("item")
        self.rw_clip = self.get_review_item(instance)

        # let user know there is missing review clip and convert instance
        # back as not reviewable
        assert self.rw_clip, "Missing reviewable clip for '{}'".format(
            self.main_clip.name()
        )

        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        # get review media main info
        rw_source = self.rw_clip.source().mediaSource()
        rw_source_duration = int(rw_source.duration())
        self.rw_source_path = rw_source.firstpath()
        rw_source_file_info = rw_source.fileinfos().pop()

        # define if review media is sequence
        is_sequence = bool(not rw_source.singleFile())
        self.log.debug("is_sequence: {}".format(is_sequence))

        # get handles
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        # review timeline and source frame ranges
        rw_clip_in = int(self.rw_clip.timelineIn())
        rw_clip_out = int(self.rw_clip.timelineOut())
        self.rw_clip_source_in = int(self.rw_clip.sourceIn())
        self.rw_clip_source_out = int(self.rw_clip.sourceOut())
        rw_source_first = int(rw_source_file_info.startFrame())

        # calculate delivery source_in and source_out
        # main_clip_timeline_in - review_item_timeline_in + 1
        main_clip_in = self.main_clip.timelineIn()
        main_clip_out = self.main_clip.timelineOut()

        source_in_diff = main_clip_in - rw_clip_in
        source_out_diff = main_clip_out - rw_clip_out

        if source_in_diff:
            self.rw_clip_source_in += source_in_diff
        if source_out_diff:
            self.rw_clip_source_out += source_out_diff

        # review clip durations
        rw_clip_duration = (
            self.rw_clip_source_out - self.rw_clip_source_in) + 1
        rw_clip_duration_h = rw_clip_duration + (
            handle_start + handle_end)

        # add created data to review item data
        instance.data["reviewItemData"] = {
            "mediaDuration": rw_source_duration
        }

        file_dir = os.path.dirname(self.rw_source_path)
        file = os.path.basename(self.rw_source_path)
        ext = os.path.splitext(file)[-1]

        # detect if sequence
        if not is_sequence:
            # is video file
            files = file
        else:
            files = list()
            spliter, padding = get_sequence_pattern_and_padding(file)
            self.log.debug("_ spliter, padding: {}, {}".format(
                spliter, padding))
            base_name = file.split(spliter)[0]

            # define collection and calculate frame range
            collection = clique.Collection(base_name, ext, padding, set(range(
                int(rw_source_first + int(
                    self.rw_clip_source_in - handle_start)),
                int(rw_source_first + int(
                    self.rw_clip_source_out + handle_end) + 1))))
            self.log.debug("_ collection: {}".format(collection))

            real_files = os.listdir(file_dir)
            self.log.debug("_ real_files: {}".format(real_files))

            # collect frames to repre files list
            for item in collection:
                if item not in real_files:
                    self.log.debug("_ item: {}".format(item))
                    continue
                files.append(item)

            # add prep tag
            tags.extend(["prep", "delete"])

        # change label
        instance.data["label"] = "{0} - ({1})".format(
            instance.data["label"], ext
        )

        self.log.debug("Instance review: {}".format(instance.data["name"]))

        # adding representation for review mov
        representation = {
            "files": files,
            "stagingDir": file_dir,
            "frameStart": rw_source_first + self.rw_clip_source_in,
            "frameEnd": rw_source_first + self.rw_clip_source_out,
            "frameStartFtrack": int(
                self.rw_clip_source_in - handle_start),
            "frameEndFtrack": int(self.rw_clip_source_out + handle_end),
            "step": 1,
            "fps": instance.data["fps"],
            "name": "review",
            "tags": tags,
            "ext": ext[1:]
        }

        if rw_source_duration > rw_clip_duration_h:
            self.log.debug("Media duration higher: {}".format(
                (rw_source_duration - rw_clip_duration_h)))
            representation.update({
                "frameStart": rw_source_first + int(
                    self.rw_clip_source_in - handle_start),
                "frameEnd": rw_source_first + int(
                    self.rw_clip_source_out + handle_end),
                "tags": ["_cut-bigger", "prep", "delete"]
            })
        elif rw_source_duration < rw_clip_duration_h:
            self.log.debug("Media duration lower: {}".format(
                (rw_source_duration - rw_clip_duration_h)))
            representation.update({
                "frameStart": rw_source_first + int(
                    self.rw_clip_source_in - handle_start),
                "frameEnd": rw_source_first + int(
                    self.rw_clip_source_out + handle_end),
                "tags": ["prep", "delete"]
            })

        instance.data["representations"].append(representation)

        self.create_thumbnail(instance)

        self.log.debug(
            "Added representations: {}".format(
                instance.data["representations"]))

    def create_thumbnail(self, instance):
        source_file = os.path.basename(self.rw_source_path)
        spliter, padding = get_sequence_pattern_and_padding(source_file)

        if spliter:
            head, ext = source_file.split(spliter)
        else:
            head, ext = os.path.splitext(source_file)

        # staging dir creation
        staging_dir = os.path.dirname(
            self.rw_source_path)

        # get thumbnail frame from the middle
        thumb_frame = int(self.rw_clip_source_in + (
            (self.rw_clip_source_out - self.rw_clip_source_in) / 2))

        thumb_file = "{}thumbnail{}{}".format(head, thumb_frame, ".png")
        thumb_path = os.path.join(staging_dir, thumb_file)

        thumbnail = self.rw_clip.thumbnail(thumb_frame).save(
            thumb_path,
            format='png'
        )
        self.log.debug(
            "__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))

        self.log.debug("__ thumbnail: {}".format(thumbnail))
        thumb_representation = {
            'files': thumb_file,
            'stagingDir': staging_dir,
            'name': "thumbnail",
            'thumbnail': True,
            'ext': "png"
        }
        instance.data["representations"].append(
            thumb_representation)

    def version_data(self, instance):
        transfer_data = [
            "handleStart", "handleEnd", "sourceIn", "sourceOut",
            "frameStart", "frameEnd", "sourceInH", "sourceOutH",
            "clipIn", "clipOut", "clipInH", "clipOutH", "asset",
            "track"
        ]

        version_data = dict()
        # pass data to version
        version_data.update({k: instance.data[k] for k in transfer_data})

        if 'version' in instance.data:
            version_data["version"] = instance.data["version"]

        # add to data of representation
        version_data.update({
            "colorspace": self.rw_clip.sourceMediaColourTransform(),
            "families": instance.data["families"],
            "subset": instance.data["subset"],
            "fps": instance.data["fps"]
        })
        instance.data["versionData"] = version_data
@@ -1,57 +0,0 @@
import os
from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
import pyblish
import openpype


class ExtractAudioFile(openpype.api.Extractor):
    """Extracts audio subset file from all active timeline audio tracks"""

    order = pyblish.api.ExtractorOrder
    label = "Extract Subset Audio"
    hosts = ["hiero"]
    families = ["audio"]
    match = pyblish.api.Intersection

    def process(self, instance):
        # get sequence
        sequence = instance.context.data["activeSequence"]
        subset = instance.data["subset"]

        # get timeline in / out
        clip_in = instance.data["clipIn"]
        clip_out = instance.data["clipOut"]
        # get handles from context
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        staging_dir = self.staging_dir(instance)
        self.log.info("Created staging dir: {}...".format(staging_dir))

        # path to wav file
        audio_file = os.path.join(
            staging_dir, "{}.wav".format(subset)
        )

        # export audio to disk
        writeSequenceAudioWithHandles(
            audio_file,
            sequence,
            clip_in,
            clip_out,
            handle_start,
            handle_end
        )

        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        representation = {
            'files': os.path.basename(audio_file),
            'stagingDir': staging_dir,
            'name': "wav",
            'ext': "wav"
        }

        instance.data["representations"].append(representation)
@@ -1,334 +0,0 @@
import os
import sys
import six
import errno
from pyblish import api
import openpype
import clique
from avalon.vendor import filelink


class ExtractReviewPreparation(openpype.api.Extractor):
    """Cut up clips from long video file"""

    order = api.ExtractorOrder
    label = "Extract Review Preparation"
    hosts = ["hiero"]
    families = ["review"]

    # presets
    tags_addition = []

    def process(self, instance):
        inst_data = instance.data
        asset = inst_data["asset"]
        review_item_data = instance.data.get("reviewItemData")

        # get representations and loop them
        representations = inst_data["representations"]

        # get resolution default
        resolution_width = inst_data["resolutionWidth"]
        resolution_height = inst_data["resolutionHeight"]

        # frame range data
        media_duration = review_item_data["mediaDuration"]

        ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")
        ffprobe_path = openpype.lib.get_ffmpeg_tool_path("ffprobe")

        # filter out mov and img sequences
        representations_new = representations[:]
        for repre in representations:
            input_args = list()
            output_args = list()

            tags = repre.get("tags", [])

            # check if supported tags are in representation for activation
            filter_tag = False
            for tag in ["_cut-bigger", "prep"]:
                if tag in tags:
                    filter_tag = True
                    break
            if not filter_tag:
                continue

            self.log.debug("__ repre: {}".format(repre))

            files = repre.get("files")
            staging_dir = repre.get("stagingDir")
            fps = repre.get("fps")
            ext = repre.get("ext")

            # make paths
            full_output_dir = os.path.join(
                staging_dir, "cuts")

            if isinstance(files, list):
                new_files = list()

                # frame range delivery included handles
                frame_start = (
                    inst_data["frameStart"] - inst_data["handleStart"])
                frame_end = (
                    inst_data["frameEnd"] + inst_data["handleEnd"])
                self.log.debug("_ frame_start: {}".format(frame_start))
                self.log.debug("_ frame_end: {}".format(frame_end))

                # make collection from input files list
                collections, remainder = clique.assemble(files)
                collection = collections.pop()
                self.log.debug("_ collection: {}".format(collection))

                # name components
                head = collection.format("{head}")
                padding = collection.format("{padding}")
                tail = collection.format("{tail}")
                self.log.debug("_ head: {}".format(head))
                self.log.debug("_ padding: {}".format(padding))
                self.log.debug("_ tail: {}".format(tail))

                # make destination file with instance data
                # frame start and end range
                index = 0
                for image in collection:
                    dst_file_num = frame_start + index
                    dst_file_name = head + str(padding % dst_file_num) + tail
                    src = os.path.join(staging_dir, image)
                    dst = os.path.join(full_output_dir, dst_file_name)
                    self.log.info("Creating temp hardlinks: {}".format(dst))
                    self.hardlink_file(src, dst)
                    new_files.append(dst_file_name)
                    index += 1

                self.log.debug("_ new_files: {}".format(new_files))

            else:
                # ffmpeg when single file
                new_files = "{}_{}".format(asset, files)

                # frame range
                frame_start = repre.get("frameStart")
                frame_end = repre.get("frameEnd")

                full_input_path = os.path.join(
                    staging_dir, files)

                os.path.isdir(full_output_dir) or os.makedirs(full_output_dir)

                full_output_path = os.path.join(
                    full_output_dir, new_files)

                self.log.debug(
                    "__ full_input_path: {}".format(full_input_path))
                self.log.debug(
                    "__ full_output_path: {}".format(full_output_path))

                # check if audio stream is in input video file
                ffprob_cmd = (
                    "\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams"
                    " -select_streams a -loglevel error"
                ).format(**locals())

                self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
                audio_check_output = openpype.api.run_subprocess(ffprob_cmd)
                self.log.debug(
                    "audio_check_output: {}".format(audio_check_output))

                # Fix one frame difference
                """ TODO: this is just work-around for issue:
                    https://github.com/pypeclub/pype/issues/659
                """
                frame_duration_extend = 1
                if audio_check_output and ("audio" in inst_data["families"]):
                    frame_duration_extend = 0

                # translate frame to sec
                start_sec = float(frame_start) / fps
                duration_sec = float(
                    (frame_end - frame_start) + frame_duration_extend) / fps

                empty_add = None

                # check if not missing frames at start
                if (start_sec < 0) or (media_duration < frame_end):
                    # for later switching off `-c:v copy` output arg
                    empty_add = True

                    # init empty variables
                    video_empty_start = video_layer_start = ""
                    audio_empty_start = audio_layer_start = ""
                    video_empty_end = video_layer_end = ""
                    audio_empty_end = audio_layer_end = ""
                    audio_input = audio_output = ""
                    v_inp_idx = 0
                    concat_n = 1

                    # try to get video native resolution data
                    try:
                        resolution_output = openpype.api.run_subprocess((
                            "\"{ffprobe_path}\" -i \"{full_input_path}\""
                            " -v error "
                            "-select_streams v:0 -show_entries "
                            "stream=width,height -of csv=s=x:p=0"
                        ).format(**locals()))

                        x, y = resolution_output.split("x")
                        resolution_width = int(x)
                        resolution_height = int(y)
                    except Exception as _ex:
                        self.log.warning(
                            "Video native resolution is untraceable: "
                            "{}".format(_ex))

                    if audio_check_output:
                        # adding input for empty audio
                        input_args.append("-f lavfi -i anullsrc")

                        # define audio empty concat variables
                        audio_input = "[1:a]"
                        audio_output = ":a=1"
                        v_inp_idx = 1

                    # adding input for video black frame
                    input_args.append((
                        "-f lavfi -i \"color=c=black:"
                        "s={resolution_width}x{resolution_height}:r={fps}\""
                    ).format(**locals()))

                    if (start_sec < 0):
                        # recalculate input video timing
                        empty_start_dur = abs(start_sec)
                        start_sec = 0
                        duration_sec = float(frame_end - (
                            frame_start + (empty_start_dur * fps)) + 1) / fps

                        # define starting empty video concat variables
                        video_empty_start = (
                            "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];"  # noqa
                        ).format(**locals())
                        video_layer_start = "[gv0]"

                        if audio_check_output:
                            # define starting empty audio concat variables
                            audio_empty_start = (
                                "[0]atrim=duration={empty_start_dur}[ga0];"
                            ).format(**locals())
                            audio_layer_start = "[ga0]"

                        # alter concat number of clips
                        concat_n += 1

                    # check if not missing frames at the end
                    if (media_duration < frame_end):
                        # recalculate timing
                        empty_end_dur = float(
                            frame_end - media_duration + 1) / fps
                        duration_sec = float(
                            media_duration - frame_start) / fps

                        # define ending empty video concat variables
                        video_empty_end = (
                            "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
                        ).format(**locals())
                        video_layer_end = "[gv1]"

                        if audio_check_output:
                            # define ending empty audio concat variables
                            audio_empty_end = (
                                "[0]atrim=duration={empty_end_dur}[ga1];"
                            ).format(**locals())
                            audio_layer_end = "[ga1]"

                        # alter concat number of clips
                        concat_n += 1

                    # concatenating black frames together
                    output_args.append((
                        "-filter_complex \""
                        "{audio_empty_start}"
                        "{video_empty_start}"
                        "{audio_empty_end}"
                        "{video_empty_end}"
                        "{video_layer_start}{audio_layer_start}[1:v]{audio_input}"  # noqa
                        "{video_layer_end}{audio_layer_end}"
                        "concat=n={concat_n}:v=1{audio_output}\""
                    ).format(**locals()))

                # append ffmpeg input video clip
                input_args.append("-ss {}".format(start_sec))
                input_args.append("-t {}".format(duration_sec))
                input_args.append("-i \"{}\"".format(full_input_path))

                # add copy audio video codec if only shortening clip
                if ("_cut-bigger" in tags) and (not empty_add):
                    output_args.append("-c:v copy")

                # make sure it has no inter-frame compression
                output_args.append("-intra")

                # output filename
                output_args.append("-y \"{}\"".format(full_output_path))

                mov_args = [
                    "\"{}\"".format(ffmpeg_path),
                    " ".join(input_args),
                    " ".join(output_args)
                ]
                subprcs_cmd = " ".join(mov_args)

                # run subprocess
                self.log.debug("Executing: {}".format(subprcs_cmd))
                output = openpype.api.run_subprocess(subprcs_cmd)
                self.log.debug("Output: {}".format(output))

            repre_new = {
                "files": new_files,
                "stagingDir": full_output_dir,
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "frameStartFtrack": frame_start,
                "frameEndFtrack": frame_end,
                "step": 1,
                "fps": fps,
                "name": "cut_up_preview",
                "tags": [
                    "review", "ftrackreview", "delete"] + self.tags_addition,
                "ext": ext,
                "anatomy_template": "publish"
            }

            representations_new.append(repre_new)

        for repre in representations_new:
            if ("delete" in repre.get("tags", [])) and (
                    "cut_up_preview" not in repre["name"]):
                representations_new.remove(repre)

        self.log.debug(
            "Representations: {}".format(representations_new))
        instance.data["representations"] = representations_new

    def hardlink_file(self, src, dst):
        dirname = os.path.dirname(dst)

        # make sure the destination folder exists
        try:
            os.makedirs(dirname)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                self.log.critical("An unexpected error occurred.")
                six.reraise(*sys.exc_info())

        # create hardlinked file
        try:
            filelink.create(src, dst, filelink.HARDLINK)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                self.log.critical("An unexpected error occurred.")
                six.reraise(*sys.exc_info())
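The deleted extractor above pads a too-short cut with generated black video (and silence when the source carries audio) by adding lavfi inputs and joining segments with ffmpeg's concat filter. A simplified standalone command of the same shape, with hypothetical paths, a one-second black lead-in, and video only; the black segment must match the clip's size and rate for concat, and a real implementation would build an argument list instead of a shell string:

    import subprocess

    # hypothetical: prepend 1s of black to the first 2s of input.mov
    cmd = (
        'ffmpeg -f lavfi -i "color=c=black:s=1920x1080:r=24" '
        '-ss 0 -t 2 -i "input.mov" '
        '-filter_complex "[0]trim=duration=1[gv0];'
        '[gv0][1:v]concat=n=2:v=1" '
        '-y "output.mov"'
    )
    subprocess.run(cmd, shell=True, check=True)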
@@ -0,0 +1,171 @@
from pyblish import api
import hiero
import math
from openpype.hosts.hiero.otio.hiero_export import create_otio_time_range


class PrecollectRetime(api.InstancePlugin):
    """Calculate Retiming of selected track items."""

    order = api.CollectorOrder - 0.578
    label = "Precollect Retime"
    hosts = ["hiero"]
    families = ['retime_']

    def process(self, instance):
        if not instance.data.get("versionData"):
            instance.data["versionData"] = {}

        # get basic variables
        otio_clip = instance.data["otioClip"]

        source_range = otio_clip.source_range
        oc_source_fps = source_range.start_time.rate
        oc_source_in = source_range.start_time.value

        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]
        frame_start = instance.data["frameStart"]

        track_item = instance.data["item"]

        # define basic clip frame range variables
        timeline_in = int(track_item.timelineIn())
        timeline_out = int(track_item.timelineOut())
        source_in = int(track_item.sourceIn())
        source_out = int(track_item.sourceOut())
        speed = track_item.playbackSpeed()

        # calculate available material before retime
        available_in = int(track_item.handleInLength() * speed)
        available_out = int(track_item.handleOutLength() * speed)

        self.log.debug((
            "_BEFORE: \n timeline_in: `{0}`,\n timeline_out: `{1}`, \n "
            "source_in: `{2}`,\n source_out: `{3}`,\n speed: `{4}`,\n "
            "handle_start: `{5}`,\n handle_end: `{6}`").format(
                timeline_in,
                timeline_out,
                source_in,
                source_out,
                speed,
                handle_start,
                handle_end
        ))

        # loop within subtrack items
        time_warp_nodes = []
        source_in_change = 0
        source_out_change = 0
        for s_track_item in track_item.linkedItems():
            if isinstance(s_track_item, hiero.core.EffectTrackItem) \
                    and "TimeWarp" in s_track_item.node().Class():

                # adding timewarp attribute to instance
                time_warp_nodes = []

                # ignore item if not enabled
                if s_track_item.isEnabled():
                    node = s_track_item.node()
                    name = node["name"].value()
                    look_up = node["lookup"].value()
                    animated = node["lookup"].isAnimated()
                    if animated:
                        look_up = [
                            ((node["lookup"].getValueAt(i)) - i)
                            for i in range(
                                (timeline_in - handle_start),
                                (timeline_out + handle_end) + 1)
                        ]
                        # calculate difference
                        diff_in = (node["lookup"].getValueAt(
                            timeline_in)) - timeline_in
                        diff_out = (node["lookup"].getValueAt(
                            timeline_out)) - timeline_out

                        # calculate source
                        source_in_change += diff_in
                        source_out_change += diff_out

                        # calculate speed
                        speed_in = (node["lookup"].getValueAt(timeline_in) / (
                            float(timeline_in) * .01)) * .01
                        speed_out = (node["lookup"].getValueAt(timeline_out) / (
                            float(timeline_out) * .01)) * .01

                        # calculate handles
                        handle_start = int(
                            math.ceil(
                                (handle_start * speed_in * 1000) / 1000.0)
                        )

                        handle_end = int(
                            math.ceil(
                                (handle_end * speed_out * 1000) / 1000.0)
                        )
                        self.log.debug(
                            ("diff_in, diff_out", diff_in, diff_out))
                        self.log.debug(
                            ("source_in_change, source_out_change",
                             source_in_change, source_out_change))

                    time_warp_nodes.append({
                        "Class": "TimeWarp",
                        "name": name,
                        "lookup": look_up
                    })

        self.log.debug(
            "timewarp source in changes: in {}, out {}".format(
                source_in_change, source_out_change))

        # recalculate handles by the speed
        handle_start *= speed
        handle_end *= speed
        self.log.debug("speed: handle_start: '{0}', handle_end: '{1}'".format(
            handle_start, handle_end))

        # recalculate source with timewarp and by the speed
        source_in += int(source_in_change)
        source_out += int(source_out_change * speed)

        source_in_h = int(source_in - math.ceil(
            (handle_start * 1000) / 1000.0))
        source_out_h = int(source_out + math.ceil(
            (handle_end * 1000) / 1000.0))

        self.log.debug(
            "retimed: source_in_h: '{0}', source_out_h: '{1}'".format(
                source_in_h, source_out_h))

        # add all data to Instance
        instance.data["handleStart"] = handle_start
        instance.data["handleEnd"] = handle_end
        instance.data["sourceIn"] = source_in
        instance.data["sourceOut"] = source_out
        instance.data["sourceInH"] = source_in_h
        instance.data["sourceOutH"] = source_out_h
        instance.data["speed"] = speed

        source_handle_start = source_in_h - source_in
        # frame_start = instance.data["frameStart"] + source_handle_start
        duration = source_out_h - source_in_h
        frame_end = int(frame_start + duration - (handle_start + handle_end))

        instance.data["versionData"].update({
            "retime": True,
            "speed": speed,
            "timewarps": time_warp_nodes,
            "frameStart": frame_start,
            "frameEnd": frame_end,
            "handleStart": abs(source_handle_start),
            "handleEnd": source_out_h - source_out
        })
        self.log.debug("versionData: {}".format(instance.data["versionData"]))
        self.log.debug("sourceIn: {}".format(instance.data["sourceIn"]))
        self.log.debug("sourceOut: {}".format(instance.data["sourceOut"]))
        self.log.debug("speed: {}".format(instance.data["speed"]))

        # change otio clip data
        instance.data["otioClip"].source_range = create_otio_time_range(
            oc_source_in, (source_out - source_in + 1), oc_source_fps)
        self.log.debug("otioClip: {}".format(instance.data["otioClip"]))
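A sanity check of the frame-range math at the end of the new PrecollectRetime plugin above, with hypothetical values: the handle-inclusive source duration minus both rescaled handles gives the published frame end, so the published length matches the retimed source range.

    # hypothetical retimed values
    frame_start = 1001
    source_in, source_out = 100, 147   # retimed source range (48 frames)
    handle_start, handle_end = 10, 10  # already rescaled by speed
    source_in_h = source_in - handle_start   # 90
    source_out_h = source_out + handle_end   # 157

    duration = source_out_h - source_in_h    # 67, handles included
    frame_end = int(frame_start + duration - (handle_start + handle_end))
    print(frame_end)  # 1048 -> 1001..1048 is again 48 frames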
@@ -1,25 +0,0 @@
import pyblish
from openpype.hosts.hiero.api import is_overlapping


class ValidateAudioFile(pyblish.api.InstancePlugin):
    """Validate audio subset has available audio track clips"""

    order = pyblish.api.ValidatorOrder
    label = "Validate Audio Tracks"
    hosts = ["hiero"]
    families = ["audio"]

    def process(self, instance):
        clip = instance.data["item"]
        audio_tracks = instance.context.data["audioTracks"]
        audio_clip = None

        for a_track in audio_tracks:
            for item in a_track.items():
                if is_overlapping(item, clip):
                    audio_clip = item

        assert audio_clip, "Missing relative audio clip for clip {}".format(
            clip.name()
        )
@@ -1,22 +0,0 @@
from pyblish import api


class ValidateHierarchy(api.InstancePlugin):
    """Validate clip's hierarchy data.

    """

    order = api.ValidatorOrder
    families = ["clip", "shot"]
    label = "Validate Hierarchy"
    hosts = ["hiero"]

    def process(self, instance):
        asset_name = instance.data.get("asset", None)
        hierarchy = instance.data.get("hierarchy", None)
        parents = instance.data.get("parents", None)

        assert hierarchy, "Hierarchy Tag has to be set \
and added to clip `{}`".format(asset_name)
        assert parents, "Parents build from Hierarchy Tag has \
to be set and added to clip `{}`".format(asset_name)
@@ -1,31 +0,0 @@
from pyblish import api


class ValidateNames(api.InstancePlugin):
    """Validate sequence, video track and track item names.

    When creating output directories with the name of an item, ending with a
    whitespace will fail the extraction.
    Exact matching to optimize processing.
    """

    order = api.ValidatorOrder
    families = ["clip"]
    match = api.Exact
    label = "Names"
    hosts = ["hiero"]

    def process(self, instance):

        item = instance.data["item"]

        msg = "Track item \"{0}\" ends with a whitespace."
        assert not item.name().endswith(" "), msg.format(item.name())

        msg = "Video track \"{0}\" ends with a whitespace."
        msg = msg.format(item.parent().name())
        assert not item.parent().name().endswith(" "), msg

        msg = "Sequence \"{0}\" ends with a whitespace."
        msg = msg.format(item.parent().parent().name())
        assert not item.parent().parent().name().endswith(" "), msg
@ -41,7 +41,7 @@ class LoadMov(api.Loader):
    icon = "code-fork"
    color = "orange"

    script_start = nuke.root()["first_frame"].value()
    first_frame = nuke.root()["first_frame"].value()

    # options gui
    defaults = {
@ -71,6 +71,9 @@ class LoadMov(api.Loader):
        version_data = version.get("data", {})
        repr_id = context["representation"]["_id"]

        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)

        orig_first = version_data.get("frameStart")
        orig_last = version_data.get("frameEnd")
        diff = orig_first - 1
@ -78,9 +81,6 @@ class LoadMov(api.Loader):
        first = orig_first - diff
        last = orig_last - diff

        handle_start = version_data.get("handleStart", 0)
        handle_end = version_data.get("handleEnd", 0)

        colorspace = version_data.get("colorspace")
        repr_cont = context["representation"]["context"]
@ -89,7 +89,7 @@ class LoadMov(api.Loader):

            context["representation"]["_id"]
        # create handles offset (only to last, because of mov)
        last += handle_start + handle_end
        last += self.handle_start + self.handle_end

        # Fallback to asset name when namespace is None
        if namespace is None:
@ -133,10 +133,11 @@ class LoadMov(api.Loader):

        if start_at_workfile:
            # start at workfile start
            read_node['frame'].setValue(str(self.script_start))
            read_node['frame'].setValue(str(self.first_frame))
        else:
            # start at version frame start
            read_node['frame'].setValue(str(orig_first - handle_start))
            read_node['frame'].setValue(
                str(orig_first - self.handle_start))

        if colorspace:
            read_node["colorspace"].setValue(str(colorspace))
@ -167,6 +168,11 @@ class LoadMov(api.Loader):

        read_node["tile_color"].setValue(int("0x4ecd25ff", 16))

        if version_data.get("retime", None):
            speed = version_data.get("speed", 1)
            time_warp_nodes = version_data.get("timewarps", [])
            self.make_retimes(speed, time_warp_nodes)

        return containerise(
            read_node,
            name=name,
@ -229,9 +235,8 @@ class LoadMov(api.Loader):
        # set first to 1
        first = orig_first - diff
        last = orig_last - diff
        handles = version_data.get("handles", 0)
        handle_start = version_data.get("handleStart", 0)
        handle_end = version_data.get("handleEnd", 0)
        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)
        colorspace = version_data.get("colorspace")

        if first is None:
@ -242,13 +247,8 @@ class LoadMov(api.Loader):
                read_node['name'].value(), representation))
            first = 0

        # fix handle start and end if none are available
        if not handle_start and not handle_end:
            handle_start = handles
            handle_end = handles

        # create handles offset (only to last, because of mov)
        last += handle_start + handle_end
        last += self.handle_start + self.handle_end

        read_node["file"].setValue(file)
@ -259,12 +259,12 @@ class LoadMov(api.Loader):
        read_node["last"].setValue(last)
        read_node['frame_mode'].setValue("start at")

        if int(self.script_start) == int(read_node['frame'].value()):
        if int(self.first_frame) == int(read_node['frame'].value()):
            # start at workfile start
            read_node['frame'].setValue(str(self.script_start))
            read_node['frame'].setValue(str(self.first_frame))
        else:
            # start at version frame start
            read_node['frame'].setValue(str(orig_first - handle_start))
            read_node['frame'].setValue(str(orig_first - self.handle_start))

        if colorspace:
            read_node["colorspace"].setValue(str(colorspace))
@ -282,8 +282,8 @@ class LoadMov(api.Loader):
            "version": str(version.get("name")),
            "colorspace": version_data.get("colorspace"),
            "source": version_data.get("source"),
            "handleStart": str(handle_start),
            "handleEnd": str(handle_end),
            "handleStart": str(self.handle_start),
            "handleEnd": str(self.handle_end),
            "fps": str(version_data.get("fps")),
            "author": version_data.get("author"),
            "outputDir": version_data.get("outputDir")
@ -295,6 +295,11 @@ class LoadMov(api.Loader):
        else:
            read_node["tile_color"].setValue(int("0x4ecd25ff", 16))

        if version_data.get("retime", None):
            speed = version_data.get("speed", 1)
            time_warp_nodes = version_data.get("timewarps", [])
            self.make_retimes(speed, time_warp_nodes)

        # Update the imprinted representation
        update_container(
            read_node, updated_dict
@ -310,3 +315,32 @@ class LoadMov(api.Loader):

        with viewer_update_and_undo_stop():
            nuke.delete(read_node)

    def make_retimes(self, speed, time_warp_nodes):
        '''Create all retime and timewarping nodes with copied animation'''
        if speed != 1:
            rtn = nuke.createNode(
                "Retime",
                "speed {}".format(speed))
            rtn["before"].setValue("continue")
            rtn["after"].setValue("continue")
            rtn["input.first_lock"].setValue(True)
            rtn["input.first"].setValue(
                self.first_frame
            )

        if time_warp_nodes != []:
            start_anim = self.first_frame + (self.handle_start / speed)
            for timewarp in time_warp_nodes:
                twn = nuke.createNode(timewarp["Class"],
                                      "name {}".format(timewarp["name"]))
                if isinstance(timewarp["lookup"], list):
                    # if array for animation
                    twn["lookup"].setAnimated()
                    for i, value in enumerate(timewarp["lookup"]):
                        twn["lookup"].setValueAt(
                            (start_anim + i) + value,
                            (start_anim + i))
                else:
                    # if static value `int`
                    twn["lookup"].setValue(timewarp["lookup"])
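To see what the lookup restore in make_retimes does, here is a toy walk-through of the keyframe math above; the numbers are invented for illustration and are not from this commit:

first_frame, handle_start, speed = 1001, 10, 2.0
start_anim = first_frame + (handle_start / speed)   # 1006.0
lookup = [-3, -2, -1]           # per-frame offsets baked at publish time
keys = [((start_anim + i) + value, (start_anim + i))
        for i, value in enumerate(lookup)]
# [(1003.0, 1006.0), (1005.0, 1007.0), (1007.0, 1008.0)]
# i.e. at output frame 1006 the TimeWarp looks up source frame 1003, etc.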
@ -140,7 +140,7 @@ class LoadSequence(api.Loader):
        if version_data.get("retime", None):
            speed = version_data.get("speed", 1)
            time_warp_nodes = version_data.get("timewarps", [])
            self.make_retimes(read_node, speed, time_warp_nodes)
            self.make_retimes(speed, time_warp_nodes)

        return containerise(read_node,
                            name=name,
@ -256,7 +256,7 @@ class LoadSequence(api.Loader):
        if version_data.get("retime", None):
            speed = version_data.get("speed", 1)
            time_warp_nodes = version_data.get("timewarps", [])
            self.make_retimes(read_node, speed, time_warp_nodes)
            self.make_retimes(speed, time_warp_nodes)

        # Update the imprinted representation
        update_container(
@ -285,10 +285,11 @@ class LoadSequence(api.Loader):
            rtn["after"].setValue("continue")
            rtn["input.first_lock"].setValue(True)
            rtn["input.first"].setValue(
                self.handle_start + self.first_frame
                self.first_frame
            )

        if time_warp_nodes != []:
            start_anim = self.first_frame + (self.handle_start / speed)
            for timewarp in time_warp_nodes:
                twn = nuke.createNode(timewarp["Class"],
                                      "name {}".format(timewarp["name"]))
@ -297,8 +298,8 @@ class LoadSequence(api.Loader):
                    twn["lookup"].setAnimated()
                    for i, value in enumerate(timewarp["lookup"]):
                        twn["lookup"].setValueAt(
                            (self.first_frame + i) + value,
                            (self.first_frame + i))
                            (start_anim + i) + value,
                            (start_anim + i))
                else:
                    # if static value `int`
                    twn["lookup"].setValue(timewarp["lookup"])
@ -4,8 +4,10 @@ import clique
from .import_utils import discover_host_vendor_module

try:
    import opentimelineio as otio
    from opentimelineio import opentime as _ot
except ImportError:
    otio = discover_host_vendor_module("opentimelineio")
    _ot = discover_host_vendor_module("opentimelineio.opentime")
@ -166,3 +168,119 @@ def make_sequence_collection(path, otio_range, metadata):
        head=head, tail=tail, padding=metadata["padding"])
    collection.indexes.update([i for i in range(first, (last + 1))])
    return dir_path, collection


def _sequence_resize(source, length):
    step = float(len(source) - 1) / (length - 1)
    for i in range(length):
        low, ratio = divmod(i * step, 1)
        high = low + 1 if ratio > 0 else low
        yield (1 - ratio) * source[int(low)] + ratio * source[int(high)]
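_sequence_resize linearly resamples a list of values to a new length; a quick check of its behavior with a made-up input:

values = [0, 10]
list(_sequence_resize(values, 5))
# [0.0, 2.5, 5.0, 7.5, 10.0] -- endpoints preserved, interior interpolated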

def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
    source_range = otio_clip.source_range
    available_range = otio_clip.available_range()
    media_in = available_range.start_time.value
    media_out = available_range.end_time_inclusive().value

    # modifiers
    time_scalar = 1.
    offset_in = 0
    offset_out = 0
    time_warp_nodes = []

    # Check for speed effects and adjust playback speed accordingly
    for effect in otio_clip.effects:
        if isinstance(effect, otio.schema.LinearTimeWarp):
            time_scalar = effect.time_scalar

        elif isinstance(effect, otio.schema.FreezeFrame):
            # For freeze frame, playback speed must be set after range
            time_scalar = 0.

        elif isinstance(effect, otio.schema.TimeEffect):
            # For generic time effects, only TimeWarp data is collected
            name = effect.name
            effect_name = effect.effect_name
            if "TimeWarp" not in effect_name:
                continue
            metadata = effect.metadata
            lookup = metadata.get("lookup")
            if not lookup:
                continue

            # time warp node
            tw_node = {
                "Class": "TimeWarp",
                "name": name
            }
            tw_node.update(metadata)

            # get first and last frame offsets
            offset_in += lookup[0]
            offset_out += lookup[-1]

            # add to timewarp nodes
            time_warp_nodes.append(tw_node)

    # multiply by time scalar
    offset_in *= time_scalar
    offset_out *= time_scalar

    # flip offsets if the speed is reversed
    if time_scalar < 0:
        _offset_in = offset_out
        _offset_out = offset_in
        offset_in = _offset_in
        offset_out = _offset_out

    # scale handles
    handle_start *= abs(time_scalar)
    handle_end *= abs(time_scalar)

    # flip handles if the speed is reversed
    if time_scalar < 0:
        _handle_start = handle_end
        _handle_end = handle_start
        handle_start = _handle_start
        handle_end = _handle_end

    source_in = source_range.start_time.value

    media_in_trimmed = (
        media_in + source_in + offset_in)
    media_out_trimmed = (
        media_in + source_in + (
            ((source_range.duration.value - 1) * abs(
                time_scalar)) + offset_out))

    # calculate available handles
    if (media_in_trimmed - media_in) < handle_start:
        handle_start = (media_in_trimmed - media_in)
    if (media_out - media_out_trimmed) < handle_end:
        handle_end = (media_out - media_out_trimmed)

    # create version data
    version_data = {
        "versionData": {
            "retime": True,
            "speed": time_scalar,
            "timewarps": time_warp_nodes,
            "handleStart": handle_start,
            "handleEnd": handle_end
        }
    }

    returning_dict = {
        "mediaIn": media_in_trimmed,
        "mediaOut": media_out_trimmed,
        "handleStart": handle_start,
        "handleEnd": handle_end
    }

    # add version data only if retimed
    if time_warp_nodes or time_scalar != 1.:
        returning_dict.update(version_data)

    return returning_dict
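A hedged usage sketch of get_media_range_with_retimes; the clip below is built with plain OTIO calls and made-up values (path, rate, ranges are all assumptions), so the resulting numbers only illustrate the math above:

import opentimelineio as otio

ref = otio.schema.ExternalReference(
    target_url="/tmp/plate.mov",  # hypothetical media path
    available_range=otio.opentime.TimeRange(
        otio.opentime.RationalTime(0, 25),
        otio.opentime.RationalTime(100, 25)))
clip = otio.schema.Clip(
    name="plate", media_reference=ref,
    source_range=otio.opentime.TimeRange(
        otio.opentime.RationalTime(10, 25),
        otio.opentime.RationalTime(40, 25)))
clip.effects.append(otio.schema.LinearTimeWarp(time_scalar=2.0))

attrs = get_media_range_with_retimes(clip, 5, 5)
# mediaIn  = 0 + 10 = 10
# mediaOut = 10 + (40 - 1) * 2.0 = 88
# handles scale by |time_scalar| (5 -> 10) and are clamped to the media range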
@ -29,7 +29,7 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
        otio_review_clips = []
        otio_timeline = instance.context.data["otioTimeline"]
        otio_clip = instance.data["otioClip"]

        self.log.debug("__ otioClip: {}".format(otio_clip))
        # optionally get `reviewTrack`
        review_track_name = instance.data.get("reviewTrack")
@ -37,7 +37,7 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
        otio_tl_range = otio_clip.range_in_parent()

        # calculate real timeline end needed for the clip
        clip_end_frame = int(
        clip_frame_end = int(
            otio_tl_range.start_time.value + otio_tl_range.duration.value)

        # skip if no review track available
@ -57,13 +57,12 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
            track_rip = track.range_in_parent()

            # calculate real track end frame
            track_end_frame = int(
                track_rip.start_time.value + track_rip.duration.value)
            track_frame_end = int(track_rip.end_time_exclusive().value)

            # check that the track end is not lower than the clip requires
            if clip_end_frame > track_end_frame:
            if clip_frame_end > track_frame_end:
                # calculate difference duration
                gap_duration = clip_end_frame - track_end_frame
                gap_duration = clip_frame_end - track_frame_end
                # create rational time range for gap
                otio_gap_range = otio.opentime.TimeRange(
                    start_time=otio.opentime.RationalTime(
@ -11,6 +11,7 @@ import clique
import opentimelineio as otio
import pyblish.api
import openpype
from openpype.lib import editorial


class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
@ -27,59 +28,80 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
            return

        if not instance.data.get("representations"):
            instance.data["representations"] = list()
        version_data = dict()
            instance.data["representations"] = []

        if not instance.data.get("versionData"):
            instance.data["versionData"] = {}

        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        # get basic variables
        otio_clip = instance.data["otioClip"]
        frame_start = instance.data["frameStart"]
        frame_end = instance.data["frameEnd"]

        # generate range in parent
        otio_src_range = otio_clip.source_range
        otio_avalable_range = otio_clip.available_range()
        trimmed_media_range = openpype.lib.trim_media_range(
            otio_avalable_range, otio_src_range)
        media_fps = otio_avalable_range.start_time.rate

        # calculate with handles
        otio_src_range_handles = openpype.lib.otio_range_with_handles(
            otio_src_range, instance)
        trimmed_media_range_h = openpype.lib.trim_media_range(
            otio_avalable_range, otio_src_range_handles)
        # get available range trimmed with processed retimes
        retimed_attributes = editorial.get_media_range_with_retimes(
            otio_clip, handle_start, handle_end)
        self.log.debug(
            ">> retimed_attributes: {}".format(retimed_attributes))

        # frame start and end from media
        s_frame_start, s_frame_end = openpype.lib.otio_range_to_frame_range(
            trimmed_media_range)
        a_frame_start, a_frame_end = openpype.lib.otio_range_to_frame_range(
            otio_avalable_range)
        a_frame_start_h, a_frame_end_h = openpype.lib.\
            otio_range_to_frame_range(trimmed_media_range_h)
        # break down into variables
        media_in = int(retimed_attributes["mediaIn"])
        media_out = int(retimed_attributes["mediaOut"])
        handle_start = int(retimed_attributes["handleStart"])
        handle_end = int(retimed_attributes["handleEnd"])

        # fix frame_start and frame_end frame to be in range of media
        if a_frame_start_h < a_frame_start:
            a_frame_start_h = a_frame_start
        # set versionData if there is any retime
        version_data = retimed_attributes.get("versionData")

        if a_frame_end_h > a_frame_end:
            a_frame_end_h = a_frame_end
        if version_data:
            instance.data["versionData"].update(version_data)

        # count the difference for frame_start and frame_end
        diff_start = s_frame_start - a_frame_start_h
        diff_end = a_frame_end_h - s_frame_end
        # convert to available frame range with handles
        a_frame_start_h = media_in - handle_start
        a_frame_end_h = media_out + handle_end

        # create trimmed otio time range
        trimmed_media_range_h = editorial.range_from_frames(
            a_frame_start_h, (a_frame_end_h - a_frame_start_h + 1),
            media_fps
        )
        self.log.debug("trimmed_media_range_h: {}".format(
            trimmed_media_range_h))
        self.log.debug("a_frame_start_h: {}".format(
            a_frame_start_h))
        self.log.debug("a_frame_end_h: {}".format(
            a_frame_end_h))

        # create frame start and end
        frame_start = instance.data["frameStart"]
        frame_end = frame_start + (media_out - media_in)

        # add to version data start and end range data
        # for loader plugins to be correctly displayed and loaded
        version_data.update({
            "frameStart": frame_start,
            "frameEnd": frame_end,
            "handleStart": diff_start,
            "handleEnd": diff_end,
            "fps": otio_avalable_range.start_time.rate
        instance.data["versionData"].update({
            "fps": media_fps
        })

        if not instance.data["versionData"].get("retime"):
            instance.data["versionData"].update({
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "handleStart": handle_start,
                "handleEnd": handle_end,
            })
        else:
            instance.data["versionData"].update({
                "frameStart": frame_start,
                "frameEnd": frame_end
            })

        # change frame_start and frame_end values
        # for representation to be correctly renumbered in integrate_new
        frame_start -= diff_start
        frame_end += diff_end
        frame_start -= handle_start
        frame_end += handle_end

        media_ref = otio_clip.media_reference
        metadata = media_ref.metadata
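A short numeric walk-through of the range conversion above; all values are invented for illustration:

media_in, media_out = 110, 188              # retimed media range
handle_start, handle_end = 10, 10
a_frame_start_h = media_in - handle_start   # 100
a_frame_end_h = media_out + handle_end      # 198

frame_start = 1001
frame_end = frame_start + (media_out - media_in)  # 1079
# representation range renumbered to include handles:
frame_start -= handle_start                 # 991
frame_end += handle_end                     # 1089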
@ -136,12 +158,13 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
                frame_start, frame_end, file=filename)

        if repre:
            instance.data["versionData"] = version_data
            self.log.debug(">>>>>>>> version data {}".format(version_data))
            # add representation to instance data
            instance.data["representations"].append(repre)
            self.log.debug(">>>>>>>> {}".format(repre))

        import pprint
        self.log.debug(pprint.pformat(instance.data))

    def _create_representation(self, start, end, **kwargs):
        """
        Creating representation data.
@ -51,7 +51,6 @@ class ExtractOTIOReview(openpype.api.Extractor):

    def process(self, instance):
        # TODO: convert resulting image sequence to mp4
        # TODO: add audio output to the mp4 if audio in review is on.

        # get otio clip and other time info from instance clip
        # TODO: what if handles are different in `versionData`?