hiero: remove old code
This commit is contained in:
parent 4d93fd705b
commit 4f0e02ea57
12 changed files with 0 additions and 1228 deletions
@ -1,23 +0,0 @@
from pyblish import api


class CollectFramerate(api.ContextPlugin):
    """Collect framerate from selected sequence."""

    order = api.CollectorOrder + 0.001
    label = "Collect Framerate"
    hosts = ["hiero"]

    def process(self, context):
        sequence = context.data["activeSequence"]
        context.data["fps"] = self.get_rate(sequence)
        self.log.info("Framerate is collected: {}".format(context.data["fps"]))

    def get_rate(self, sequence):
        num, den = sequence.framerate().toRational()
        rate = float(num) / float(den)

        if rate.is_integer():
            return rate

        return round(rate, 3)
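For orientation, a minimal sketch of how a collector like this is driven by pyblish. The manual registration and the Hiero session providing `activeSequence` are assumptions here; hosts normally discover plugins from registered plugin paths:

    import pyblish.api
    import pyblish.util

    # hypothetical manual registration of the collector above
    pyblish.api.register_plugin(CollectFramerate)

    # run the publish cycle; collectors fill context.data
    context = pyblish.util.publish()
    print(context.data["fps"])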
@ -1,30 +0,0 @@
from pyblish import api


class CollectClipMetadata(api.InstancePlugin):
    """Collect Metadata from selected track items."""

    order = api.CollectorOrder + 0.01
    label = "Collect Metadata"
    hosts = ["hiero"]

    def process(self, instance):
        item = instance.data["item"]
        ti_metadata = self.metadata_to_string(dict(item.metadata()))
        ms_metadata = self.metadata_to_string(
            dict(item.source().mediaSource().metadata()))

        instance.data["clipMetadata"] = ti_metadata
        instance.data["mediaSourceMetadata"] = ms_metadata

        self.log.info(instance.data["clipMetadata"])
        self.log.info(instance.data["mediaSourceMetadata"])
        return

    def metadata_to_string(self, metadata):
        data = dict()
        for k, v in metadata.items():
            if v not in ["-", ""]:
                data[str(k)] = v

        return data
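Worth noting: despite its name, `metadata_to_string` returns a filtered dict rather than a string. A tiny illustration with made-up metadata keys:

    metadata = {"foundry.source.framerate": "25", "reelID": "-", "note": ""}
    # values "-" and "" are dropped, keys are coerced to str:
    # metadata_to_string(metadata) -> {"foundry.source.framerate": "25"}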
@ -1,90 +0,0 @@
import pyblish.api
import opentimelineio.opentime as otio_ot


class CollectClipTimecodes(pyblish.api.InstancePlugin):
    """Collect time with OpenTimelineIO:
    source_h(In,Out)[timecode, sec]
    timeline(In,Out)[timecode, sec]
    """

    order = pyblish.api.CollectorOrder + 0.101
    label = "Collect Timecodes"
    hosts = ["hiero"]

    def process(self, instance):

        data = dict()
        self.log.debug("__ instance.data: {}".format(instance.data))
        # Timeline data.
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        source_in_h = instance.data("sourceInH",
                                    instance.data("sourceIn") - handle_start)
        source_out_h = instance.data("sourceOutH",
                                     instance.data("sourceOut") + handle_end)

        timeline_in = instance.data["clipIn"]
        timeline_out = instance.data["clipOut"]

        # set frame start with tag or take it from timeline
        frame_start = instance.data.get("startingFrame")

        if not frame_start:
            frame_start = timeline_in

        source = instance.data.get("source")

        otio_data = dict()
        self.log.debug("__ source: `{}`".format(source))

        rate_fps = instance.context.data["fps"]

        otio_in_h_ratio = otio_ot.RationalTime(
            value=(source.timecodeStart() + (
                source_in_h + (source_out_h - source_in_h))),
            rate=rate_fps)

        otio_out_h_ratio = otio_ot.RationalTime(
            value=(source.timecodeStart() + source_in_h),
            rate=rate_fps)

        otio_timeline_in_ratio = otio_ot.RationalTime(
            value=int(
                instance.data.get("timelineTimecodeStart", 0)) + timeline_in,
            rate=rate_fps)

        otio_timeline_out_ratio = otio_ot.RationalTime(
            value=int(
                instance.data.get("timelineTimecodeStart", 0)) + timeline_out,
            rate=rate_fps)

        otio_data.update({
            "otioClipInHTimecode": otio_ot.to_timecode(otio_in_h_ratio),
            "otioClipOutHTimecode": otio_ot.to_timecode(otio_out_h_ratio),
            "otioClipInHSec": otio_ot.to_seconds(otio_in_h_ratio),
            "otioClipOutHSec": otio_ot.to_seconds(otio_out_h_ratio),
            "otioTimelineInTimecode": otio_ot.to_timecode(
                otio_timeline_in_ratio),
            "otioTimelineOutTimecode": otio_ot.to_timecode(
                otio_timeline_out_ratio),
            "otioTimelineInSec": otio_ot.to_seconds(otio_timeline_in_ratio),
            "otioTimelineOutSec": otio_ot.to_seconds(otio_timeline_out_ratio)
        })

        data.update({
            "otioData": otio_data,
            "sourceTimecodeIn": otio_ot.to_timecode(otio_in_h_ratio),
            "sourceTimecodeOut": otio_ot.to_timecode(otio_out_h_ratio)
        })
        instance.data.update(data)
        self.log.debug("data: {}".format(instance.data))
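For reference, the OpenTimelineIO conversions used above behave like this; the single-argument `to_timecode` call mirrors the plugin's style, though exact signatures may differ across OTIO versions:

    import opentimelineio.opentime as otio_ot

    rt = otio_ot.RationalTime(value=86400, rate=24.0)
    otio_ot.to_timecode(rt)  # "01:00:00:00" (86400 frames at 24 fps is one hour)
    otio_ot.to_seconds(rt)   # 3600.0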
@ -1,70 +0,0 @@
import pyblish.api


class CollectFrameRanges(pyblish.api.InstancePlugin):
    """Collect all frame ranges."""

    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect Frame Ranges"
    hosts = ["hiero"]
    families = ["clip", "effect"]

    def process(self, instance):

        data = dict()
        track_item = instance.data["item"]

        # handles
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        # source frame ranges
        source_in = int(track_item.sourceIn())
        source_out = int(track_item.sourceOut())
        source_in_h = int(source_in - handle_start)
        source_out_h = int(source_out + handle_end)

        # timeline frame ranges
        clip_in = int(track_item.timelineIn())
        clip_out = int(track_item.timelineOut())
        clip_in_h = clip_in - handle_start
        clip_out_h = clip_out + handle_end

        # durations
        clip_duration = (clip_out - clip_in) + 1
        clip_duration_h = clip_duration + (handle_start + handle_end)

        # set frame start with tag or take it from timeline `startingFrame`
        frame_start = instance.data.get("workfileFrameStart")

        if not frame_start:
            frame_start = clip_in

        frame_end = frame_start + (clip_out - clip_in)

        data.update({
            # media source frame range
            "sourceIn": source_in,
            "sourceOut": source_out,
            "sourceInH": source_in_h,
            "sourceOutH": source_out_h,

            # timeline frame range
            "clipIn": clip_in,
            "clipOut": clip_out,
            "clipInH": clip_in_h,
            "clipOutH": clip_out_h,

            # workfile frame range
            "frameStart": frame_start,
            "frameEnd": frame_end,

            "clipDuration": clip_duration,
            "clipDurationH": clip_duration_h,

            "fps": instance.context.data["fps"]
        })
        self.log.info("Frame range data for instance `{}` are: {}".format(
            instance, data))
        instance.data.update(data)
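A worked example of the frame-range math above, with made-up numbers: handles of 10/10, a track item at timeline 1001-1100 cutting source frames 50-149, and `workfileFrameStart` unset:

    source_in, source_out = 50, 149
    source_in_h, source_out_h = 40, 159      # handles extend the source range
    clip_in, clip_out = 1001, 1100
    clip_duration = (1100 - 1001) + 1        # 100 frames
    clip_duration_h = 100 + (10 + 10)        # 120 frames
    frame_start = 1001                       # falls back to clip_in
    frame_end = 1001 + (1100 - 1001)         # 1100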
@ -1,116 +0,0 @@
import pyblish.api
import avalon.api as avalon


class CollectHierarchy(pyblish.api.ContextPlugin):
    """Collecting hierarchy from `parents`.

    Present in `clip` family instances coming from the request json data file.

    It will add `hierarchical_context` into each instance for integrate
    plugins to be able to create needed parents for the context if they
    don't exist yet.
    """

    label = "Collect Hierarchy"
    order = pyblish.api.CollectorOrder
    families = ["clip"]

    def process(self, context):
        temp_context = {}
        project_name = avalon.Session["AVALON_PROJECT"]
        final_context = {}
        final_context[project_name] = {}
        final_context[project_name]['entity_type'] = 'Project'

        for instance in context:
            self.log.info("Processing instance: `{}` ...".format(instance))

            # shot data dict
            shot_data = {}
            families = instance.data.get("families")

            # filter out all inappropriate instances
            if not instance.data["publish"]:
                continue
            if not families:
                continue
            # exclude other families than self.families with intersection
            if not set(self.families).intersection(families):
                continue

            # exclude if heroTrack is not True
            if not instance.data.get("heroTrack"):
                continue

            # update families to include `shot` for hierarchy integration
            instance.data["families"] = families + ["shot"]

            # get asset build data if any available
            shot_data["inputs"] = [
                x["_id"] for x in instance.data.get("assetbuilds", [])
            ]

            # suppose that all instances are Shots
            shot_data['entity_type'] = 'Shot'
            shot_data['tasks'] = instance.data.get("tasks") or []
            shot_data["comments"] = instance.data.get("comments", [])

            shot_data['custom_attributes'] = {
                "handleStart": instance.data["handleStart"],
                "handleEnd": instance.data["handleEnd"],
                "frameStart": instance.data["frameStart"],
                "frameEnd": instance.data["frameEnd"],
                "clipIn": instance.data["clipIn"],
                "clipOut": instance.data["clipOut"],
                'fps': instance.context.data["fps"],
                "resolutionWidth": instance.data["resolutionWidth"],
                "resolutionHeight": instance.data["resolutionHeight"],
                "pixelAspect": instance.data["pixelAspect"]
            }

            actual = {instance.data["asset"]: shot_data}

            for parent in reversed(instance.data["parents"]):
                next_dict = {}
                parent_name = parent["entity_name"]
                next_dict[parent_name] = {}
                next_dict[parent_name]["entity_type"] = parent[
                    "entity_type"].capitalize()
                next_dict[parent_name]["childs"] = actual
                actual = next_dict

            temp_context = self._update_dict(temp_context, actual)

        # skip if nothing for hierarchy available
        if not temp_context:
            return

        final_context[project_name]['childs'] = temp_context

        # adding hierarchy context to context
        context.data["hierarchyContext"] = final_context
        self.log.debug("context.data[hierarchyContext] is: {}".format(
            context.data["hierarchyContext"]))

    def _update_dict(self, parent_dict, child_dict):
        """Nest each child into its parent.

        Args:
            parent_dict (dict): parent dict which should be nested with
                children
            child_dict (dict): children dict which should be ingested
        """

        for key in parent_dict:
            if key in child_dict and isinstance(parent_dict[key], dict):
                child_dict[key] = self._update_dict(
                    parent_dict[key], child_dict[key]
                )
            else:
                if parent_dict.get(key) and child_dict.get(key):
                    continue
                else:
                    child_dict[key] = parent_dict[key]

        return child_dict
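The `_update_dict` merge can be pictured with two small hierarchies; the sequence and shot names are made up:

    parent = {"seq01": {"childs": {"sh010": {"entity_type": "Shot"}}}}
    child = {"seq01": {"childs": {"sh020": {"entity_type": "Shot"}}}}
    # _update_dict(parent, child) recursively merges shared branches,
    # keeping both shots under "seq01":
    # {"seq01": {"childs": {"sh020": {"entity_type": "Shot"},
    #                       "sh010": {"entity_type": "Shot"}}}}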
@ -1,169 +0,0 @@
from pyblish import api
import os
import re
import clique


class CollectPlates(api.InstancePlugin):
    """Collect plate representations."""

    # Run just before CollectSubsets
    order = api.CollectorOrder + 0.1020
    label = "Collect Plates"
    hosts = ["hiero"]
    families = ["plate"]

    def process(self, instance):
        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        self.main_clip = instance.data["item"]
        # get plate source attributes
        source_media = instance.data["sourceMedia"]
        source_path = instance.data["sourcePath"]
        source_first = instance.data["sourceFirst"]
        frame_start = instance.data["frameStart"]
        frame_end = instance.data["frameEnd"]
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]
        source_in = instance.data["sourceIn"]
        source_out = instance.data["sourceOut"]
        source_in_h = instance.data["sourceInH"]
        source_out_h = instance.data["sourceOutH"]

        # define if review media is sequence
        is_sequence = bool(not source_media.singleFile())
        self.log.debug("is_sequence: {}".format(is_sequence))

        file_dir = os.path.dirname(source_path)
        file = os.path.basename(source_path)
        ext = os.path.splitext(file)[-1]

        # detect if sequence
        if not is_sequence:
            # is video file
            files = file
        else:
            files = list()
            spliter, padding = self.detect_sequence(file)
            self.log.debug("_ spliter, padding: {}, {}".format(
                spliter, padding))
            base_name = file.split(spliter)[0]

            # define collection and calculate frame range
            collection = clique.Collection(
                base_name,
                ext,
                padding,
                set(range(
                    int(source_first + source_in_h),
                    int(source_first + source_out_h) + 1
                ))
            )
            self.log.debug("_ collection: {}".format(collection))

            real_files = os.listdir(file_dir)
            self.log.debug("_ real_files: {}".format(real_files))

            # collect frames to repre files list
            self.handle_start_exclude = list()
            self.handle_end_exclude = list()
            for findex, item in enumerate(collection):
                if item not in real_files:
                    self.log.debug("_ item: {}".format(item))
                    test_index = findex + int(source_first + source_in_h)
                    test_start = int(source_first + source_in)
                    test_end = int(source_first + source_out)
                    if (test_index < test_start):
                        self.handle_start_exclude.append(test_index)
                    elif (test_index > test_end):
                        self.handle_end_exclude.append(test_index)
                    continue
                files.append(item)

        # change label
        instance.data["label"] = "{0} - ({1})".format(
            instance.data["label"], ext
        )

        self.log.debug("Instance review: {}".format(instance.data["name"]))

        # adding representation for review mov
        representation = {
            "files": files,
            "stagingDir": file_dir,
            "frameStart": frame_start - handle_start,
            "frameEnd": frame_end + handle_end,
            "name": ext[1:],
            "ext": ext[1:]
        }

        instance.data["representations"].append(representation)
        self.version_data(instance)

        self.log.debug(
            "Added representations: {}".format(
                instance.data["representations"]))

        self.log.debug(
            "instance.data: {}".format(instance.data))

    def version_data(self, instance):
        transfer_data = [
            "handleStart", "handleEnd", "sourceIn", "sourceOut",
            "frameStart", "frameEnd", "sourceInH", "sourceOutH",
            "clipIn", "clipOut", "clipInH", "clipOutH", "asset",
            "track"
        ]

        version_data = dict()
        # pass data to version
        version_data.update({k: instance.data[k] for k in transfer_data})

        if 'version' in instance.data:
            version_data["version"] = instance.data["version"]

        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        if self.handle_start_exclude:
            handle_start -= len(self.handle_start_exclude)

        if self.handle_end_exclude:
            handle_end -= len(self.handle_end_exclude)

        # add to data of representation
        version_data.update({
            "colorspace": self.main_clip.sourceMediaColourTransform(),
            "families": instance.data["families"],
            "subset": instance.data["subset"],
            "fps": instance.data["fps"],
            "handleStart": handle_start,
            "handleEnd": handle_end
        })
        instance.data["versionData"] = version_data

    def detect_sequence(self, file):
        """Get identifying pattern for image sequence.

        Can find file.0001.ext, file.%02d.ext, file.####.ext

        Return:
            string: any matching sequence pattern
            int: padding of sequence numbering
        """
        foundall = re.findall(
            r"(#+)|(%\d+d)|(?<=[^a-zA-Z0-9])(\d+)(?=\.\w+$)", file)
        if foundall:
            found = sorted(list(set(foundall[0])))[-1]

            if "%" in found:
                padding = int(re.findall(r"\d+", found)[-1])
            else:
                padding = len(found)

            return found, padding
        else:
            return None, None
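The `detect_sequence` regex covers the three naming styles named in its docstring; these sample inputs and results follow directly from the pattern:

    # numbered frame:        "plate.0001.exr" -> ("0001", 4)
    # printf-style token:    "plate.%04d.exr" -> ("%04d", 4)
    # hash padding token:    "plate.####.exr" -> ("####", 4)
    # no sequence component: "plate.mov"      -> (None, None)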
@ -1,261 +0,0 @@
from pyblish import api
import os
import clique
from openpype.hosts.hiero.api import (
    is_overlapping, get_sequence_pattern_and_padding)


class CollectReview(api.InstancePlugin):
    """Collect review representation."""

    # Run just before CollectSubsets
    order = api.CollectorOrder + 0.1022
    label = "Collect Review"
    hosts = ["hiero"]
    families = ["review"]

    def get_review_item(self, instance):
        """Get review clip track item from review track name.

        Args:
            instance (obj): publishing instance

        Returns:
            hiero.core.TrackItem: corresponding track item

        Raises:
            Exception: description
        """
        review_track = instance.data.get("reviewTrack")
        video_tracks = instance.context.data["videoTracks"]
        for track in video_tracks:
            if review_track not in track.name():
                continue
            for item in track.items():
                self.log.debug(item)
                if is_overlapping(item, self.main_clip):
                    self.log.debug("Winner is: {}".format(item))
                    break

        # validate the clip is fully covered by the review clip
        assert is_overlapping(
            item, self.main_clip, strict=True), (
            "Review clip not covering fully "
            "the clip `{}`").format(self.main_clip.name())

        return item

    def process(self, instance):
        tags = ["review", "ftrackreview"]

        # get reviewable item from `review` instance.data attribute
        self.main_clip = instance.data.get("item")
        self.rw_clip = self.get_review_item(instance)

        # let user know there is missing review clip and convert instance
        # back as not reviewable
        assert self.rw_clip, "Missing reviewable clip for '{}'".format(
            self.main_clip.name()
        )

        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        # get review media main info
        rw_source = self.rw_clip.source().mediaSource()
        rw_source_duration = int(rw_source.duration())
        self.rw_source_path = rw_source.firstpath()
        rw_source_file_info = rw_source.fileinfos().pop()

        # define if review media is sequence
        is_sequence = bool(not rw_source.singleFile())
        self.log.debug("is_sequence: {}".format(is_sequence))

        # get handles
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        # review timeline and source frame ranges
        rw_clip_in = int(self.rw_clip.timelineIn())
        rw_clip_out = int(self.rw_clip.timelineOut())
        self.rw_clip_source_in = int(self.rw_clip.sourceIn())
        self.rw_clip_source_out = int(self.rw_clip.sourceOut())
        rw_source_first = int(rw_source_file_info.startFrame())

        # calculate delivery source_in and source_out
        # main_clip_timeline_in - review_item_timeline_in + 1
        main_clip_in = self.main_clip.timelineIn()
        main_clip_out = self.main_clip.timelineOut()

        source_in_diff = main_clip_in - rw_clip_in
        source_out_diff = main_clip_out - rw_clip_out

        if source_in_diff:
            self.rw_clip_source_in += source_in_diff
        if source_out_diff:
            self.rw_clip_source_out += source_out_diff

        # review clip durations
        rw_clip_duration = (
            self.rw_clip_source_out - self.rw_clip_source_in) + 1
        rw_clip_duration_h = rw_clip_duration + (
            handle_start + handle_end)

        # add created data to review item data
        instance.data["reviewItemData"] = {
            "mediaDuration": rw_source_duration
        }

        file_dir = os.path.dirname(self.rw_source_path)
        file = os.path.basename(self.rw_source_path)
        ext = os.path.splitext(file)[-1]

        # detect if sequence
        if not is_sequence:
            # is video file
            files = file
        else:
            files = list()
            spliter, padding = get_sequence_pattern_and_padding(file)
            self.log.debug("_ spliter, padding: {}, {}".format(
                spliter, padding))
            base_name = file.split(spliter)[0]

            # define collection and calculate frame range
            collection = clique.Collection(base_name, ext, padding, set(range(
                int(rw_source_first + int(
                    self.rw_clip_source_in - handle_start)),
                int(rw_source_first + int(
                    self.rw_clip_source_out + handle_end) + 1))))
            self.log.debug("_ collection: {}".format(collection))

            real_files = os.listdir(file_dir)
            self.log.debug("_ real_files: {}".format(real_files))

            # collect frames to repre files list
            for item in collection:
                if item not in real_files:
                    self.log.debug("_ item: {}".format(item))
                    continue
                files.append(item)

            # add prep tag
            tags.extend(["prep", "delete"])

        # change label
        instance.data["label"] = "{0} - ({1})".format(
            instance.data["label"], ext
        )

        self.log.debug("Instance review: {}".format(instance.data["name"]))

        # adding representation for review mov
        representation = {
            "files": files,
            "stagingDir": file_dir,
            "frameStart": rw_source_first + self.rw_clip_source_in,
            "frameEnd": rw_source_first + self.rw_clip_source_out,
            "frameStartFtrack": int(
                self.rw_clip_source_in - handle_start),
            "frameEndFtrack": int(self.rw_clip_source_out + handle_end),
            "step": 1,
            "fps": instance.data["fps"],
            "name": "review",
            "tags": tags,
            "ext": ext[1:]
        }

        if rw_source_duration > rw_clip_duration_h:
            self.log.debug("Media duration higher: {}".format(
                (rw_source_duration - rw_clip_duration_h)))
            representation.update({
                "frameStart": rw_source_first + int(
                    self.rw_clip_source_in - handle_start),
                "frameEnd": rw_source_first + int(
                    self.rw_clip_source_out + handle_end),
                "tags": ["_cut-bigger", "prep", "delete"]
            })
        elif rw_source_duration < rw_clip_duration_h:
            self.log.debug("Media duration lower: {}".format(
                (rw_source_duration - rw_clip_duration_h)))
            representation.update({
                "frameStart": rw_source_first + int(
                    self.rw_clip_source_in - handle_start),
                "frameEnd": rw_source_first + int(
                    self.rw_clip_source_out + handle_end),
                "tags": ["prep", "delete"]
            })

        instance.data["representations"].append(representation)

        self.create_thumbnail(instance)

        self.log.debug(
            "Added representations: {}".format(
                instance.data["representations"]))

    def create_thumbnail(self, instance):
        source_file = os.path.basename(self.rw_source_path)
        spliter, padding = get_sequence_pattern_and_padding(source_file)

        if spliter:
            head, ext = source_file.split(spliter)
        else:
            head, ext = os.path.splitext(source_file)

        # staging dir creation
        staging_dir = os.path.dirname(
            self.rw_source_path)

        # get thumbnail frame from the middle
        thumb_frame = int(self.rw_clip_source_in + (
            (self.rw_clip_source_out - self.rw_clip_source_in) / 2))

        thumb_file = "{}thumbnail{}{}".format(head, thumb_frame, ".png")
        thumb_path = os.path.join(staging_dir, thumb_file)

        thumbnail = self.rw_clip.thumbnail(thumb_frame).save(
            thumb_path,
            format='png'
        )
        self.log.debug(
            "__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))

        self.log.debug("__ thumbnail: {}".format(thumbnail))
        thumb_representation = {
            'files': thumb_file,
            'stagingDir': staging_dir,
            'name': "thumbnail",
            'thumbnail': True,
            'ext': "png"
        }
        instance.data["representations"].append(
            thumb_representation)

    def version_data(self, instance):
        transfer_data = [
            "handleStart", "handleEnd", "sourceIn", "sourceOut",
            "frameStart", "frameEnd", "sourceInH", "sourceOutH",
            "clipIn", "clipOut", "clipInH", "clipOutH", "asset",
            "track"
        ]

        version_data = dict()
        # pass data to version
        version_data.update({k: instance.data[k] for k in transfer_data})

        if 'version' in instance.data:
            version_data["version"] = instance.data["version"]

        # add to data of representation
        version_data.update({
            "colorspace": self.rw_clip.sourceMediaColourTransform(),
            "families": instance.data["families"],
            "subset": instance.data["subset"],
            "fps": instance.data["fps"]
        })
        instance.data["versionData"] = version_data
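The source in/out offset logic above can be checked with made-up numbers: a main clip at timeline 1010-1090 sitting over a review clip at 1000-1100 with source in/out 0-100:

    source_in_diff = 1010 - 1000    # 10: main clip starts later
    source_out_diff = 1090 - 1100   # -10: main clip ends earlier
    rw_clip_source_in = 0 + 10      # trimmed in to 10
    rw_clip_source_out = 100 - 10   # trimmed out to 90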
@ -1,57 +0,0 @@
import os
from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
import pyblish
import openpype


class ExtractAudioFile(openpype.api.Extractor):
    """Extracts audio subset file from all active timeline audio tracks"""

    order = pyblish.api.ExtractorOrder
    label = "Extract Subset Audio"
    hosts = ["hiero"]
    families = ["audio"]
    match = pyblish.api.Intersection

    def process(self, instance):
        # get sequence
        sequence = instance.context.data["activeSequence"]
        subset = instance.data["subset"]

        # get timeline in / out
        clip_in = instance.data["clipIn"]
        clip_out = instance.data["clipOut"]
        # get handles from context
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        staging_dir = self.staging_dir(instance)
        self.log.info("Created staging dir: {}...".format(staging_dir))

        # path to wav file
        audio_file = os.path.join(
            staging_dir, "{}.wav".format(subset)
        )

        # export audio to disk
        writeSequenceAudioWithHandles(
            audio_file,
            sequence,
            clip_in,
            clip_out,
            handle_start,
            handle_end
        )

        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        representation = {
            'files': os.path.basename(audio_file),
            'stagingDir': staging_dir,
            'name': "wav",
            'ext': "wav"
        }

        instance.data["representations"].append(representation)
@ -1,334 +0,0 @@
import os
import sys
import six
import errno
from pyblish import api
import openpype
import clique
from avalon.vendor import filelink


class ExtractReviewPreparation(openpype.api.Extractor):
    """Cut up clips from long video file"""

    order = api.ExtractorOrder
    label = "Extract Review Preparation"
    hosts = ["hiero"]
    families = ["review"]

    # presets
    tags_addition = []

    def process(self, instance):
        inst_data = instance.data
        asset = inst_data["asset"]
        review_item_data = instance.data.get("reviewItemData")

        # get representations and loop them
        representations = inst_data["representations"]

        # get resolution default
        resolution_width = inst_data["resolutionWidth"]
        resolution_height = inst_data["resolutionHeight"]

        # frame range data
        media_duration = review_item_data["mediaDuration"]

        ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")
        ffprobe_path = openpype.lib.get_ffmpeg_tool_path("ffprobe")

        # filter out mov and img sequences
        representations_new = representations[:]
        for repre in representations:
            input_args = list()
            output_args = list()

            tags = repre.get("tags", [])

            # check if supported tags are in representation for activation
            filter_tag = False
            for tag in ["_cut-bigger", "prep"]:
                if tag in tags:
                    filter_tag = True
                    break
            if not filter_tag:
                continue

            self.log.debug("__ repre: {}".format(repre))

            files = repre.get("files")
            staging_dir = repre.get("stagingDir")
            fps = repre.get("fps")
            ext = repre.get("ext")

            # make paths
            full_output_dir = os.path.join(
                staging_dir, "cuts")

            if isinstance(files, list):
                new_files = list()

                # frame range delivery included handles
                frame_start = (
                    inst_data["frameStart"] - inst_data["handleStart"])
                frame_end = (
                    inst_data["frameEnd"] + inst_data["handleEnd"])
                self.log.debug("_ frame_start: {}".format(frame_start))
                self.log.debug("_ frame_end: {}".format(frame_end))

                # make collection from input files list
                collections, remainder = clique.assemble(files)
                collection = collections.pop()
                self.log.debug("_ collection: {}".format(collection))

                # name components
                head = collection.format("{head}")
                padding = collection.format("{padding}")
                tail = collection.format("{tail}")
                self.log.debug("_ head: {}".format(head))
                self.log.debug("_ padding: {}".format(padding))
                self.log.debug("_ tail: {}".format(tail))

                # make destination file with instance data
                # frame start and end range
                index = 0
                for image in collection:
                    dst_file_num = frame_start + index
                    dst_file_name = head + str(padding % dst_file_num) + tail
                    src = os.path.join(staging_dir, image)
                    dst = os.path.join(full_output_dir, dst_file_name)
                    self.log.info("Creating temp hardlinks: {}".format(dst))
                    self.hardlink_file(src, dst)
                    new_files.append(dst_file_name)
                    index += 1

                self.log.debug("_ new_files: {}".format(new_files))

            else:
                # ffmpeg when single file
                new_files = "{}_{}".format(asset, files)

                # frame range
                frame_start = repre.get("frameStart")
                frame_end = repre.get("frameEnd")

                full_input_path = os.path.join(
                    staging_dir, files)

                os.path.isdir(full_output_dir) or os.makedirs(full_output_dir)

                full_output_path = os.path.join(
                    full_output_dir, new_files)

                self.log.debug(
                    "__ full_input_path: {}".format(full_input_path))
                self.log.debug(
                    "__ full_output_path: {}".format(full_output_path))

                # check if audio stream is in input video file
                ffprob_cmd = (
                    "\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams"
                    " -select_streams a -loglevel error"
                ).format(**locals())

                self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
                audio_check_output = openpype.api.run_subprocess(ffprob_cmd)
                self.log.debug(
                    "audio_check_output: {}".format(audio_check_output))

                # Fix one frame difference
                """ TODO: this is just work-around for issue:
                    https://github.com/pypeclub/pype/issues/659
                """
                frame_duration_extend = 1
                if audio_check_output and ("audio" in inst_data["families"]):
                    frame_duration_extend = 0

                # translate frame to sec
                start_sec = float(frame_start) / fps
                duration_sec = float(
                    (frame_end - frame_start) + frame_duration_extend) / fps

                empty_add = None

                # check if not missing frames at start
                if (start_sec < 0) or (media_duration < frame_end):
                    # for later switching off `-c:v copy` output arg
                    empty_add = True

                    # init empty variables
                    video_empty_start = video_layer_start = ""
                    audio_empty_start = audio_layer_start = ""
                    video_empty_end = video_layer_end = ""
                    audio_empty_end = audio_layer_end = ""
                    audio_input = audio_output = ""
                    v_inp_idx = 0
                    concat_n = 1

                    # try to get video native resolution data
                    try:
                        resolution_output = openpype.api.run_subprocess((
                            "\"{ffprobe_path}\" -i \"{full_input_path}\""
                            " -v error "
                            "-select_streams v:0 -show_entries "
                            "stream=width,height -of csv=s=x:p=0"
                        ).format(**locals()))

                        x, y = resolution_output.split("x")
                        resolution_width = int(x)
                        resolution_height = int(y)
                    except Exception as _ex:
                        self.log.warning(
                            "Video native resolution is untraceable: "
                            "{}".format(_ex))

                    if audio_check_output:
                        # adding input for empty audio
                        input_args.append("-f lavfi -i anullsrc")

                        # define audio empty concat variables
                        audio_input = "[1:a]"
                        audio_output = ":a=1"
                        v_inp_idx = 1

                    # adding input for video black frame
                    input_args.append((
                        "-f lavfi -i \"color=c=black:"
                        "s={resolution_width}x{resolution_height}:r={fps}\""
                    ).format(**locals()))

                    if (start_sec < 0):
                        # recalculate input video timing
                        empty_start_dur = abs(start_sec)
                        start_sec = 0
                        duration_sec = float(frame_end - (
                            frame_start + (empty_start_dur * fps)) + 1) / fps

                        # define starting empty video concat variables
                        video_empty_start = (
                            "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];"  # noqa
                        ).format(**locals())
                        video_layer_start = "[gv0]"

                        if audio_check_output:
                            # define starting empty audio concat variables
                            audio_empty_start = (
                                "[0]atrim=duration={empty_start_dur}[ga0];"
                            ).format(**locals())
                            audio_layer_start = "[ga0]"

                        # alter concat number of clips
                        concat_n += 1

                    # check if not missing frames at the end
                    if (media_duration < frame_end):
                        # recalculate timing
                        empty_end_dur = float(
                            frame_end - media_duration + 1) / fps
                        duration_sec = float(
                            media_duration - frame_start) / fps

                        # define ending empty video concat variables
                        video_empty_end = (
                            "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
                        ).format(**locals())
                        video_layer_end = "[gv1]"

                        if audio_check_output:
                            # define ending empty audio concat variables
                            audio_empty_end = (
                                "[0]atrim=duration={empty_end_dur}[ga1];"
                            ).format(**locals())
                            audio_layer_end = "[ga0]"

                        # alter concat number of clips
                        concat_n += 1

                    # concatenating black frames together
                    output_args.append((
                        "-filter_complex \""
                        "{audio_empty_start}"
                        "{video_empty_start}"
                        "{audio_empty_end}"
                        "{video_empty_end}"
                        "{video_layer_start}{audio_layer_start}[1:v]{audio_input}"  # noqa
                        "{video_layer_end}{audio_layer_end}"
                        "concat=n={concat_n}:v=1{audio_output}\""
                    ).format(**locals()))

                # append ffmpeg input video clip
                input_args.append("-ss {}".format(start_sec))
                input_args.append("-t {}".format(duration_sec))
                input_args.append("-i \"{}\"".format(full_input_path))

                # add copy audio video codec if only shortening clip
                if ("_cut-bigger" in tags) and (not empty_add):
                    output_args.append("-c:v copy")

                # make sure there is no frame-to-frame compression
                output_args.append("-intra")

                # output filename
                output_args.append("-y \"{}\"".format(full_output_path))

                mov_args = [
                    "\"{}\"".format(ffmpeg_path),
                    " ".join(input_args),
                    " ".join(output_args)
                ]
                subprcs_cmd = " ".join(mov_args)

                # run subprocess
                self.log.debug("Executing: {}".format(subprcs_cmd))
                output = openpype.api.run_subprocess(subprcs_cmd)
                self.log.debug("Output: {}".format(output))

            repre_new = {
                "files": new_files,
                "stagingDir": full_output_dir,
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "frameStartFtrack": frame_start,
                "frameEndFtrack": frame_end,
                "step": 1,
                "fps": fps,
                "name": "cut_up_preview",
                "tags": [
                    "review", "ftrackreview", "delete"] + self.tags_addition,
                "ext": ext,
                "anatomy_template": "publish"
            }

            representations_new.append(repre_new)

        for repre in representations_new:
            if ("delete" in repre.get("tags", [])) and (
                    "cut_up_preview" not in repre["name"]):
                representations_new.remove(repre)

        self.log.debug(
            "Representations: {}".format(representations_new))
        instance.data["representations"] = representations_new

    def hardlink_file(self, src, dst):
        dirname = os.path.dirname(dst)

        # make sure the destination folder exists
        try:
            os.makedirs(dirname)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                self.log.critical("An unexpected error occurred.")
                six.reraise(*sys.exc_info())

        # create hardlinked file
        try:
            filelink.create(src, dst, filelink.HARDLINK)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                self.log.critical("An unexpected error occurred.")
                six.reraise(*sys.exc_info())
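The clique calls in the hardlink branch behave as follows; the plate file names are made up:

    import clique

    collections, remainder = clique.assemble(
        ["plate.1001.exr", "plate.1002.exr", "plate.1003.exr"])
    collection = collections.pop()
    collection.format("{head}")     # "plate."
    collection.format("{padding}")  # "%04d"
    collection.format("{tail}")     # ".exr"
    # destination names are rebuilt as head + (padding % frame) + tail,
    # e.g. "plate." + ("%04d" % 990) + ".exr" -> "plate.0990.exr"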
@ -1,25 +0,0 @@
import pyblish
from openpype.hosts.hiero.api import is_overlapping


class ValidateAudioFile(pyblish.api.InstancePlugin):
    """Validate audio subset has available audio track clips"""

    order = pyblish.api.ValidatorOrder
    label = "Validate Audio Tracks"
    hosts = ["hiero"]
    families = ["audio"]

    def process(self, instance):
        clip = instance.data["item"]
        audio_tracks = instance.context.data["audioTracks"]
        audio_clip = None

        for a_track in audio_tracks:
            for item in a_track.items():
                if is_overlapping(item, clip):
                    audio_clip = item

        assert audio_clip, "Missing relative audio clip for clip {}".format(
            clip.name()
        )
@ -1,22 +0,0 @@
from pyblish import api


class ValidateHierarchy(api.InstancePlugin):
    """Validate clip's hierarchy data."""

    order = api.ValidatorOrder
    families = ["clip", "shot"]
    label = "Validate Hierarchy"
    hosts = ["hiero"]

    def process(self, instance):
        asset_name = instance.data.get("asset", None)
        hierarchy = instance.data.get("hierarchy", None)
        parents = instance.data.get("parents", None)

        assert hierarchy, "Hierarchy Tag has to be set \
            and added to clip `{}`".format(asset_name)
        assert parents, "Parents built from Hierarchy Tag have \
            to be set and added to clip `{}`".format(asset_name)
@ -1,31 +0,0 @@
from pyblish import api


class ValidateNames(api.InstancePlugin):
    """Validate sequence, video track and track item names.

    When creating output directories with the name of an item, a name
    ending with a whitespace will fail the extraction.
    Exact matching to optimize processing.
    """

    order = api.ValidatorOrder
    families = ["clip"]
    match = api.Exact
    label = "Names"
    hosts = ["hiero"]

    def process(self, instance):

        item = instance.data["item"]

        msg = "Track item \"{0}\" ends with a whitespace."
        assert not item.name().endswith(" "), msg.format(item.name())

        msg = "Video track \"{0}\" ends with a whitespace."
        msg = msg.format(item.parent().name())
        assert not item.parent().name().endswith(" "), msg

        msg = "Sequence \"{0}\" ends with a whitespace."
        msg = msg.format(item.parent().parent().name())
        assert not item.parent().parent().name().endswith(" "), msg