mirror of
https://github.com/ynput/ayon-core.git
synced 2025-12-24 21:04:40 +01:00
Hiero: Solving Merge conflicts
This commit is contained in:
parent
e8d00b5f3c
commit
40fe1ba76a
12 changed files with 31 additions and 1045 deletions
|
|
@ -4,10 +4,10 @@ import hiero
|
|||
from Qt import QtWidgets, QtCore
|
||||
from avalon.vendor import qargparse
|
||||
import avalon.api as avalon
|
||||
import pype.api as pype
|
||||
import openpype.api as openpype
|
||||
from . import lib
|
||||
|
||||
log = pype.Logger().get_logger(__name__)
|
||||
log = openpype.Logger().get_logger(__name__)
|
||||
|
||||
|
||||
def load_stylesheet():
|
||||
|
|
@ -477,7 +477,7 @@ class ClipLoader:
|
|||
|
||||
"""
|
||||
asset_name = self.context["representation"]["context"]["asset"]
|
||||
self.data["assetData"] = pype.get_asset(asset_name)["data"]
|
||||
self.data["assetData"] = openpype.get_asset(asset_name)["data"]
|
||||
|
||||
def _make_track_item(self, source_bin_item, audio=False):
|
||||
""" Create track item with """
|
||||
|
|
@ -593,16 +593,16 @@ class ClipLoader:
|
|||
return track_item
|
||||
|
||||
|
||||
class Creator(pype.Creator):
|
||||
class Creator(openpype.Creator):
|
||||
"""Creator class wrapper
|
||||
"""
|
||||
clip_color = "Purple"
|
||||
rename_index = None
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
import pype.hosts.hiero.api as phiero
|
||||
import openpype.hosts.hiero.api as phiero
|
||||
super(Creator, self).__init__(*args, **kwargs)
|
||||
self.presets = pype.get_current_project_settings()[
|
||||
self.presets = openpype.get_current_project_settings()[
|
||||
"hiero"]["create"].get(self.__class__.__name__, {})
|
||||
|
||||
# adding basic current context resolve objects
|
||||
|
|
@ -774,8 +774,8 @@ class PublishClip:
|
|||
_spl = text.split("#")
|
||||
_len = (len(_spl) - 1)
|
||||
_repl = "{{{0}:0>{1}}}".format(name, _len)
|
||||
new_text = text.replace(("#" * _len), _repl)
|
||||
return new_text
|
||||
return text.replace(("#" * _len), _repl)
|
||||
|
||||
|
||||
def _convert_to_tag_data(self):
|
||||
""" Convert internal data to tag data.
|
||||
|
|
@ -120,9 +120,9 @@ class CreateShotClip(phiero.Creator):
|
|||
"vSyncTrack": {
|
||||
"value": gui_tracks, # noqa
|
||||
"type": "QComboBox",
|
||||
"label": "Master track",
|
||||
"label": "Hero track",
|
||||
"target": "ui",
|
||||
"toolTip": "Select driving track name which should be mastering all others", # noqa
|
||||
"toolTip": "Select driving track name which should be hero for all others", # noqa
|
||||
"order": 1}
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ class CollectReview(api.InstancePlugin):
|
|||
Exception: description
|
||||
|
||||
"""
|
||||
review_track = instance.data.get("review")
|
||||
review_track = instance.data.get("reviewTrack")
|
||||
video_tracks = instance.context.data["videoTracks"]
|
||||
for track in video_tracks:
|
||||
if review_track not in track.name():
|
||||
|
|
|
|||
|
|
@ -132,7 +132,7 @@ class ExtractReviewPreparation(openpype.api.Extractor):
|
|||
).format(**locals())
|
||||
|
||||
self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
|
||||
audio_check_output = openpype.api.subprocess(ffprob_cmd)
|
||||
audio_check_output = openpype.api.run_subprocess(ffprob_cmd)
|
||||
self.log.debug(
|
||||
"audio_check_output: {}".format(audio_check_output))
|
||||
|
||||
|
|
@ -167,7 +167,7 @@ class ExtractReviewPreparation(openpype.api.Extractor):
|
|||
|
||||
# try to get video native resolution data
|
||||
try:
|
||||
resolution_output = openpype.api.subprocess((
|
||||
resolution_output = openpype.api.run_subprocess((
|
||||
"\"{ffprobe_path}\" -i \"{full_input_path}\""
|
||||
" -v error "
|
||||
"-select_streams v:0 -show_entries "
|
||||
|
|
@ -280,7 +280,7 @@ class ExtractReviewPreparation(openpype.api.Extractor):
|
|||
|
||||
# run subprocess
|
||||
self.log.debug("Executing: {}".format(subprcs_cmd))
|
||||
output = openpype.api.subprocess(subprcs_cmd)
|
||||
output = openpype.api.run_subprocess(subprcs_cmd)
|
||||
self.log.debug("Output: {}".format(output))
|
||||
|
||||
repre_new = {
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ class PreCollectInstances(api.ContextPlugin):
|
|||
label = "Pre-collect Instances"
|
||||
hosts = ["hiero"]
|
||||
|
||||
|
||||
def process(self, context):
|
||||
track_items = phiero.get_track_items(
|
||||
selected=True, check_tagged=True, check_enabled=True)
|
||||
|
|
@ -34,7 +35,7 @@ class PreCollectInstances(api.ContextPlugin):
|
|||
"Processing enabled track items: {}".format(len(track_items)))
|
||||
|
||||
for _ti in track_items:
|
||||
data = dict()
|
||||
data = {}
|
||||
clip = _ti.source()
|
||||
|
||||
# get clips subtracks and anotations
|
||||
|
|
@ -60,7 +61,8 @@ class PreCollectInstances(api.ContextPlugin):
|
|||
|
||||
asset = tag_parsed_data["asset"]
|
||||
subset = tag_parsed_data["subset"]
|
||||
review = tag_parsed_data.get("review")
|
||||
review_track = tag_parsed_data.get("reviewTrack")
|
||||
hiero_track = tag_parsed_data.get("heroTrack")
|
||||
audio = tag_parsed_data.get("audio")
|
||||
|
||||
# remove audio attribute from data
|
||||
|
|
@ -78,8 +80,8 @@ class PreCollectInstances(api.ContextPlugin):
|
|||
file_info = media_source.fileinfos().pop()
|
||||
source_first_frame = int(file_info.startFrame())
|
||||
|
||||
# apply only for feview and master track instance
|
||||
if review:
|
||||
# apply only for review and master track instance
|
||||
if review_track and hiero_track:
|
||||
families += ["review", "ftrack"]
|
||||
|
||||
data.update({
|
||||
|
|
@ -94,6 +96,7 @@ class PreCollectInstances(api.ContextPlugin):
|
|||
# track item attributes
|
||||
"track": track.name(),
|
||||
"trackItem": track,
|
||||
"reviewTrack": review_track,
|
||||
|
||||
# version data
|
||||
"versionData": {
|
||||
|
|
@ -113,7 +116,7 @@ class PreCollectInstances(api.ContextPlugin):
|
|||
|
||||
instance = context.create_instance(**data)
|
||||
|
||||
self.log.info("Creating instance: {}".format(instance))
|
||||
self.log.info("Creating instance.data: {}".format(instance.data))
|
||||
|
||||
if audio:
|
||||
a_data = dict()
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@ import pyblish.api
|
|||
from avalon import io
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
|
||||
"""Create entities in Avalon based on collected data."""
|
||||
|
||||
|
|
@ -100,13 +99,20 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
|
|||
if entity:
|
||||
# Do not override data, only update
|
||||
cur_entity_data = entity.get("data") or {}
|
||||
entity_tasks = cur_entity_data["tasks"] or {}
|
||||
|
||||
# create tasks as dict by default
|
||||
if not entity_tasks:
|
||||
cur_entity_data["tasks"] = entity_tasks
|
||||
|
||||
new_tasks = data.pop("tasks", {})
|
||||
if "tasks" not in cur_entity_data and not new_tasks:
|
||||
continue
|
||||
for task_name in new_tasks:
|
||||
if task_name in cur_entity_data["tasks"].keys():
|
||||
if task_name in entity_tasks.keys():
|
||||
continue
|
||||
cur_entity_data["tasks"][task_name] = new_tasks[task_name]
|
||||
cur_entity_data["tasks"][task_name] = new_tasks[
|
||||
task_name]
|
||||
cur_entity_data.update(data)
|
||||
data = cur_entity_data
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -1,261 +0,0 @@
|
|||
from pyblish import api
|
||||
import os
|
||||
import clique
|
||||
from pype.hosts.hiero.api import (
|
||||
is_overlapping, get_sequence_pattern_and_padding)
|
||||
|
||||
|
||||
class CollectReview(api.InstancePlugin):
|
||||
"""Collect review representation.
|
||||
"""
|
||||
|
||||
# Run just before CollectSubsets
|
||||
order = api.CollectorOrder + 0.1022
|
||||
label = "Collect Review"
|
||||
hosts = ["hiero"]
|
||||
families = ["review"]
|
||||
|
||||
def get_review_item(self, instance):
|
||||
"""
|
||||
Get review clip track item from review track name
|
||||
|
||||
Args:
|
||||
instance (obj): publishing instance
|
||||
|
||||
Returns:
|
||||
hiero.core.TrackItem: corresponding track item
|
||||
|
||||
Raises:
|
||||
Exception: description
|
||||
|
||||
"""
|
||||
review_track = instance.data.get("reviewTrack")
|
||||
video_tracks = instance.context.data["videoTracks"]
|
||||
for track in video_tracks:
|
||||
if review_track not in track.name():
|
||||
continue
|
||||
for item in track.items():
|
||||
self.log.debug(item)
|
||||
if is_overlapping(item, self.main_clip):
|
||||
self.log.debug("Winner is: {}".format(item))
|
||||
break
|
||||
|
||||
# validate the clip is fully converted with review clip
|
||||
assert is_overlapping(
|
||||
item, self.main_clip, strict=True), (
|
||||
"Review clip not cowering fully "
|
||||
"the clip `{}`").format(self.main_clip.name())
|
||||
|
||||
return item
|
||||
|
||||
def process(self, instance):
|
||||
tags = ["review", "ftrackreview"]
|
||||
|
||||
# get reviewable item from `review` instance.data attribute
|
||||
self.main_clip = instance.data.get("item")
|
||||
self.rw_clip = self.get_review_item(instance)
|
||||
|
||||
# let user know there is missing review clip and convert instance
|
||||
# back as not reviewable
|
||||
assert self.rw_clip, "Missing reviewable clip for '{}'".format(
|
||||
self.main_clip.name()
|
||||
)
|
||||
|
||||
# add to representations
|
||||
if not instance.data.get("representations"):
|
||||
instance.data["representations"] = list()
|
||||
|
||||
# get review media main info
|
||||
rw_source = self.rw_clip.source().mediaSource()
|
||||
rw_source_duration = int(rw_source.duration())
|
||||
self.rw_source_path = rw_source.firstpath()
|
||||
rw_source_file_info = rw_source.fileinfos().pop()
|
||||
|
||||
# define if review media is sequence
|
||||
is_sequence = bool(not rw_source.singleFile())
|
||||
self.log.debug("is_sequence: {}".format(is_sequence))
|
||||
|
||||
# get handles
|
||||
handle_start = instance.data["handleStart"]
|
||||
handle_end = instance.data["handleEnd"]
|
||||
|
||||
# review timeline and source frame ranges
|
||||
rw_clip_in = int(self.rw_clip.timelineIn())
|
||||
rw_clip_out = int(self.rw_clip.timelineOut())
|
||||
self.rw_clip_source_in = int(self.rw_clip.sourceIn())
|
||||
self.rw_clip_source_out = int(self.rw_clip.sourceOut())
|
||||
rw_source_first = int(rw_source_file_info.startFrame())
|
||||
|
||||
# calculate delivery source_in and source_out
|
||||
# main_clip_timeline_in - review_item_timeline_in + 1
|
||||
main_clip_in = self.main_clip.timelineIn()
|
||||
main_clip_out = self.main_clip.timelineOut()
|
||||
|
||||
source_in_diff = main_clip_in - rw_clip_in
|
||||
source_out_diff = main_clip_out - rw_clip_out
|
||||
|
||||
if source_in_diff:
|
||||
self.rw_clip_source_in += source_in_diff
|
||||
if source_out_diff:
|
||||
self.rw_clip_source_out += source_out_diff
|
||||
|
||||
# review clip durations
|
||||
rw_clip_duration = (
|
||||
self.rw_clip_source_out - self.rw_clip_source_in) + 1
|
||||
rw_clip_duration_h = rw_clip_duration + (
|
||||
handle_start + handle_end)
|
||||
|
||||
# add created data to review item data
|
||||
instance.data["reviewItemData"] = {
|
||||
"mediaDuration": rw_source_duration
|
||||
}
|
||||
|
||||
file_dir = os.path.dirname(self.rw_source_path)
|
||||
file = os.path.basename(self.rw_source_path)
|
||||
ext = os.path.splitext(file)[-1]
|
||||
|
||||
# detect if sequence
|
||||
if not is_sequence:
|
||||
# is video file
|
||||
files = file
|
||||
else:
|
||||
files = list()
|
||||
spliter, padding = get_sequence_pattern_and_padding(file)
|
||||
self.log.debug("_ spliter, padding: {}, {}".format(
|
||||
spliter, padding))
|
||||
base_name = file.split(spliter)[0]
|
||||
|
||||
# define collection and calculate frame range
|
||||
collection = clique.Collection(base_name, ext, padding, set(range(
|
||||
int(rw_source_first + int(
|
||||
self.rw_clip_source_in - handle_start)),
|
||||
int(rw_source_first + int(
|
||||
self.rw_clip_source_out + handle_end) + 1))))
|
||||
self.log.debug("_ collection: {}".format(collection))
|
||||
|
||||
real_files = os.listdir(file_dir)
|
||||
self.log.debug("_ real_files: {}".format(real_files))
|
||||
|
||||
# collect frames to repre files list
|
||||
for item in collection:
|
||||
if item not in real_files:
|
||||
self.log.debug("_ item: {}".format(item))
|
||||
continue
|
||||
files.append(item)
|
||||
|
||||
# add prep tag
|
||||
tags.extend(["prep", "delete"])
|
||||
|
||||
# change label
|
||||
instance.data["label"] = "{0} - ({1})".format(
|
||||
instance.data["label"], ext
|
||||
)
|
||||
|
||||
self.log.debug("Instance review: {}".format(instance.data["name"]))
|
||||
|
||||
# adding representation for review mov
|
||||
representation = {
|
||||
"files": files,
|
||||
"stagingDir": file_dir,
|
||||
"frameStart": rw_source_first + self.rw_clip_source_in,
|
||||
"frameEnd": rw_source_first + self.rw_clip_source_out,
|
||||
"frameStartFtrack": int(
|
||||
self.rw_clip_source_in - handle_start),
|
||||
"frameEndFtrack": int(self.rw_clip_source_out + handle_end),
|
||||
"step": 1,
|
||||
"fps": instance.data["fps"],
|
||||
"name": "review",
|
||||
"tags": tags,
|
||||
"ext": ext[1:]
|
||||
}
|
||||
|
||||
if rw_source_duration > rw_clip_duration_h:
|
||||
self.log.debug("Media duration higher: {}".format(
|
||||
(rw_source_duration - rw_clip_duration_h)))
|
||||
representation.update({
|
||||
"frameStart": rw_source_first + int(
|
||||
self.rw_clip_source_in - handle_start),
|
||||
"frameEnd": rw_source_first + int(
|
||||
self.rw_clip_source_out + handle_end),
|
||||
"tags": ["_cut-bigger", "prep", "delete"]
|
||||
})
|
||||
elif rw_source_duration < rw_clip_duration_h:
|
||||
self.log.debug("Media duration higher: {}".format(
|
||||
(rw_source_duration - rw_clip_duration_h)))
|
||||
representation.update({
|
||||
"frameStart": rw_source_first + int(
|
||||
self.rw_clip_source_in - handle_start),
|
||||
"frameEnd": rw_source_first + int(
|
||||
self.rw_clip_source_out + handle_end),
|
||||
"tags": ["prep", "delete"]
|
||||
})
|
||||
|
||||
instance.data["representations"].append(representation)
|
||||
|
||||
self.create_thumbnail(instance)
|
||||
|
||||
self.log.debug(
|
||||
"Added representations: {}".format(
|
||||
instance.data["representations"]))
|
||||
|
||||
def create_thumbnail(self, instance):
|
||||
source_file = os.path.basename(self.rw_source_path)
|
||||
spliter, padding = get_sequence_pattern_and_padding(source_file)
|
||||
|
||||
if spliter:
|
||||
head, ext = source_file.split(spliter)
|
||||
else:
|
||||
head, ext = os.path.splitext(source_file)
|
||||
|
||||
# staging dir creation
|
||||
staging_dir = os.path.dirname(
|
||||
self.rw_source_path)
|
||||
|
||||
# get thumbnail frame from the middle
|
||||
thumb_frame = int(self.rw_clip_source_in + (
|
||||
(self.rw_clip_source_out - self.rw_clip_source_in) / 2))
|
||||
|
||||
thumb_file = "{}thumbnail{}{}".format(head, thumb_frame, ".png")
|
||||
thumb_path = os.path.join(staging_dir, thumb_file)
|
||||
|
||||
thumbnail = self.rw_clip.thumbnail(thumb_frame).save(
|
||||
thumb_path,
|
||||
format='png'
|
||||
)
|
||||
self.log.debug(
|
||||
"__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))
|
||||
|
||||
self.log.debug("__ thumbnail: {}".format(thumbnail))
|
||||
thumb_representation = {
|
||||
'files': thumb_file,
|
||||
'stagingDir': staging_dir,
|
||||
'name': "thumbnail",
|
||||
'thumbnail': True,
|
||||
'ext': "png"
|
||||
}
|
||||
instance.data["representations"].append(
|
||||
thumb_representation)
|
||||
|
||||
def version_data(self, instance):
|
||||
transfer_data = [
|
||||
"handleStart", "handleEnd", "sourceIn", "sourceOut",
|
||||
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
|
||||
"clipIn", "clipOut", "clipInH", "clipOutH", "asset",
|
||||
"track"
|
||||
]
|
||||
|
||||
version_data = dict()
|
||||
# pass data to version
|
||||
version_data.update({k: instance.data[k] for k in transfer_data})
|
||||
|
||||
if 'version' in instance.data:
|
||||
version_data["version"] = instance.data["version"]
|
||||
|
||||
# add to data of representation
|
||||
version_data.update({
|
||||
"colorspace": self.rw_clip.sourceMediaColourTransform(),
|
||||
"families": instance.data["families"],
|
||||
"subset": instance.data["subset"],
|
||||
"fps": instance.data["fps"]
|
||||
})
|
||||
instance.data["versionData"] = version_data
|
||||
|
|
@ -1,334 +0,0 @@
|
|||
import os
|
||||
import sys
|
||||
import six
|
||||
import errno
|
||||
from pyblish import api
|
||||
import pype
|
||||
import clique
|
||||
from avalon.vendor import filelink
|
||||
|
||||
|
||||
class ExtractReviewPreparation(pype.api.Extractor):
|
||||
"""Cut up clips from long video file"""
|
||||
|
||||
order = api.ExtractorOrder
|
||||
label = "Extract Review Preparation"
|
||||
hosts = ["hiero"]
|
||||
families = ["review"]
|
||||
|
||||
# presets
|
||||
tags_addition = []
|
||||
|
||||
def process(self, instance):
|
||||
inst_data = instance.data
|
||||
asset = inst_data["asset"]
|
||||
review_item_data = instance.data.get("reviewItemData")
|
||||
|
||||
# get representation and loop them
|
||||
representations = inst_data["representations"]
|
||||
|
||||
# get resolution default
|
||||
resolution_width = inst_data["resolutionWidth"]
|
||||
resolution_height = inst_data["resolutionHeight"]
|
||||
|
||||
# frame range data
|
||||
media_duration = review_item_data["mediaDuration"]
|
||||
|
||||
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
|
||||
ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
|
||||
|
||||
# filter out mov and img sequences
|
||||
representations_new = representations[:]
|
||||
for repre in representations:
|
||||
input_args = list()
|
||||
output_args = list()
|
||||
|
||||
tags = repre.get("tags", [])
|
||||
|
||||
# check if supported tags are in representation for activation
|
||||
filter_tag = False
|
||||
for tag in ["_cut-bigger", "prep"]:
|
||||
if tag in tags:
|
||||
filter_tag = True
|
||||
break
|
||||
if not filter_tag:
|
||||
continue
|
||||
|
||||
self.log.debug("__ repre: {}".format(repre))
|
||||
|
||||
files = repre.get("files")
|
||||
staging_dir = repre.get("stagingDir")
|
||||
fps = repre.get("fps")
|
||||
ext = repre.get("ext")
|
||||
|
||||
# make paths
|
||||
full_output_dir = os.path.join(
|
||||
staging_dir, "cuts")
|
||||
|
||||
if isinstance(files, list):
|
||||
new_files = list()
|
||||
|
||||
# frame range delivery included handles
|
||||
frame_start = (
|
||||
inst_data["frameStart"] - inst_data["handleStart"])
|
||||
frame_end = (
|
||||
inst_data["frameEnd"] + inst_data["handleEnd"])
|
||||
self.log.debug("_ frame_start: {}".format(frame_start))
|
||||
self.log.debug("_ frame_end: {}".format(frame_end))
|
||||
|
||||
# make collection from input files list
|
||||
collections, remainder = clique.assemble(files)
|
||||
collection = collections.pop()
|
||||
self.log.debug("_ collection: {}".format(collection))
|
||||
|
||||
# name components
|
||||
head = collection.format("{head}")
|
||||
padding = collection.format("{padding}")
|
||||
tail = collection.format("{tail}")
|
||||
self.log.debug("_ head: {}".format(head))
|
||||
self.log.debug("_ padding: {}".format(padding))
|
||||
self.log.debug("_ tail: {}".format(tail))
|
||||
|
||||
# make destination file with instance data
|
||||
# frame start and end range
|
||||
index = 0
|
||||
for image in collection:
|
||||
dst_file_num = frame_start + index
|
||||
dst_file_name = head + str(padding % dst_file_num) + tail
|
||||
src = os.path.join(staging_dir, image)
|
||||
dst = os.path.join(full_output_dir, dst_file_name)
|
||||
self.log.info("Creating temp hardlinks: {}".format(dst))
|
||||
self.hardlink_file(src, dst)
|
||||
new_files.append(dst_file_name)
|
||||
index += 1
|
||||
|
||||
self.log.debug("_ new_files: {}".format(new_files))
|
||||
|
||||
else:
|
||||
# ffmpeg when single file
|
||||
new_files = "{}_{}".format(asset, files)
|
||||
|
||||
# frame range
|
||||
frame_start = repre.get("frameStart")
|
||||
frame_end = repre.get("frameEnd")
|
||||
|
||||
full_input_path = os.path.join(
|
||||
staging_dir, files)
|
||||
|
||||
os.path.isdir(full_output_dir) or os.makedirs(full_output_dir)
|
||||
|
||||
full_output_path = os.path.join(
|
||||
full_output_dir, new_files)
|
||||
|
||||
self.log.debug(
|
||||
"__ full_input_path: {}".format(full_input_path))
|
||||
self.log.debug(
|
||||
"__ full_output_path: {}".format(full_output_path))
|
||||
|
||||
# check if audio stream is in input video file
|
||||
ffprob_cmd = (
|
||||
"\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams"
|
||||
" -select_streams a -loglevel error"
|
||||
).format(**locals())
|
||||
|
||||
self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
|
||||
audio_check_output = pype.api.run_subprocess(ffprob_cmd)
|
||||
self.log.debug(
|
||||
"audio_check_output: {}".format(audio_check_output))
|
||||
|
||||
# Fix one frame difference
|
||||
""" TODO: this is just work-around for issue:
|
||||
https://github.com/pypeclub/pype/issues/659
|
||||
"""
|
||||
frame_duration_extend = 1
|
||||
if audio_check_output and ("audio" in inst_data["families"]):
|
||||
frame_duration_extend = 0
|
||||
|
||||
# translate frame to sec
|
||||
start_sec = float(frame_start) / fps
|
||||
duration_sec = float(
|
||||
(frame_end - frame_start) + frame_duration_extend) / fps
|
||||
|
||||
empty_add = None
|
||||
|
||||
# check if not missing frames at start
|
||||
if (start_sec < 0) or (media_duration < frame_end):
|
||||
# for later swithing off `-c:v copy` output arg
|
||||
empty_add = True
|
||||
|
||||
# init empty variables
|
||||
video_empty_start = video_layer_start = ""
|
||||
audio_empty_start = audio_layer_start = ""
|
||||
video_empty_end = video_layer_end = ""
|
||||
audio_empty_end = audio_layer_end = ""
|
||||
audio_input = audio_output = ""
|
||||
v_inp_idx = 0
|
||||
concat_n = 1
|
||||
|
||||
# try to get video native resolution data
|
||||
try:
|
||||
resolution_output = pype.api.run_subprocess((
|
||||
"\"{ffprobe_path}\" -i \"{full_input_path}\""
|
||||
" -v error "
|
||||
"-select_streams v:0 -show_entries "
|
||||
"stream=width,height -of csv=s=x:p=0"
|
||||
).format(**locals()))
|
||||
|
||||
x, y = resolution_output.split("x")
|
||||
resolution_width = int(x)
|
||||
resolution_height = int(y)
|
||||
except Exception as _ex:
|
||||
self.log.warning(
|
||||
"Video native resolution is untracable: {}".format(
|
||||
_ex))
|
||||
|
||||
if audio_check_output:
|
||||
# adding input for empty audio
|
||||
input_args.append("-f lavfi -i anullsrc")
|
||||
|
||||
# define audio empty concat variables
|
||||
audio_input = "[1:a]"
|
||||
audio_output = ":a=1"
|
||||
v_inp_idx = 1
|
||||
|
||||
# adding input for video black frame
|
||||
input_args.append((
|
||||
"-f lavfi -i \"color=c=black:"
|
||||
"s={resolution_width}x{resolution_height}:r={fps}\""
|
||||
).format(**locals()))
|
||||
|
||||
if (start_sec < 0):
|
||||
# recalculate input video timing
|
||||
empty_start_dur = abs(start_sec)
|
||||
start_sec = 0
|
||||
duration_sec = float(frame_end - (
|
||||
frame_start + (empty_start_dur * fps)) + 1) / fps
|
||||
|
||||
# define starting empty video concat variables
|
||||
video_empty_start = (
|
||||
"[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];" # noqa
|
||||
).format(**locals())
|
||||
video_layer_start = "[gv0]"
|
||||
|
||||
if audio_check_output:
|
||||
# define starting empty audio concat variables
|
||||
audio_empty_start = (
|
||||
"[0]atrim=duration={empty_start_dur}[ga0];"
|
||||
).format(**locals())
|
||||
audio_layer_start = "[ga0]"
|
||||
|
||||
# alter concat number of clips
|
||||
concat_n += 1
|
||||
|
||||
# check if not missing frames at the end
|
||||
if (media_duration < frame_end):
|
||||
# recalculate timing
|
||||
empty_end_dur = float(
|
||||
frame_end - media_duration + 1) / fps
|
||||
duration_sec = float(
|
||||
media_duration - frame_start) / fps
|
||||
|
||||
# define ending empty video concat variables
|
||||
video_empty_end = (
|
||||
"[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
|
||||
).format(**locals())
|
||||
video_layer_end = "[gv1]"
|
||||
|
||||
if audio_check_output:
|
||||
# define ending empty audio concat variables
|
||||
audio_empty_end = (
|
||||
"[0]atrim=duration={empty_end_dur}[ga1];"
|
||||
).format(**locals())
|
||||
audio_layer_end = "[ga0]"
|
||||
|
||||
# alter concat number of clips
|
||||
concat_n += 1
|
||||
|
||||
# concatting black frame togather
|
||||
output_args.append((
|
||||
"-filter_complex \""
|
||||
"{audio_empty_start}"
|
||||
"{video_empty_start}"
|
||||
"{audio_empty_end}"
|
||||
"{video_empty_end}"
|
||||
"{video_layer_start}{audio_layer_start}[1:v]{audio_input}" # noqa
|
||||
"{video_layer_end}{audio_layer_end}"
|
||||
"concat=n={concat_n}:v=1{audio_output}\""
|
||||
).format(**locals()))
|
||||
|
||||
# append ffmpeg input video clip
|
||||
input_args.append("-ss {}".format(start_sec))
|
||||
input_args.append("-t {}".format(duration_sec))
|
||||
input_args.append("-i \"{}\"".format(full_input_path))
|
||||
|
||||
# add copy audio video codec if only shortening clip
|
||||
if ("_cut-bigger" in tags) and (not empty_add):
|
||||
output_args.append("-c:v copy")
|
||||
|
||||
# make sure it is having no frame to frame comprassion
|
||||
output_args.append("-intra")
|
||||
|
||||
# output filename
|
||||
output_args.append("-y \"{}\"".format(full_output_path))
|
||||
|
||||
mov_args = [
|
||||
"\"{}\"".format(ffmpeg_path),
|
||||
" ".join(input_args),
|
||||
" ".join(output_args)
|
||||
]
|
||||
subprcs_cmd = " ".join(mov_args)
|
||||
|
||||
# run subprocess
|
||||
self.log.debug("Executing: {}".format(subprcs_cmd))
|
||||
output = pype.api.run_subprocess(subprcs_cmd)
|
||||
self.log.debug("Output: {}".format(output))
|
||||
|
||||
repre_new = {
|
||||
"files": new_files,
|
||||
"stagingDir": full_output_dir,
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"frameStartFtrack": frame_start,
|
||||
"frameEndFtrack": frame_end,
|
||||
"step": 1,
|
||||
"fps": fps,
|
||||
"name": "cut_up_preview",
|
||||
"tags": [
|
||||
"review", "ftrackreview", "delete"] + self.tags_addition,
|
||||
"ext": ext,
|
||||
"anatomy_template": "publish"
|
||||
}
|
||||
|
||||
representations_new.append(repre_new)
|
||||
|
||||
for repre in representations_new:
|
||||
if ("delete" in repre.get("tags", [])) and (
|
||||
"cut_up_preview" not in repre["name"]):
|
||||
representations_new.remove(repre)
|
||||
|
||||
self.log.debug(
|
||||
"Representations: {}".format(representations_new))
|
||||
instance.data["representations"] = representations_new
|
||||
|
||||
def hardlink_file(self, src, dst):
|
||||
dirname = os.path.dirname(dst)
|
||||
|
||||
# make sure the destination folder exist
|
||||
try:
|
||||
os.makedirs(dirname)
|
||||
except OSError as e:
|
||||
if e.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
self.log.critical("An unexpected error occurred.")
|
||||
six.reraise(*sys.exc_info())
|
||||
|
||||
# create hardlined file
|
||||
try:
|
||||
filelink.create(src, dst, filelink.HARDLINK)
|
||||
except OSError as e:
|
||||
if e.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
self.log.critical("An unexpected error occurred.")
|
||||
six.reraise(*sys.exc_info())
|
||||
|
|
@ -1,224 +0,0 @@
|
|||
from compiler.ast import flatten
|
||||
from pyblish import api
|
||||
from pype.hosts.hiero import api as phiero
|
||||
import hiero
|
||||
# from pype.hosts.hiero.api import lib
|
||||
# reload(lib)
|
||||
# reload(phiero)
|
||||
|
||||
|
||||
class PreCollectInstances(api.ContextPlugin):
|
||||
"""Collect all Track items selection."""
|
||||
|
||||
order = api.CollectorOrder - 0.509
|
||||
label = "Pre-collect Instances"
|
||||
hosts = ["hiero"]
|
||||
|
||||
|
||||
def process(self, context):
|
||||
track_items = phiero.get_track_items(
|
||||
selected=True, check_tagged=True, check_enabled=True)
|
||||
# only return enabled track items
|
||||
if not track_items:
|
||||
track_items = phiero.get_track_items(
|
||||
check_enabled=True, check_tagged=True)
|
||||
# get sequence and video tracks
|
||||
sequence = context.data["activeSequence"]
|
||||
tracks = sequence.videoTracks()
|
||||
|
||||
# add collection to context
|
||||
tracks_effect_items = self.collect_sub_track_items(tracks)
|
||||
|
||||
context.data["tracksEffectItems"] = tracks_effect_items
|
||||
|
||||
self.log.info(
|
||||
"Processing enabled track items: {}".format(len(track_items)))
|
||||
|
||||
for _ti in track_items:
|
||||
data = {}
|
||||
clip = _ti.source()
|
||||
|
||||
# get clips subtracks and anotations
|
||||
annotations = self.clip_annotations(clip)
|
||||
subtracks = self.clip_subtrack(_ti)
|
||||
self.log.debug("Annotations: {}".format(annotations))
|
||||
self.log.debug(">> Subtracks: {}".format(subtracks))
|
||||
|
||||
# get pype tag data
|
||||
tag_parsed_data = phiero.get_track_item_pype_data(_ti)
|
||||
# self.log.debug(pformat(tag_parsed_data))
|
||||
|
||||
if not tag_parsed_data:
|
||||
continue
|
||||
|
||||
if tag_parsed_data.get("id") != "pyblish.avalon.instance":
|
||||
continue
|
||||
# add tag data to instance data
|
||||
data.update({
|
||||
k: v for k, v in tag_parsed_data.items()
|
||||
if k not in ("id", "applieswhole", "label")
|
||||
})
|
||||
|
||||
asset = tag_parsed_data["asset"]
|
||||
subset = tag_parsed_data["subset"]
|
||||
review_track = tag_parsed_data.get("reviewTrack")
|
||||
hiero_track = tag_parsed_data.get("heroTrack")
|
||||
audio = tag_parsed_data.get("audio")
|
||||
|
||||
# remove audio attribute from data
|
||||
data.pop("audio")
|
||||
|
||||
# insert family into families
|
||||
family = tag_parsed_data["family"]
|
||||
families = [str(f) for f in tag_parsed_data["families"]]
|
||||
families.insert(0, str(family))
|
||||
|
||||
track = _ti.parent()
|
||||
media_source = _ti.source().mediaSource()
|
||||
source_path = media_source.firstpath()
|
||||
file_head = media_source.filenameHead()
|
||||
file_info = media_source.fileinfos().pop()
|
||||
source_first_frame = int(file_info.startFrame())
|
||||
|
||||
# apply only for review and master track instance
|
||||
if review_track and hiero_track:
|
||||
families += ["review", "ftrack"]
|
||||
|
||||
data.update({
|
||||
"name": "{} {} {}".format(asset, subset, families),
|
||||
"asset": asset,
|
||||
"item": _ti,
|
||||
"families": families,
|
||||
|
||||
# tags
|
||||
"tags": _ti.tags(),
|
||||
|
||||
# track item attributes
|
||||
"track": track.name(),
|
||||
"trackItem": track,
|
||||
"reviewTrack": review_track,
|
||||
|
||||
# version data
|
||||
"versionData": {
|
||||
"colorspace": _ti.sourceMediaColourTransform()
|
||||
},
|
||||
|
||||
# source attribute
|
||||
"source": source_path,
|
||||
"sourceMedia": media_source,
|
||||
"sourcePath": source_path,
|
||||
"sourceFileHead": file_head,
|
||||
"sourceFirst": source_first_frame,
|
||||
|
||||
# clip's effect
|
||||
"clipEffectItems": subtracks
|
||||
})
|
||||
|
||||
instance = context.create_instance(**data)
|
||||
|
||||
self.log.info("Creating instance.data: {}".format(instance.data))
|
||||
|
||||
if audio:
|
||||
a_data = dict()
|
||||
|
||||
# add tag data to instance data
|
||||
a_data.update({
|
||||
k: v for k, v in tag_parsed_data.items()
|
||||
if k not in ("id", "applieswhole", "label")
|
||||
})
|
||||
|
||||
# create main attributes
|
||||
subset = "audioMain"
|
||||
family = "audio"
|
||||
families = ["clip", "ftrack"]
|
||||
families.insert(0, str(family))
|
||||
|
||||
name = "{} {} {}".format(asset, subset, families)
|
||||
|
||||
a_data.update({
|
||||
"name": name,
|
||||
"subset": subset,
|
||||
"asset": asset,
|
||||
"family": family,
|
||||
"families": families,
|
||||
"item": _ti,
|
||||
|
||||
# tags
|
||||
"tags": _ti.tags(),
|
||||
})
|
||||
|
||||
a_instance = context.create_instance(**a_data)
|
||||
self.log.info("Creating audio instance: {}".format(a_instance))
|
||||
|
||||
@staticmethod
|
||||
def clip_annotations(clip):
|
||||
"""
|
||||
Returns list of Clip's hiero.core.Annotation
|
||||
"""
|
||||
annotations = []
|
||||
subTrackItems = flatten(clip.subTrackItems())
|
||||
annotations += [item for item in subTrackItems if isinstance(
|
||||
item, hiero.core.Annotation)]
|
||||
return annotations
|
||||
|
||||
@staticmethod
def clip_subtrack(clip):
    """Return enabled ``hiero.core.SubTrackItem`` objects for *clip*.

    Sub-track items are gathered from the clip's parent track;
    annotations and disabled items are filtered out.

    Args:
        clip (hiero.core.TrackItem): item whose parent track is scanned.

    Returns:
        list: enabled, non-annotation sub-track items.
    """
    return [
        sub_item
        for sub_item in flatten(clip.parent().subTrackItems())
        # annotations are never treated as effect sub-tracks
        if not isinstance(sub_item, hiero.core.Annotation)
        # skip anything the artist has disabled
        and sub_item.isEnabled()
    ]
|
||||
|
||||
@staticmethod
def collect_sub_track_items(tracks):
    """Map track index to its enabled, non-annotation sub-track items.

    Only enabled tracks that carry no regular clip items are considered
    (i.e. effect-only tracks); tracks that end up with no qualifying
    sub-track items are omitted from the result entirely.

    Args:
        tracks (iterable): ``hiero.core.VideoTrack`` objects to scan.

    Returns:
        dict: ``{track_index: [sub_track_item, ...]}``.
    """
    collected = {}
    for track in tracks:
        # a track holding regular clips is not an effect-only track
        if track.items():
            continue
        # ignore tracks the artist has disabled
        if not track.isEnabled():
            continue

        usable = [
            sub_item
            for sub_item in flatten(track.subTrackItems())
            if sub_item.isEnabled()
            and not isinstance(sub_item, hiero.core.Annotation)
        ]
        # only record tracks that actually contribute sub-track items
        if usable:
            collected[track.trackIndex()] = usable

    return collected
|
||||
|
|
@ -1 +0,0 @@
|
|||
Subproject commit b746fedf7286c3755a46f07ab72f4c414cd41fc0
|
||||
|
|
@ -1 +0,0 @@
|
|||
Subproject commit d277f474ab016e7b53479c36af87cb861d0cc53e
|
||||
|
|
@ -1,202 +0,0 @@
|
|||
import pyblish.api
|
||||
from avalon import io
|
||||
from copy import deepcopy
|
||||
|
||||
class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
    """Create entities in Avalon based on collected data.

    Reads ``context.data["hierarchyContext"]`` (a nested dict describing
    project/asset hierarchy), prunes it down to the assets that are being
    actively published, and mirrors the remaining hierarchy into the
    Avalon database — creating, updating or unarchiving asset documents
    as needed.
    """

    # run slightly before regular extractors so the DB hierarchy exists
    # for anything that integrates afterwards
    order = pyblish.api.ExtractorOrder - 0.01
    label = "Extract Hierarchy To Avalon"
    families = ["clip", "shot"]

    def process(self, context):
        """Filter the collected hierarchy and push it into Avalon.

        Args:
            context (pyblish.api.Context): publishing context; must carry
                ``hierarchyContext`` in its data, otherwise this plugin
                is a no-op.
        """
        # processing starts here
        if "hierarchyContext" not in context.data:
            self.log.info("skipping IntegrateHierarchyToAvalon")
            return
        # work on a copy so pruning does not mutate collected data mid-flight
        hierarchy_context = deepcopy(context.data["hierarchyContext"])

        # make sure the Avalon DB connection is installed
        if not io.Session:
            io.install()

        active_assets = []
        # filter only the active publishing instances
        for instance in context:
            # instances explicitly switched off are ignored
            if instance.data.get("publish") is False:
                continue

            # instances without an asset cannot map into the hierarchy
            if not instance.data.get("asset"):
                continue

            active_assets.append(instance.data["asset"])

        # remove duplicity in list
        self.active_assets = list(set(active_assets))
        self.log.debug("__ self.active_assets: {}".format(self.active_assets))

        # drop hierarchy leaves that are not among the active assets
        hierarchy_context = self._get_assets(hierarchy_context)

        self.log.debug("__ hierarchy_context: {}".format(hierarchy_context))
        # store the pruned hierarchy back on the context for later plugins
        input_data = context.data["hierarchyContext"] = hierarchy_context

        # self.project is populated by import_to_avalon on the first
        # (project-level) entry of the hierarchy
        self.project = None
        self.import_to_avalon(input_data)

    def import_to_avalon(self, input_data, parent=None):
        """Recursively write one hierarchy level into the Avalon DB.

        Expects the project entry to be visited first; child levels are
        processed via recursion with *parent* set to the entity created
        or found at the level above.

        Args:
            input_data (dict): ``{name: entity_data}`` for this level.
            parent (dict, optional): Avalon document of the parent entity;
                ``None`` only for the top (project) level.

        Raises:
            AssertionError: when a non-project entry is reached before the
                project entity was processed (wrong collection order).
        """
        for name in input_data:
            self.log.info("input_data[name]: {}".format(input_data[name]))
            entity_data = input_data[name]
            entity_type = entity_data["entity_type"]

            data = {}
            data["entityType"] = entity_type

            # Custom attributes.
            for k, val in entity_data.get("custom_attributes", {}).items():
                data[k] = val

            if entity_type.lower() != "project":
                data["inputs"] = entity_data.get("inputs", [])

                # Tasks.
                tasks = entity_data.get("tasks", {})
                # NOTE(review): 'or' looks like it should be 'and' —
                # len(None) would raise TypeError if tasks were None;
                # confirm intended condition
                if tasks is not None or len(tasks) > 0:
                    data["tasks"] = tasks
                parents = []
                visualParent = None
                # do not store project"s id as visualParent (silo asset)
                if self.project is not None:
                    if self.project["_id"] != parent["_id"]:
                        visualParent = parent["_id"]
                        parents.extend(
                            parent.get("data", {}).get("parents", [])
                        )
                        parents.append(parent["name"])
                data["visualParent"] = visualParent
                data["parents"] = parents

            update_data = True
            # Process project
            if entity_type.lower() == "project":
                entity = io.find_one({"type": "project"})
                # TODO: should be in validator?
                assert (entity is not None), "Did not find project in DB"

                # get data from already existing project
                cur_entity_data = entity.get("data") or {}
                cur_entity_data.update(data)
                data = cur_entity_data

                self.project = entity
            # Raise error if project or parent are not set
            elif self.project is None or parent is None:
                raise AssertionError(
                    "Collected items are not in right order!"
                )
            # Else process assset
            else:
                entity = io.find_one({"type": "asset", "name": name})
                if entity:
                    # Do not override data, only update
                    cur_entity_data = entity.get("data") or {}
                    entity_tasks = cur_entity_data["tasks"] or {}

                    # create tasks as dict by default
                    if not entity_tasks:
                        cur_entity_data["tasks"] = entity_tasks

                    # merge only tasks that the entity does not have yet
                    new_tasks = data.pop("tasks", {})
                    if "tasks" not in cur_entity_data and not new_tasks:
                        continue
                    for task_name in new_tasks:
                        if task_name in entity_tasks.keys():
                            continue
                        cur_entity_data["tasks"][task_name] = new_tasks[
                            task_name]
                    cur_entity_data.update(data)
                    data = cur_entity_data
                else:
                    # Skip updating data
                    update_data = False

                    # look for a previously archived asset with matching
                    # parents so it can be revived instead of duplicated
                    archived_entities = io.find({
                        "type": "archived_asset",
                        "name": name
                    })
                    unarchive_entity = None
                    for archived_entity in archived_entities:
                        archived_parents = (
                            archived_entity
                            .get("data", {})
                            .get("parents")
                        )
                        if data["parents"] == archived_parents:
                            unarchive_entity = archived_entity
                            break

                    if unarchive_entity is None:
                        # Create entity if doesn"t exist
                        entity = self.create_avalon_asset(name, data)
                    else:
                        # Unarchive if entity was archived
                        entity = self.unarchive_entity(unarchive_entity, data)

            if update_data:
                # Update entity data with input data
                io.update_many(
                    {"_id": entity["_id"]},
                    {"$set": {"data": data}}
                )

            # recurse into child hierarchy levels with this entity as parent
            if "childs" in entity_data:
                self.import_to_avalon(entity_data["childs"], entity)

    def unarchive_entity(self, entity, data):
        """Revive an archived asset document under its original ``_id``.

        Args:
            entity (dict): the archived asset document.
            data (dict): fresh ``data`` payload for the revived asset.

        Returns:
            dict: the replacement asset document.
        """
        # Unarchived asset should not use same data
        new_entity = {
            "_id": entity["_id"],
            "schema": "avalon-core:asset-3.0",
            "name": entity["name"],
            "parent": self.project["_id"],
            "type": "asset",
            "data": data
        }
        io.replace_one(
            {"_id": entity["_id"]},
            new_entity
        )
        return new_entity

    def create_avalon_asset(self, name, data):
        """Insert a new asset document and return it freshly read back.

        Args:
            name (str): asset name.
            data (dict): asset ``data`` payload.

        Returns:
            dict: the inserted asset document as stored in the DB.
        """
        item = {
            "schema": "avalon-core:asset-3.0",
            "name": name,
            "parent": self.project["_id"],
            "type": "asset",
            "data": data
        }
        self.log.debug("Creating asset: {}".format(item))
        entity_id = io.insert_one(item).inserted_id

        return io.find_one({"_id": entity_id})

    def _get_assets(self, input_dict):
        """ Returns only asset dictionary.

        Usually the last part of deep dictionary which
        is not having any children.

        Recursively copies *input_dict*, dropping leaf entries whose key
        is not in ``self.active_assets``; branch entries (those with
        "childs") are always kept and recursed into.
        """
        # copy so the caller's dict is never mutated while iterating
        input_dict_copy = deepcopy(input_dict)

        for key in input_dict.keys():
            self.log.debug("__ key: {}".format(key))
            # check if child key is available
            if input_dict[key].get("childs"):
                # loop deeper
                input_dict_copy[key]["childs"] = self._get_assets(
                    input_dict[key]["childs"])
            else:
                # filter out unwanted assets
                if key not in self.active_assets:
                    input_dict_copy.pop(key, None)

        return input_dict_copy
|
||||
Loading…
Add table
Add a link
Reference in a new issue