feat(sp): wip publishing clips

Jakub Jezek 2020-08-04 21:02:44 +02:00
parent 17e3c19d7a
commit 8f57ae45fe
5 changed files with 273 additions and 180 deletions


@@ -210,6 +210,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
self.log.debug(
"assets_shared: {assets_shared}".format(**locals()))
class CollectHierarchyContext(pyblish.api.ContextPlugin):
'''Collecting hierarchy from instances and building
context hierarchy tree


@@ -84,147 +84,159 @@ class CollectClips(pyblish.api.InstancePlugin):
label = f"{name} (framerange: {clip_in}-{clip_out})"
new_instance_data = {
# shared attributes
"representations": [],
# timing properties
"trackStartFrame": track_start_frame,
"sourceIn": source_in,
"sourceOut": source_in + clip_duration,
"clipIn": clip_in,
"clipOut": clip_out,
"clipDuration": clip_duration,
"handleStart": int(asset_data["handleStart"]),
"handleEnd": int(asset_data["handleEnd"]),
"fps": fps
}
# adding reviewable instance
shot_instance_data = new_instance_data.copy()
shot_instance_data.update({
# unique attributes
"name": name,
"label": label,
"asset": name,
"subset": "plateRef",
"item": clip,
# instance properties
"family": "clip",
"families": ["review", "ftrack"],
"ftrackFamily": "review",
"editorialVideoPath": instance.data[
"editorialVideoPath"]
})
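# note: `new_instance_data` holds the attributes shared by every
# per-clip instance; each concrete instance copies it and layers
# its unique keys on top via dict.copy() + update(), so the shared
# timing data is declared only once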
instances.append(
instance.context.create_instance(**shot_instance_data)
)
context.data["assetsShared"][name] = {
"_clipIn": clip_in,
"_clipOut": clip_out
}
def process_old(self, instance):
representation = instance.data["representations"][0]
file_path = os.path.join(
representation["stagingDir"], representation["files"]
)
instance.context.data["editorialPath"] = file_path
extension = os.path.splitext(file_path)[1][1:]
kwargs = {}
if extension == "edl":
# EDL has no frame rate embedded, so it needs an explicit frame rate,
# else 24 is assumed.
kwargs["rate"] = plib.get_asset()["data"]["fps"]
timeline = otio.adapters.read_from_file(file_path, **kwargs)
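# e.g. otio.adapters.read_from_file("cut.edl", rate=25.0)
# (file name and rate are hypothetical; the rate kwarg is only
# populated for EDL input, as built above)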
tracks = timeline.each_child(
descended_from_type=otio.schema.track.Track
)
asset_entity = instance.context.data["assetEntity"]
asset_name = asset_entity["name"]
# Ask user for sequence start. Usually 10:00:00:00.
sequence_start_frame = 900000
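# 900000 == 10 * 60 * 60 * 25, i.e. timecode 10:00:00:00 at an
# assumed project rate of 25 fps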
# Project specific prefix naming. This needs to be replaced with some
# options to be more flexible.
asset_name = asset_name.split("_")[0]
instances = []
for track in tracks:
track_start_frame = (
abs(track.source_range.start_time.value) - sequence_start_frame
)
for child in track.each_child():
# skip all generators, like black or empty frames
if isinstance(
child.media_reference,
otio.schema.GeneratorReference):
continue
# Transitions are ignored, because Clips have the full frame
# range.
if isinstance(child, otio.schema.transition.Transition):
continue
if child.name is None:
continue
# Hardcoded to expect a shot name of "[name].[extension]"
child_name = os.path.splitext(child.name)[0].lower()
name = f"{asset_name}_{child_name}"
frame_start = track_start_frame
frame_start += child.range_in_parent().start_time.value
frame_end = track_start_frame
frame_end += child.range_in_parent().end_time_inclusive().value
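# worked example: with track_start_frame == 0, a clip placed 24
# frames into the track with a 48-frame duration gives
# frame_start == 24 and frame_end == 71
# (end_time_inclusive() is start + duration - 1)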
label = f"{name} (framerange: {frame_start}-{frame_end})"
instances.append(
instance.context.create_instance(**{
"name": name,
"label": label,
"frameStart": frame_start,
"frameEnd": frame_end,
"family": "shot",
"families": ["review", "ftrack"],
"ftrackFamily": "review",
"asset": name,
"subset": "shotMain",
"representations": [],
"source": file_path
})
)
visual_hierarchy = [asset_entity]
while True:
visual_parent = io.find_one(
{"_id": visual_hierarchy[-1]["data"]["visualParent"]}
)
if visual_parent:
visual_hierarchy.append(visual_parent)
else:
visual_hierarchy.append(instance.context.data["projectEntity"])
break
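# visual_hierarchy now runs from the asset up to the project,
# e.g. [asset_entity, <its visual parents...>, project_entity]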
context_hierarchy = None
for entity in visual_hierarchy:
childs = {}
if context_hierarchy:
name = context_hierarchy.pop("name")
childs = {name: context_hierarchy}
else:
for instance in instances:
childs[instance.data["name"]] = {
"childs": {},
"entity_type": "Shot",
"custom_attributes": {
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"]
}
}
context_hierarchy = {
"entity_type": entity["data"]["entityType"],
"childs": childs,
"name": entity["name"]
}
name = context_hierarchy.pop("name")
context_hierarchy = {name: context_hierarchy}
instance.context.data["hierarchyContext"] = context_hierarchy
self.log.info(
"Hierarchy:\n" +
json_util.dumps(context_hierarchy, sort_keys=True, indent=4)
)
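# the logged hierarchyContext is a nested mapping, roughly:
# {"<project>": {"entity_type": <entityType>, "childs": {
#     "<shot>": {"entity_type": "Shot", "childs": {},
#         "custom_attributes": {"frameStart": ..., "frameEnd": ...}}}}}
# with one nesting level per entity in visual_hierarchy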
# def process_old(self, instance):
# representation = instance.data["representations"][0]
# file_path = os.path.join(
# representation["stagingDir"], representation["files"]
# )
# instance.context.data["editorialPath"] = file_path
#
# extension = os.path.splitext(file_path)[1][1:]
# kwargs = {}
# if extension == "edl":
# # EDL has no frame rate embedded, so it needs an explicit frame rate,
# # else 24 is assumed.
# kwargs["rate"] = plib.get_asset()["data"]["fps"]
#
# timeline = otio.adapters.read_from_file(file_path, **kwargs)
# tracks = timeline.each_child(
# descended_from_type=otio.schema.track.Track
# )
# asset_entity = instance.context.data["assetEntity"]
# asset_name = asset_entity["name"]
#
# # Ask user for sequence start. Usually 10:00:00:00.
# sequence_start_frame = 900000
#
# # Project specific prefix naming. This needs to be replaced with some
# # options to be more flexible.
# asset_name = asset_name.split("_")[0]
#
# instances = []
# for track in tracks:
# track_start_frame = (
# abs(track.source_range.start_time.value) - sequence_start_frame
# )
# for child in track.each_child():
# # skip all generators, like black or empty frames
# if isinstance(
# child.media_reference,
# otio.schema.GeneratorReference):
# continue
#
# # Transitions are ignored, because Clips have the full frame
# # range.
# if isinstance(child, otio.schema.transition.Transition):
# continue
#
# if child.name is None:
# continue
#
# # Hardcoded to expect a shot name of "[name].[extension]"
# child_name = os.path.splitext(child.name)[0].lower()
# name = f"{asset_name}_{child_name}"
#
# frame_start = track_start_frame
# frame_start += child.range_in_parent().start_time.value
# frame_end = track_start_frame
# frame_end += child.range_in_parent().end_time_inclusive().value
#
# label = f"{name} (framerange: {frame_start}-{frame_end})"
# instances.append(
# instance.context.create_instance(**{
# "name": name,
# "label": label,
# "frameStart": frame_start,
# "frameEnd": frame_end,
# "family": "shot",
# "families": ["review", "ftrack"],
# "ftrackFamily": "review",
# "asset": name,
# "subset": "shotMain",
# "representations": [],
# "source": file_path
# })
# )
#
# visual_hierarchy = [asset_entity]
# while True:
# visual_parent = io.find_one(
# {"_id": visual_hierarchy[-1]["data"]["visualParent"]}
# )
# if visual_parent:
# visual_hierarchy.append(visual_parent)
# else:
# visual_hierarchy.append(instance.context.data["projectEntity"])
# break
#
# context_hierarchy = None
# for entity in visual_hierarchy:
# childs = {}
# if context_hierarchy:
# name = context_hierarchy.pop("name")
# childs = {name: context_hierarchy}
# else:
# for instance in instances:
# childs[instance.data["name"]] = {
# "childs": {},
# "entity_type": "Shot",
# "custom_attributes": {
# "frameStart": instance.data["frameStart"],
# "frameEnd": instance.data["frameEnd"]
# }
# }
#
# context_hierarchy = {
# "entity_type": entity["data"]["entityType"],
# "childs": childs,
# "name": entity["name"]
# }
#
# name = context_hierarchy.pop("name")
# context_hierarchy = {name: context_hierarchy}
# instance.context.data["hierarchyContext"] = context_hierarchy
# self.log.info(
# "Hierarchy:\n" +
# json_util.dumps(context_hierarchy, sort_keys=True, indent=4)
# )


@@ -116,6 +116,7 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
})
def process(self, instance):
asset = instance.data["asset"]
assets_shared = instance.context.data.get("assetsShared")
context = instance.context
anatomy_data = context.data["anatomyData"]
@@ -138,13 +139,23 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
label = f"{self.shot_name} ({frame_start}-{frame_end})"
instance.data["label"] = label
# dealing with shared attributes across instances
# with the same asset name
if assets_shared.get(asset):
self.log.debug("Adding to shared assets: `{}`".format(
asset))
asset_shared = assets_shared[asset]
else:
asset_shared = assets_shared[asset] = {}
asset_shared.update({
"asset": instance.data["asset"],
"hierarchy": instance.data["hierarchy"],
"parents": instance.data["parents"],
"fps": instance.data["fps"],
"tasks": instance.data["tasks"]
})
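# equivalent, more compact form of the lookup above (a sketch):
#   asset_shared = assets_shared.setdefault(asset, {})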
class CollectHierarchyContext(pyblish.api.ContextPlugin):


@@ -0,0 +1,37 @@
from pyblish import api
class CollectShots(api.InstancePlugin):
"""Collect Shot from Clip."""
# Run just before CollectClipSubsets
order = api.CollectorOrder + 0.1021
label = "Collect Shots"
hosts = ["standalonepublisher"]
families = ["clip"]
def process(self, instance):
# Collect data.
data = {}
for key, value in instance.data.items():
data[key] = value
data["family"] = "shot"
data["families"] = []
data["subset"] = data["family"] + "Main"
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = (
"{} - {} - tasks:{}".format(
data["asset"],
data["subset"],
data["tasks"]
)
)
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)
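# e.g. for asset "sh010" (name hypothetical) this yields an instance
# named "shotMain_sh010" labelled "sh010 - shotMain - tasks:[...]"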


@@ -13,6 +13,10 @@ class ExtractShot(pype.api.Extractor):
hosts = ["standalonepublisher"]
families = ["clip"]
# presets
add_representation = None # ".jpeg"
add_audio = True
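# (both attributes are preset-driven: set add_representation to a
# truthy value, e.g. ".jpeg", to also publish a jpeg sequence;
# add_audio toggles the audio instance created below)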
def process(self, instance):
# get ffmpeg path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
@@ -76,41 +80,69 @@ class ExtractShot(pype.api.Extractor):
self.log.debug(f"Instance data: {pformat(instance.data)}")
# # Generate jpegs.
# clip_thumbnail = os.path.join(
# staging_dir, instance.data["name"] + ".%04d.jpeg"
# )
# args = [ffmpeg_path, "-i", clip_trimed_path, clip_thumbnail]
# self.log.info(f"Processing: {args}")
# output = pype.lib._subprocess(args)
# self.log.info(output)
#
# # collect the jpeg sequence if the editorial media to publish
# # is an image sequence
# collection = clique.Collection(
# head=instance.data["name"] + ".", tail='.jpeg', padding=4
# )
# for f in os.listdir(staging_dir):
# if collection.match(f):
# collection.add(f)
#
# instance.data["representations"].append({
# "name": "jpeg",
# "ext": "jpeg",
# "files": list(collection),
# "stagingDir": staging_dir
# })
#
# # Generate wav file.
# shot_wav = os.path.join(staging_dir, instance.data["name"] + ".wav")
# args = [ffmpeg_path, "-i", clip_trimed_path, shot_wav]
# self.log.info(f"Processing: {args}")
# output = pype.lib._subprocess(args)
# self.log.info(output)
#
# instance.data["representations"].append({
# "name": "wav",
# "ext": "wav",
# "files": os.path.basename(shot_wav),
# "stagingDir": staging_dir
# })
if self.add_representation:
# Generate jpegs.
clip_img_sequence = os.path.join(
staging_dir, instance.data["name"] + ".%04d.jpeg"
)
args = [ffmpeg_path, "-i", clip_trimed_path, clip_img_sequence]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
# collect the jpeg sequence if the editorial media to publish
# is an image sequence
collection = clique.Collection(
head=instance.data["name"] + ".", tail='.jpeg', padding=4
)
for f in os.listdir(staging_dir):
if collection.match(f):
collection.add(f)
instance.data["representations"].append({
"name": "jpeg",
"ext": "jpeg",
"files": list(collection),
"stagingDir": staging_dir
})
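# the collection gathers the generated frames, e.g.
# "<instance name>.0001.jpeg", "<instance name>.0002.jpeg", ...,
# published as one image-sequence representation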
if self.add_audio:
audio_ext = ".wav"
# Generate wav file.
shot_wav = os.path.join(
staging_dir, instance.data["name"] + audio_ext)
# Collect data.
data = {}
for key, value in instance.data.items():
data[key] = value
data["family"] = "audio"
data["families"] = ["ftrack"]
data["subset"] = "audioMain"
data["source"] = shot_wav
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
data['asset'],
data["subset"],
audio_ext
)
# Create instance (note: this rebinds `instance` to the new audio
# instance, so the representation below attaches to it).
self.log.debug("Creating instance with: {}".format(data["name"]))
instance = instance.context.create_instance(**data)
args = [ffmpeg_path, "-i", clip_trimed_path, shot_wav]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
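# the spawned command is effectively
#   ffmpeg -i <clip_trimed_path> <instance name>.wav
# with ffmpeg inferring PCM/WAV output from the .wav extension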
instance.data["representations"] = [{
"name": "wav",
"ext": "wav",
"files": os.path.basename(shot_wav),
"stagingDir": staging_dir
}]