feat(standalone): editorial wip

Jakub Jezek 2020-07-31 18:50:05 +02:00
parent 5205e1773e
commit 123fb5ff86
No known key found for this signature in database
GPG key ID: C4B96E101D2A47F3
10 changed files with 836 additions and 213 deletions

View file

@@ -0,0 +1,230 @@
import os
import opentimelineio as otio
from bson import json_util
import pyblish.api
from pype import lib as plib
from avalon import io
class CollectClips(pyblish.api.InstancePlugin):
"""Collect Clips instances from editorial's OTIO sequence"""
order = pyblish.api.CollectorOrder + 0.01
label = "Collect Shots"
hosts = ["standalonepublisher"]
families = ["editorial"]
def process(self, instance):
# get context
context = instance.context
# create asset_names conversion table
if not context.data.get("assetsShared"):
self.log.debug("Created `assetsShared` in context")
context.data["assetsShared"] = dict()
# get timeline otio data
timeline = instance.data["otio_timeline"]
fps = plib.get_asset()["data"]["fps"]
tracks = timeline.each_child(
descended_from_type=otio.schema.track.Track
)
self.log.debug(f"__ tracks: `{tracks}`")
# get data from avalon
asset_entity = instance.context.data["assetEntity"]
asset_data = asset_entity["data"]
asset_name = asset_entity["name"]
self.log.debug(f"__ asset_entity: `{asset_entity}`")
# Project-specific prefix naming. This needs to be replaced with
# configurable options to be more flexible.
asset_name = asset_name.split("_")[0]
instances = []
for track in tracks:
self.log.debug(f"__ track: `{track}`")
try:
track_start_frame = (
abs(track.source_range.start_time.value)
)
except AttributeError:
track_start_frame = 0
self.log.debug(f"__ track: `{track}`")
for clip in track.each_child():
# skip all generators like black empty
if isinstance(
clip.media_reference,
otio.schema.GeneratorReference):
continue
# Transitions are ignored, because Clips have the full frame
# range.
if isinstance(clip, otio.schema.transition.Transition):
continue
if clip.name is None:
continue
# Hardcoded to expect a shot name of "[name].[extension]"
clip_name = os.path.splitext(clip.name)[0].lower()
name = f"{asset_name}_{clip_name}"
source_in = clip.trimmed_range().start_time.value
clip_in = clip.range_in_parent().start_time.value
clip_out = clip.range_in_parent().end_time_inclusive().value
clip_duration = clip.duration().value
self.log.debug(f"__ source_in: `{source_in}`")
self.log.debug(f"__ clip_in: `{clip_in}`")
self.log.debug(f"__ clip_out: `{clip_out}`")
self.log.debug(f"__ clip_duration: `{clip_duration}`")
label = f"{name} (framerange: {clip_in}-{clip_out})"
instances.append(
instance.context.create_instance(**{
"name": name,
"label": label,
"asset": name,
"subset": "plateRef",
"item": clip,
# timing properties
"trackStartFrame": track_start_frame,
"sourceIn": source_in,
"sourceOut": source_in + clip_duration,
"clipIn": clip_in,
"clipOut": clip_out,
"clipDuration": clip_duration,
"handleStart": asset_data["handleStart"],
"handleEnd": asset_data["handleEnd"],
"fps": fps,
# instance properties
"family": "clip",
"families": ["review", "ftrack"],
"ftrackFamily": "review",
"representations": []
})
)
#
# def process_old(self, instance):
# representation = instance.data["representations"][0]
# file_path = os.path.join(
# representation["stagingDir"], representation["files"]
# )
# instance.context.data["editorialPath"] = file_path
#
# extension = os.path.splitext(file_path)[1][1:]
# kwargs = {}
# if extension == "edl":
# # EDL has no embedded frame rate, so an explicit rate is needed;
# # otherwise 24 is assumed.
# kwargs["rate"] = plib.get_asset()["data"]["fps"]
#
# timeline = otio.adapters.read_from_file(file_path, **kwargs)
# tracks = timeline.each_child(
# descended_from_type=otio.schema.track.Track
# )
# asset_entity = instance.context.data["assetEntity"]
# asset_name = asset_entity["name"]
#
# # Ask user for sequence start. Usually 10:00:00:00.
# sequence_start_frame = 900000
#
# # Project specific prefix naming. This needs to be replaced with some
# # options to be more flexible.
# asset_name = asset_name.split("_")[0]
#
# instances = []
# for track in tracks:
# track_start_frame = (
# abs(track.source_range.start_time.value) - sequence_start_frame
# )
# for child in track.each_child():
# # skip all generators like black empty
# if isinstance(
# child.media_reference,
# otio.schema.GeneratorReference):
# continue
#
# # Transitions are ignored, because Clips have the full frame
# # range.
# if isinstance(child, otio.schema.transition.Transition):
# continue
#
# if child.name is None:
# continue
#
# # Hardcoded to expect a shot name of "[name].[extension]"
# child_name = os.path.splitext(child.name)[0].lower()
# name = f"{asset_name}_{child_name}"
#
# frame_start = track_start_frame
# frame_start += child.range_in_parent().start_time.value
# frame_end = track_start_frame
# frame_end += child.range_in_parent().end_time_inclusive().value
#
# label = f"{name} (framerange: {frame_start}-{frame_end})"
# instances.append(
# instance.context.create_instance(**{
# "name": name,
# "label": label,
# "frameStart": frame_start,
# "frameEnd": frame_end,
# "family": "shot",
# "families": ["review", "ftrack"],
# "ftrackFamily": "review",
# "asset": name,
# "subset": "shotMain",
# "representations": [],
# "source": file_path
# })
# )
#
# visual_hierarchy = [asset_entity]
# while True:
# visual_parent = io.find_one(
# {"_id": visual_hierarchy[-1]["data"]["visualParent"]}
# )
# if visual_parent:
# visual_hierarchy.append(visual_parent)
# else:
# visual_hierarchy.append(instance.context.data["projectEntity"])
# break
#
# context_hierarchy = None
# for entity in visual_hierarchy:
# childs = {}
# if context_hierarchy:
# name = context_hierarchy.pop("name")
# childs = {name: context_hierarchy}
# else:
# for instance in instances:
# childs[instance.data["name"]] = {
# "childs": {},
# "entity_type": "Shot",
# "custom_attributes": {
# "frameStart": instance.data["frameStart"],
# "frameEnd": instance.data["frameEnd"]
# }
# }
#
# context_hierarchy = {
# "entity_type": entity["data"]["entityType"],
# "childs": childs,
# "name": entity["name"]
# }
#
# name = context_hierarchy.pop("name")
# context_hierarchy = {name: context_hierarchy}
# instance.context.data["hierarchyContext"] = context_hierarchy
# self.log.info(
# "Hierarchy:\n" +
# json_util.dumps(context_hierarchy, sort_keys=True, indent=4)
# )
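
For reference, a minimal standalone sketch of the OTIO traversal the collector above relies on; the file path and frame rate below are hypothetical, since the plugin takes them from the publish context:

import opentimelineio as otio

# hypothetical inputs; the plugin reads these from instance/context data
timeline = otio.adapters.read_from_file("/tmp/editorial.edl", rate=25)
tracks = timeline.each_child(
    descended_from_type=otio.schema.track.Track)
for track in tracks:
    for clip in track.each_child():
        # only real clips carry usable timing and media
        if not isinstance(clip, otio.schema.Clip):
            continue
        source_in = clip.trimmed_range().start_time.value
        clip_in = clip.range_in_parent().start_time.value
        clip_out = clip.range_in_parent().end_time_inclusive().value
        print(clip.name, source_in, clip_in, clip_out)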

View file

@@ -0,0 +1,76 @@
import os
import opentimelineio as otio
import pyblish.api
from pype import lib as plib
import pype.api
class OTIO_View(pyblish.api.Action):
"""Currently disabled because OTIO requires PySide2. Issue on Qt.py:
https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/289
"""
label = "OTIO View"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
instance = context[0]
representation = instance.data["representations"][0]
file_path = os.path.join(
representation["stagingDir"], representation["files"]
)
plib._subprocess(["otioview", file_path])
class CollectEditorial(pyblish.api.InstancePlugin):
"""Collect Editorial OTIO timeline"""
order = pyblish.api.CollectorOrder
label = "Collect Editorial"
hosts = ["standalonepublisher"]
families = ["editorial"]
actions = []
# presets
extensions = [".mov", ".mp4"]
def process(self, instance):
self.log.debug(f"__ instance: `{instance}`")
# get representation with editorial file
representation = instance.data["representations"][0]
# make editorial sequence file path
staging_dir = representation["stagingDir"]
file_path = os.path.join(
staging_dir, representation["files"]
)
# get video file path
video_path = None
basename = os.path.splitext(os.path.basename(file_path))[0]
for f in os.listdir(staging_dir):
self.log.debug(f"__ test file: `{f}`")
# skip files that do not share the editorial file's base name
if os.path.splitext(f)[0] not in basename:
continue
# skip files without an accepted extension
if os.path.splitext(f)[1] not in self.extensions:
continue
video_path = os.path.join(
staging_dir, f
)
self.log.debug(f"__ video_path: `{video_path}`")
instance.context.data["editorialVideoPath"] = video_path
# get editorial sequence file into otio timeline object
extension = os.path.splitext(file_path)[1]
kwargs = {}
if extension == ".edl":
# EDL has no embedded frame rate, so an explicit rate is needed;
# otherwise 24 is assumed.
kwargs["rate"] = plib.get_asset()["data"]["fps"]
instance.data["otio_timeline"] = otio.adapters.read_from_file(
file_path, **kwargs)
self.log.info(f"Added OTIO timeline from: `{file_path}`")

View file

@@ -0,0 +1,56 @@
import pyblish.api
class CollectClipFrameRanges(pyblish.api.InstancePlugin):
"""Collect all frame range data"""
order = pyblish.api.CollectorOrder + 0.101
label = "Collect Frame Ranges"
hosts = ["standalonepublisher"]
families = ["clip"]
# presets
start_frame_offset = None  # e.g. -900000 when the EDL sequence starts at 900000
custom_start_frame = None
def process(self, instance):
data = dict()
# Timeline data.
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
source_in_h = instance.data("sourceInH",
instance.data("sourceIn") - handle_start)
source_out_h = instance.data("sourceOutH",
instance.data("sourceOut") + handle_end)
timeline_in = instance.data["clipIn"]
timeline_out = instance.data["clipOut"]
timeline_in_h = timeline_in - handle_start
timeline_out_h = timeline_out + handle_end
# define starting frame for future shot
frame_start = self.custom_start_frame or timeline_in
# add offset in case there is any
if self.start_frame_offset:
frame_start += self.start_frame_offset
frame_end = frame_start + (timeline_out - timeline_in)
data.update({
"sourceInH": source_in_h,
"sourceOutH": source_out_h,
"frameStart": frame_start,
"frameEnd": frame_end,
"clipInH": timeline_in_h,
"clipOutH": timeline_out_h,
"clipDurationH": instance.data.get(
"clipDuration") + handle_start + handle_end
}
)
self.log.debug("__ data: {}".format(data))
instance.data.update(data)
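
A worked example of the frame-range math above, with assumed values: a clip at timeline frames 901001-901050, handles of 10, and a -900000 preset offset for an EDL sequence starting at 900000.

handle_start = handle_end = 10
clip_in, clip_out = 901001, 901050
start_frame_offset = -900000

frame_start = clip_in + start_frame_offset       # 1001
frame_end = frame_start + (clip_out - clip_in)   # 1050
clip_in_h = clip_in - handle_start               # 900991
clip_out_h = clip_out + handle_end               # 901060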

View file

@@ -0,0 +1,358 @@
import pyblish.api
import avalon.api as avalon
import re
import os
class CollectHierarchyInstance(pyblish.api.InstancePlugin):
"""Collecting hierarchy context from `parents` and `hierarchy` data
present in `clip` family instances coming from the request json data file
It will add `hierarchical_context` into each instance for integrate
plugins to be able to create needed parents for the context if they
don't exist yet
"""
label = "Collect Hierarchy Clip"
order = pyblish.api.CollectorOrder + 0.101
hosts = ["standalonepublisher"]
families = ["clip"]
# presets
search_patterns = {
"sequence": r"sc\d{3}",
"shot": r"sh\d{3}",
"episode": r"ep\d{2}"
}
shot_name_template = "{project[code]}{episode}{clip_name}"
shot_hierarchy = "{episode}{sequence}/{clip_name}"
shot_tasks = ["Animation", "Layout"]
def convert_to_entity(self, key, value):
# ftrack compatible entity types
types = {"shot": "Shot",
"folder": "Folder",
"episode": "Episode",
"sequence": "Sequence",
"track": "Sequence",
}
# convert to entity type
entity_type = types.get(key, None)
# return if any
if entity_type:
return {"entityType": entity_type, "entityName": value}
def process(self, instance):
search_text = ""
context = instance.context
anatomy_data = context.data["anatomyData"]
asset_entity = context.data["assetEntity"]
asset_name = asset_entity["name"]
assets_shared = context.data.get("assetsShared")
clip = instance.data["item"]
clip_name = os.path.splitext(clip.name)[0].lower()
asset = instance.data["asset"]
clip_in = instance.data["clipIn"]
clip_out = instance.data["clipOut"]
fps = instance.data["fps"]
hierarchy_data = dict(anatomy_data)
if self.search_patterns:
search_text += clip_name + asset_name
hierarchy_data.update({"clip_name": clip_name})
for key, pattern in self.search_patterns.items():
p = re.compile(pattern)
match = p.findall(search_text)
if not match:
continue
hierarchy_data[key] = match[-1]
self.log.debug("__ hierarchy_data: {}".format(hierarchy_data))
shot_name = self.shot_name_template.format(**hierarchy_data)
self.log.debug("__ shot_name: {}".format(shot_name))
shot_hierarchy = self.shot_hierarchy.format(**hierarchy_data)
self.log.debug("__ shot_hierarchy: {}".format(shot_hierarchy))
# # build data for inner nukestudio project property
# data = {
# "sequence": (
# context.data['activeSequence'].name().replace(' ', '_')
# ),
# "track": clip.parent().name().replace(' ', '_'),
# "clip": asset
# }
# self.log.debug("__ data: {}".format(data))
#
# # Check for clips with the same range
# # this is for testing if any vertically neighbouring
# # clips have already been processed
# match = next((
# k for k, v in assets_shared.items()
# if (v["_clipIn"] == clip_in)
# and (v["_clipOut"] == clip_out)
# ), False)
#
# self.log.debug(
# "__ assets_shared[match]: {}".format(
# assets_shared[match]))
#
# # check if hierarchy key is present in matched
# # vertically neighbouring clip
# if not assets_shared[match].get("hierarchy"):
# match = False
#
# # raise exception if multiple hierarchy tags are found
# assert not match, (
# "Two clips above each other with"
# " hierarchy tag are not allowed"
# " >> keep hierarchy tag only in one of them <<"
# )
#
# d_metadata = dict()
# parents = list()
#
# # main template from Tag.note
# template = t_note
#
# # if shot in template then remove it
# if "shot" in template.lower():
# instance.data["asset"] = [
# t for t in template.split('/')][-1]
# template = "/".join(
# [t for t in template.split('/')][0:-1])
#
# # take template from Tag.note and break it into parts
# template_split = template.split("/")
# pattern = re.compile(r"\{([a-z]*?)\}")
# par_split = [pattern.findall(t)
# for t in template.split("/")]
#
# # format all {} in two layers
# for k, v in t_metadata.items():
# new_k = k.split(".")[1]
#
# # ignore all help strings
# if 'help' in k:
# continue
# # self.log.info("__ new_k: `{}`".format(new_k))
# try:
# # first try all data and context data to
# # add to individual properties
# new_v = str(v).format(
# **dict(context.data, **data))
# d_metadata[new_k] = new_v
#
# # create parents
# # find matching index of order
# p_match_i = [i for i, p in enumerate(par_split)
# if new_k in p]
#
# # if any is matching then convert to entity_types
# if p_match_i:
# parent = self.convert_to_entity(
# new_k, template_split[p_match_i[0]])
# parents.insert(p_match_i[0], parent)
# except Exception:
# d_metadata[new_k] = v
#
# # create new shot asset name
# instance.data["asset"] = instance.data["asset"].format(
# **d_metadata)
# self.log.debug(
# "__ instance.data[asset]: "
# "{}".format(instance.data["asset"])
# )
#
# # lastly fill those individual properties into
# # the format string with the collected data
# parents = [{"entityName": p["entityName"].format(
# **d_metadata), "entityType": p["entityType"]}
# for p in parents]
# self.log.debug("__ parents: {}".format(parents))
#
# hierarchy = template.format(
# **d_metadata)
# self.log.debug("__ hierarchy: {}".format(hierarchy))
#
# # check if hierarchy attribute is already created
# # it should not be so return warning if it is
# hd = instance.data.get("hierarchy")
# assert not hd, (
# "Only one Hierarchy Tag is allowed. "
# "Clip: `{}`".format(asset)
# )
#
# # add formatted hierarchy path into instance data
# instance.data["hierarchy"] = hierarchy
# instance.data["parents"] = parents
#
# self.log.info(
# "clip: {asset}[{clip_in}:{clip_out}]".format(
# **locals()))
# # adding to asset shared dict
# self.log.debug(
# "__ assets_shared: {}".format(assets_shared))
# if assets_shared.get(asset):
# self.log.debug("Adding to shared assets: `{}`".format(
# asset))
# asset_shared = assets_shared.get(asset)
# else:
# asset_shared = assets_shared[asset]
#
# asset_shared.update({
# "asset": asset,
# "hierarchy": hierarchy,
# "parents": parents,
# "fps": fps,
# "tasks": instance.data["tasks"]
# })
#
# # adding frame start if any on instance
# start_frame = instance.data.get("startingFrame")
# if start_frame:
# asset_shared.update({
# "startingFrame": start_frame
# })
# self.log.debug(
# "assets_shared: {assets_shared}".format(**locals()))
class CollectHierarchyContext(pyblish.api.ContextPlugin):
'''Collect hierarchy from instances and build the
context hierarchy tree.
'''
label = "Collect Hierarchy Context"
order = pyblish.api.CollectorOrder + 0.102
hosts = ["standalonepublisher"]
def update_dict(self, ex_dict, new_dict):
for key in ex_dict:
if key in new_dict and isinstance(ex_dict[key], dict):
new_dict[key] = self.update_dict(ex_dict[key], new_dict[key])
else:
if ex_dict.get(key) and new_dict.get(key):
continue
else:
new_dict[key] = ex_dict[key]
return new_dict
def process(self, context):
instances = context[:]
# create hierarchyContext attr if context has none
temp_context = {}
for instance in instances:
if 'projectfile' in instance.data.get('family', ''):
continue
name = instance.data["asset"]
# get handles
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
# inject assetsShared to other plates types
assets_shared = context.data.get("assetsShared")
if assets_shared:
s_asset_data = assets_shared.get(name)
if s_asset_data:
self.log.debug("__ s_asset_data: {}".format(s_asset_data))
name = instance.data["asset"] = s_asset_data["asset"]
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
instance.data["resolutionWidth"] = s_asset_data[
"resolutionWidth"]
instance.data["resolutionHeight"] = s_asset_data[
"resolutionHeight"]
instance.data["pixelAspect"] = s_asset_data["pixelAspect"]
instance.data["fps"] = s_asset_data["fps"]
# adding frame start if any on instance
start_frame = s_asset_data.get("startingFrame")
if start_frame:
instance.data["frameStart"] = start_frame
instance.data["frameEnd"] = start_frame + (
instance.data["clipOut"] -
instance.data["clipIn"])
self.log.debug(
"__ instance.data[parents]: {}".format(
instance.data["parents"]
)
)
self.log.debug(
"__ instance.data[hierarchy]: {}".format(
instance.data["hierarchy"]
)
)
self.log.debug(
"__ instance.data[name]: {}".format(instance.data["name"])
)
in_info = {}
in_info["inputs"] = [
x["_id"] for x in instance.data.get("assetbuilds", [])
]
# suppose that all instances are Shots
in_info['entity_type'] = 'Shot'
# get custom attributes of the shot
if instance.data.get("main"):
in_info['custom_attributes'] = {
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"clipIn": instance.data["clipIn"],
"clipOut": instance.data["clipOut"],
'fps': instance.context.data["fps"]
}
# adding SourceResolution if Tag was present
if instance.data.get("main"):
in_info['custom_attributes'].update({
"resolutionWidth": instance.data["resolutionWidth"],
"resolutionHeight": instance.data["resolutionHeight"],
"pixelAspect": instance.data["pixelAspect"]
})
in_info['tasks'] = instance.data['tasks']
parents = instance.data.get('parents', [])
self.log.debug("__ in_info: {}".format(in_info))
actual = {name: in_info}
for parent in reversed(parents):
next_dict = {}
parent_name = parent["entityName"]
next_dict[parent_name] = {}
next_dict[parent_name]["entity_type"] = parent["entityType"]
next_dict[parent_name]["childs"] = actual
actual = next_dict
temp_context = self.update_dict(temp_context, actual)
# TODO: find a reliable way to get the project. Will it be name or code?
project_name = avalon.Session["AVALON_PROJECT"]
final_context = {}
final_context[project_name] = {}
final_context[project_name]['entity_type'] = 'Project'
final_context[project_name]['childs'] = temp_context
# adding hierarchy context to instance
context.data["hierarchyContext"] = final_context
self.log.debug("context.data[hierarchyContext] is: {}".format(
context.data["hierarchyContext"]))

View file

@@ -0,0 +1,15 @@
import pyblish.api
import re
class CollectShotNames(pyblish.api.InstancePlugin):
"""
Collecting shot names
"""
label = "Collect shot names"
order = pyblish.api.CollectorOrder + 0.01
hosts = ["standalonepublisher"]
def process(self, instance):
self.log.info("Instance name: `{}`".format(instance.data["name"]))

View file

@@ -1,147 +0,0 @@
import os
import opentimelineio as otio
from bson import json_util
import pyblish.api
from pype import lib
from avalon import io
class OTIO_View(pyblish.api.Action):
"""Currently disabled because OTIO requires PySide2. Issue on Qt.py:
https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/289
"""
label = "OTIO View"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
instance = context[0]
representation = instance.data["representations"][0]
file_path = os.path.join(
representation["stagingDir"], representation["files"]
)
lib._subprocess(["otioview", file_path])
class CollectShots(pyblish.api.InstancePlugin):
"""Collect Anatomy object into Context"""
order = pyblish.api.CollectorOrder
label = "Collect Shots"
hosts = ["standalonepublisher"]
families = ["editorial"]
actions = []
def process(self, instance):
representation = instance.data["representations"][0]
file_path = os.path.join(
representation["stagingDir"], representation["files"]
)
instance.context.data["editorialPath"] = file_path
extension = os.path.splitext(file_path)[1][1:]
kwargs = {}
if extension == "edl":
# EDL has no embedded frame rate, so an explicit rate is needed;
# otherwise 24 is assumed.
kwargs["rate"] = lib.get_asset()["data"]["fps"]
timeline = otio.adapters.read_from_file(file_path, **kwargs)
tracks = timeline.each_child(
descended_from_type=otio.schema.track.Track
)
asset_entity = instance.context.data["assetEntity"]
asset_name = asset_entity["name"]
# Ask user for sequence start. Usually 10:00:00:00.
sequence_start_frame = 900000
# Project specific prefix naming. This needs to be replaced with some
# options to be more flexible.
asset_name = asset_name.split("_")[0]
instances = []
for track in tracks:
track_start_frame = (
abs(track.source_range.start_time.value) - sequence_start_frame
)
for child in track.each_child():
# Transitions are ignored, because Clips have the full frame
# range.
if isinstance(child, otio.schema.transition.Transition):
continue
if child.name is None:
continue
# Hardcoded to expect a shot name of "[name].[extension]"
child_name = os.path.splitext(child.name)[0].lower()
name = f"{asset_name}_{child_name}"
frame_start = track_start_frame
frame_start += child.range_in_parent().start_time.value
frame_end = track_start_frame
frame_end += child.range_in_parent().end_time_inclusive().value
label = f"{name} (framerange: {frame_start}-{frame_end})"
instances.append(
instance.context.create_instance(**{
"name": name,
"label": label,
"frameStart": frame_start,
"frameEnd": frame_end,
"family": "shot",
"families": ["review", "ftrack"],
"ftrackFamily": "review",
"asset": name,
"subset": "shotMain",
"representations": [],
"source": file_path
})
)
visual_hierarchy = [asset_entity]
while True:
visual_parent = io.find_one(
{"_id": visual_hierarchy[-1]["data"]["visualParent"]}
)
if visual_parent:
visual_hierarchy.append(visual_parent)
else:
visual_hierarchy.append(instance.context.data["projectEntity"])
break
context_hierarchy = None
for entity in visual_hierarchy:
childs = {}
if context_hierarchy:
name = context_hierarchy.pop("name")
childs = {name: context_hierarchy}
else:
for instance in instances:
childs[instance.data["name"]] = {
"childs": {},
"entity_type": "Shot",
"custom_attributes": {
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"]
}
}
context_hierarchy = {
"entity_type": entity["data"]["entityType"],
"childs": childs,
"name": entity["name"]
}
name = context_hierarchy.pop("name")
context_hierarchy = {name: context_hierarchy}
instance.context.data["hierarchyContext"] = context_hierarchy
self.log.info(
"Hierarchy:\n" +
json_util.dumps(context_hierarchy, sort_keys=True, indent=4)
)

View file

@@ -1,9 +1,7 @@
import os
import clique
import pype.api
import pype.lib
import pype.lib as plib
class ExtractShot(pype.api.Extractor):
@@ -11,42 +9,52 @@ class ExtractShot(pype.api.Extractor):
label = "Extract Shot"
hosts = ["standalonepublisher"]
families = ["shot"]
families = ["clip"]
def process(self, instance):
staging_dir = self.staging_dir(instance)
self.log.info("Outputting shot to {}".format(staging_dir))
# get context
context = instance.context
editorial_path = instance.context.data["editorialPath"]
basename = os.path.splitext(os.path.basename(editorial_path))[0]
# get ffmpeg path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
self.log.info("Staging dir set to: `{}`".format(staging_dir))
# Generate mov file.
fps = pype.lib.get_asset()["data"]["fps"]
input_path = os.path.join(
os.path.dirname(editorial_path), basename + ".mov"
)
shot_mov = os.path.join(staging_dir, instance.data["name"] + ".mov")
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
fps = instance.data["fps"]
video_file_path = context.data["editorialVideoPath"]
ext = os.path.splitext(os.path.basename(video_file_path))[-1]
clip_trimmed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
# check video file metadata
input_data = plib.ffprobe_streams(video_file_path)[0]
self.log.debug(f"__ input_data: `{input_data}`")
args = [
ffmpeg_path,
"-ss", str(instance.data["frameStart"] / fps),
"-i", input_path,
"-ss", str(instance.data["clipIn"] / fps),
"-i", video_file_path,
"-t", str(
(instance.data["frameEnd"] - instance.data["frameStart"] + 1) /
(instance.data["clipOut"] - instance.data["clipIn"] + 1) /
fps
),
"-crf", "18",
"-pix_fmt", "yuv420p",
shot_mov
clip_trimmed_path
]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
ffmpeg_args = " ".join(args)
output = pype.api.subprocess(ffmpeg_args)
self.log.info(output)
instance.data["representations"].append({
"name": "mov",
"ext": "mov",
"files": os.path.basename(shot_mov),
"name": ext[1:],
"ext": ext[1:],
"files": os.path.basename(clip_trimed_path),
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
@@ -55,42 +63,41 @@ class ExtractShot(pype.api.Extractor):
"tags": ["review", "ftrackreview"]
})
# Generate jpegs.
shot_jpegs = os.path.join(
staging_dir, instance.data["name"] + ".%04d.jpeg"
)
args = [ffmpeg_path, "-i", shot_mov, shot_jpegs]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
collection = clique.Collection(
head=instance.data["name"] + ".", tail='.jpeg', padding=4
)
for f in os.listdir(staging_dir):
if collection.match(f):
collection.add(f)
instance.data["representations"].append({
"name": "jpeg",
"ext": "jpeg",
"files": list(collection),
"stagingDir": staging_dir
})
# Generate wav file.
shot_wav = os.path.join(staging_dir, instance.data["name"] + ".wav")
args = [ffmpeg_path, "-i", shot_mov, shot_wav]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
instance.data["representations"].append({
"name": "wav",
"ext": "wav",
"files": os.path.basename(shot_wav),
"stagingDir": staging_dir
})
# Required for extract_review plugin (L222 onwards).
instance.data["fps"] = fps
# # Generate jpegs.
# clip_thumbnail = os.path.join(
# staging_dir, instance.data["name"] + ".%04d.jpeg"
# )
# args = [ffmpeg_path, "-i", clip_trimmed_path, clip_thumbnail]
# self.log.info(f"Processing: {args}")
# output = pype.lib._subprocess(args)
# self.log.info(output)
#
# # collect jpeg sequence if editorial data for publish
# # are image sequence
# collection = clique.Collection(
# head=instance.data["name"] + ".", tail='.jpeg', padding=4
# )
# for f in os.listdir(staging_dir):
# if collection.match(f):
# collection.add(f)
#
# instance.data["representations"].append({
# "name": "jpeg",
# "ext": "jpeg",
# "files": list(collection),
# "stagingDir": staging_dir
# })
#
# # Generate wav file.
# shot_wav = os.path.join(staging_dir, instance.data["name"] + ".wav")
# args = [ffmpeg_path, "-i", clip_trimmed_path, shot_wav]
# self.log.info(f"Processing: {args}")
# output = pype.lib._subprocess(args)
# self.log.info(output)
#
# instance.data["representations"].append({
# "name": "wav",
# "ext": "wav",
# "files": os.path.basename(shot_wav),
# "stagingDir": staging_dir
# })
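
A sketch of the ffmpeg trim assembled above, with hypothetical values: seek to the clip's track-relative position in seconds, then keep the clip's duration (inclusive range, hence the +1 frame). This assumes the editorial movie starts at the timeline's first frame.

fps = 25.0
clip_in, clip_out = 1001, 1050
args = [
    "ffmpeg",
    "-ss", str(clip_in / fps),                  # seek into the source (s)
    "-i", "/tmp/editorial.mov",                 # hypothetical source movie
    "-t", str((clip_out - clip_in + 1) / fps),  # clip duration (s)
    "-crf", "18",
    "-pix_fmt", "yuv420p",
    "/tmp/sh010.mov",
]
print(" ".join(args))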

View file

@@ -0,0 +1,24 @@
# Check for clips with the same range
# this is for testing if any vertically neighbouring
# clips have already been processed
clip_matching_with_range = next(
(k for k, v in context.data["assetsShared"].items()
if (v.get("_clipIn", 0) == clip_in)
and (v.get("_clipOut", 0) == clip_out)
), False)
# check if the clip name is the same in the matched
# vertically neighbouring clip
# if it is, the overlap is legitimate, so reset the variable to False
# so that the wrong-name exception is not raised
if asset in str(clip_matching_with_range):
clip_matching_with_range = False
# raise the wrong-name exception if a conflict was found
assert (not clip_matching_with_range), (
"matching clip: {asset}"
" timeline range ({clip_in}:{clip_out})"
" conflicting with {clip_matching_with_range}"
" >> rename any of clips to be the same as the other <<"
).format(
**locals())
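
A minimal sketch of the duplicate-range lookup above: `next` over a generator returns the first shared-asset key whose stored range matches, or `False` when nothing overlaps (sample data is made up):

assets_shared = {
    "sh010": {"_clipIn": 1001, "_clipOut": 1050},
    "sh020": {"_clipIn": 1051, "_clipOut": 1100},
}
clip_in, clip_out = 1001, 1050
match = next(
    (k for k, v in assets_shared.items()
     if v.get("_clipIn", 0) == clip_in
     and v.get("_clipOut", 0) == clip_out),
    False)
print(match)  # "sh010" -- a vertically overlapping clip exists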

View file

@@ -12,7 +12,11 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
families = ["editorial"]
order = pype.api.ValidateContentsOrder
# presets
check_ext = None
def process(self, instance):
check_ext = self.check_ext or ".mov"  # keep the leading dot so the filename check matches
representation = instance.data["representations"][0]
staging_dir = representation["stagingDir"]
basename = os.path.splitext(
@@ -21,8 +25,8 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
files = [x for x in os.listdir(staging_dir)]
# Check for "mov" file.
filename = basename + ".mov"
# Check for the correct extension in the file name.
filename = basename + check_ext
filepath = os.path.join(staging_dir, filename)
msg = f"Missing \"{filepath}\"."
assert filename in files, msg

View file

@@ -2,10 +2,10 @@ import pyblish.api
import pype.api
class ValidateShots(pyblish.api.ContextPlugin):
"""Validate there is a "mov" next to the editorial file."""
class ValidateShotDuplicates(pyblish.api.ContextPlugin):
"""Validating no duplicate names are in context."""
label = "Validate Shots"
label = "Validate Shot Duplicates"
hosts = ["standalonepublisher"]
order = pype.api.ValidateContentsOrder