feat(SP): wip editorial expansion to image sequences

Jakub Jezek 2020-11-05 19:27:56 +01:00
parent d5336b2e89
commit c43a58efa9
No known key found for this signature in database
GPG key ID: C4B96E101D2A47F3
8 changed files with 277 additions and 133 deletions

View file

@ -1,3 +1,19 @@
"""
Optional:
presets -> extensions (
example of use:
[".mov", ".mp4"]
)
presets -> source_dir (
example of use:
"C:/pathToFolder"
"{root}/{project[name]}/inputs"
"{root[work]}/{project[name]}/inputs"
"./input"
"../input"
)
"""
import os
import opentimelineio as otio
import pyblish.api
@ -33,8 +49,10 @@ class CollectEditorial(pyblish.api.InstancePlugin):
# presets
extensions = [".mov", ".mp4"]
source_dir = None
def process(self, instance):
root_dir = None
# remove context test attribute
if instance.context.data.get("subsetNamesCheck"):
instance.context.data.pop("subsetNamesCheck")
@ -53,19 +71,42 @@ class CollectEditorial(pyblish.api.InstancePlugin):
# get video file path
video_path = None
basename = os.path.splitext(os.path.basename(file_path))[0]
for f in os.listdir(staging_dir):
self.log.debug(f"__ test file: `{f}`")
# filter out by not sharing the same name
if os.path.splitext(f)[0] not in basename:
continue
# filter out by respected extensions
if os.path.splitext(f)[1] not in self.extensions:
continue
video_path = os.path.join(
staging_dir, f
)
self.log.debug(f"__ video_path: `{video_path}`")
instance.data["editorialVideoPath"] = video_path
if self.source_dir:
source_dir = self.source_dir.replace("\\", "/")
if ("./" in source_dir) or ("../" in source_dir):
# get current working dir
cwd = os.getcwd()
# set cwd to staging dir so the relative path resolves to an absolute one
os.chdir(staging_dir)
root_dir = os.path.abspath(source_dir)
# set back original cwd
os.chdir(cwd)
elif "{" in source_dir:
root_dir = source_dir
else:
root_dir = os.path.normpath(source_dir)
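# NOTE (illustrative values, not part of this commit): for a relative preset
# such as "../input" the chdir round-trip above resolves it against the
# staging dir, e.g. "C:/tmp/pyblish_tmp_x" + "../input" -> "C:/tmp/input";
# the same result could also be reached with
#     os.path.normpath(os.path.join(staging_dir, source_dir))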
if root_dir:
# source data still needs to be searched for
instance.data["editorialSourceRoot"] = root_dir
instance.data["editorialSourcePath"] = None
else:
# source data are already found
for f in os.listdir(staging_dir):
# filter out by not sharing the same name
if os.path.splitext(f)[0] not in basename:
continue
# filter out by respected extensions
if os.path.splitext(f)[1] not in self.extensions:
continue
video_path = os.path.join(
staging_dir, f
)
self.log.debug(f"__ video_path: `{video_path}`")
instance.data["editorialSourceRoot"] = staging_dir
instance.data["editorialSourcePath"] = video_path
instance.data["stagingDir"] = staging_dir
# get editorial sequence file into otio timeline object

View file

@ -2,7 +2,7 @@ import pyblish.api
import re
import os
from avalon import io
from copy import deepcopy
class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"""Collecting hierarchy context from `parents` and `hierarchy` data
@ -60,7 +60,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
def create_hierarchy(self, instance):
parents = list()
hierarchy = ""
hierarchy = list()
visual_hierarchy = [instance.context.data["assetEntity"]]
while True:
visual_parent = io.find_one(
@ -81,22 +81,51 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
})
if self.shot_add_hierarchy:
parent_template_patern = re.compile(r"\{([a-z]*?)\}")
# fill the parents parts from presets
shot_add_hierarchy = self.shot_add_hierarchy.copy()
hierarchy_parents = shot_add_hierarchy["parents"].copy()
for parent in hierarchy_parents:
hierarchy_parents[parent] = hierarchy_parents[parent].format(
**instance.data["anatomyData"])
# fill each parent key's template from anatomy data
for parent_key in hierarchy_parents:
hierarchy_parents[parent_key] = hierarchy_parents[
parent_key].format(**instance.data["anatomyData"])
for _index, _parent in enumerate(
shot_add_hierarchy["parents_path"].split("/")):
parent_filled = _parent.format(**hierarchy_parents)
parent_key = parent_template_patern.findall(_parent).pop()
# in case SP context is set to the same folder
if (_index == 0) and ("folder" in parent_key) \
and (parents[-1]["entityName"] == parent_filled):
self.log.debug(f" skiping : {parent_filled}")
continue
# in case the first parent is the project, rebuild parents from scratch
if (_index == 0) and ("project" in parent_key):
self.log.debug("rebuilding parents from scratch")
project_parent = parents[0]
parents = [project_parent]
self.log.debug(f"project_parent: {project_parent}")
self.log.debug(f"parents: {parents}")
continue
prnt = self.convert_to_entity(
parent, hierarchy_parents[parent])
parent_key, parent_filled)
parents.append(prnt)
hierarchy.append(parent_filled)
hierarchy = shot_add_hierarchy[
"parents_path"].format(**hierarchy_parents)
# convert hierarchy to string
hierarchy = "/".join(hierarchy)
# assign to instance data
instance.data["hierarchy"] = hierarchy
instance.data["parents"] = parents
# log the result
self.log.debug(f"Hierarchy: {hierarchy}")
self.log.debug(f"parents: {parents}")
if self.shot_add_tasks:
instance.data["tasks"] = self.shot_add_tasks
@ -117,7 +146,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
def processing_instance(self, instance):
self.log.info(f"_ instance: {instance}")
# adding anatomyData for burnins
instance.data["anatomyData"] = instance.context.data["anatomyData"]
instance.data["anatomyData"] = deepcopy(
instance.context.data["anatomyData"])
asset = instance.data["asset"]
assets_shared = instance.context.data.get("assetsShared")
@ -133,9 +163,6 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
shot_name = instance.data["asset"]
self.log.debug(f"Shot Name: {shot_name}")
if instance.data["hierarchy"] not in shot_name:
self.log.warning("wrong parent")
label = f"{shot_name} ({frame_start}-{frame_end})"
instance.data["label"] = label
@ -150,7 +177,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"asset": instance.data["asset"],
"hierarchy": instance.data["hierarchy"],
"parents": instance.data["parents"],
"tasks": instance.data["tasks"]
"tasks": instance.data["tasks"],
"anatomyData": instance.data["anatomyData"]
})
@ -194,6 +222,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
instance.data["anatomyData"] = s_asset_data["anatomyData"]
# generate hierarchy data only on shot instances
if 'shot' not in instance.data.get('family', ''):
@ -224,7 +253,9 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
in_info['tasks'] = instance.data['tasks']
from pprint import pformat
parents = instance.data.get('parents', [])
self.log.debug(f"parents: {pformat(parents)}")
actual = {name: in_info}
@ -240,4 +271,5 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
# adding hierarchy context to the publish context
context.data["hierarchyContext"] = final_context
self.log.debug(f"hierarchyContext: {pformat(final_context)}")
self.log.info("Hierarchy instance collected")

View file

@ -22,7 +22,7 @@ class CollectInstanceData(pyblish.api.InstancePlugin):
hosts = ["standalonepublisher"]
def process(self, instance):
fps = instance.data["assetEntity"]["data"]["fps"]
fps = instance.context.data["assetEntity"]["data"]["fps"]
instance.data.update({
"fps": fps
})

View file

@ -0,0 +1,57 @@
import os
import tempfile
import pyblish.api
from copy import deepcopy
class CollectInstanceResources(pyblish.api.InstancePlugin):
"""Collect instance's resources"""
# must be after `CollectInstances`
order = pyblish.api.CollectorOrder + 0.011
label = "Collect Instance Resources"
hosts = ["standalonepublisher"]
families = ["clip"]
def process(self, instance):
anatomy = instance.context.data["anatomy"]
anatomy_data = deepcopy(instance.context.data["anatomyData"])
anatomy_data.update({"root": anatomy.roots})
subset = instance.data["subset"]
clip_name = instance.data["clipName"]
editorial_source_root = instance.data["editorialSourceRoot"]
editorial_source_path = instance.data["editorialSourcePath"]
if editorial_source_path:
# add the `trimming` family if a mov or mp4 was found which needs cutting,
# so the `ExtractTrimVideoAudio` plugin is enabled
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data["stagingDir"] = staging_dir
instance.data["families"] += ["trimming"]
return
if "{" in editorial_source_root:
editorial_source_root = editorial_source_root.format(
**anatomy_data)
self.log.debug(f"root: {editorial_source_root}")
for root, dirs, files in os.walk(editorial_source_root):
if subset in root and clip_name in root:
staging_dir = root
self.log.debug(f"staging_dir: {staging_dir}")
# add `editorialSourceRoot` as staging dir
# if `editorialSourcePath` is None then loop
# through `editorialSourceRoot`
# if an image sequence is found then create a representation and
# match it with the subset name in dict
# identify as image sequence via `isSequence` on instance data
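# NOTE (illustrative sketch of the plan above, not part of this commit): the
# sequence found in the os.walk() match could be assembled with `clique` and
# stored as a representation; keys below mirror the trim extractor and the
# `isSequence` flag is the one mentioned above:
#
#     import clique
#     collections, remainder = clique.assemble(files)
#     for collection in collections:
#         instance.data["isSequence"] = True
#         instance.data.setdefault("representations", []).append({
#             "name": collection.tail[1:],
#             "ext": collection.tail[1:],
#             "files": list(collection),
#             "stagingDir": staging_dir,
#         })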

View file

@ -1,15 +1,14 @@
import os
import opentimelineio as otio
import tempfile
import pyblish.api
from pype import lib as plib
class CollectClipInstances(pyblish.api.InstancePlugin):
"""Collect Clips instances from editorial's OTIO sequence"""
class CollectInstances(pyblish.api.InstancePlugin):
"""Collect instances from editorial's OTIO sequence"""
order = pyblish.api.CollectorOrder + 0.01
label = "Collect Clips"
label = "Collect Instances"
hosts = ["standalonepublisher"]
families = ["editorial"]
@ -19,13 +18,13 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
"family": "review",
"families": ["clip", "ftrack"],
# "ftrackFamily": "review",
"extension": ".mp4"
"extensions": [".mp4"]
},
"audioMain": {
"family": "audio",
"families": ["clip", "ftrack"],
# "ftrackFamily": "audio",
"extension": ".wav",
"extensions": [".wav"],
# "version": 1
},
"shotMain": {
@ -37,12 +36,14 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
custom_start_frame = None
def process(self, instance):
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
# get context
context = instance.context
instance_data_filter = [
"editorialSourceRoot",
"editorialSourcePath"
]
# attribute for checking duplicates during creation
if not context.data.get("assetNameCheck"):
context.data["assetNameCheck"] = list()
@ -103,7 +104,10 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
# frame ranges data
clip_in = clip.range_in_parent().start_time.value
clip_in += track_start_frame
clip_out = clip.range_in_parent().end_time_inclusive().value
clip_out += track_start_frame
self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}")
# add offset in case there is any
if self.timeline_frame_offset:
@ -131,14 +135,11 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
# create shared new instance data
instance_data = {
"stagingDir": staging_dir,
# shared attributes
"asset": name,
"assetShareName": name,
"editorialVideoPath": instance.data[
"editorialVideoPath"],
"item": clip,
"clipName": clip_name,
# parent time properties
"trackStartFrame": track_start_frame,
@ -167,6 +168,10 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
"frameEndH": frame_end + handle_end
}
for data_key in instance_data_filter:
instance_data.update({
data_key: instance.data.get(data_key)})
# adding subsets to context as instances
for subset, properities in self.subsets.items():
# adding Review-able instance

View file

@ -1,92 +0,0 @@
import os
import clique
import pype.api
from pprint import pformat
class ExtractShotData(pype.api.Extractor):
"""Extract shot "mov" and "wav" files."""
label = "Extract Shot Data"
hosts = ["standalonepublisher"]
families = ["clip"]
# presets
def process(self, instance):
representation = instance.data.get("representations")
self.log.debug(f"_ representation: {representation}")
if not representation:
instance.data["representations"] = list()
# get ffmpet path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
self.log.info("Staging dir set to: `{}`".format(staging_dir))
# Generate mov file.
fps = instance.data["fps"]
video_file_path = instance.data["editorialVideoPath"]
ext = instance.data.get("extension", ".mov")
clip_trimed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
#
# # check video file metadata
# input_data = plib.ffprobe_streams(video_file_path)[0]
# self.log.debug(f"__ input_data: `{input_data}`")
start = float(instance.data["clipInH"])
dur = float(instance.data["clipDurationH"])
if ext in ".wav":
start += 0.5
args = [
ffmpeg_path,
"-ss", str(start / fps),
"-i", f"\"{video_file_path}\"",
"-t", str(dur / fps)
]
if ext in [".mov", ".mp4"]:
args.extend([
"-crf", "18",
"-pix_fmt", "yuv420p"])
elif ext in ".wav":
args.extend([
"-vn -acodec pcm_s16le",
"-ar 48000 -ac 2"
])
# add output path
args.append(f"\"{clip_trimed_path}\"")
self.log.info(f"Processing: {args}")
ffmpeg_args = " ".join(args)
output = pype.api.subprocess(ffmpeg_args, shell=True)
self.log.info(output)
repr = {
"name": ext[1:],
"ext": ext[1:],
"files": os.path.basename(clip_trimed_path),
"stagingDir": staging_dir,
"frameStart": int(instance.data["frameStart"]),
"frameEnd": int(instance.data["frameEnd"]),
"frameStartFtrack": int(instance.data["frameStartH"]),
"frameEndFtrack": int(instance.data["frameEndH"]),
"fps": fps,
}
if ext[1:] in ["mov", "mp4"]:
repr.update({
"thumbnail": True,
"tags": ["review", "ftrackreview", "delete"]})
instance.data["representations"].append(repr)
self.log.debug(f"Instance data: {pformat(instance.data)}")

View file

@ -0,0 +1,101 @@
import os
import pyblish.api
import pype.api
from pprint import pformat
class ExtractTrimVideoAudio(pype.api.Extractor):
"""Trim with ffmpeg "mov" and "wav" files."""
label = "Extract Trim Video/Audio"
hosts = ["standalonepublisher"]
families = ["clip", "trimming"]
# make sure it is enabled only if both families are present
match = pyblish.api.Subset
# presets
def process(self, instance):
representation = instance.data.get("representations")
self.log.debug(f"_ representation: {representation}")
if not representation:
instance.data["representations"] = list()
# get ffmpeg path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
self.log.info("Staging dir set to: `{}`".format(staging_dir))
# generate trimmed video/audio files
fps = instance.data["fps"]
video_file_path = instance.data["editorialSourcePath"]
extensions = instance.data.get("extensions", [".mov"])
for ext in extensions:
clip_trimed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
# # check video file metadata
# input_data = plib.ffprobe_streams(video_file_path)[0]
# self.log.debug(f"__ input_data: `{input_data}`")
start = float(instance.data["clipInH"])
dur = float(instance.data["clipDurationH"])
if ext in ".wav":
# offset the start time to work around an ffmpeg bug
start += 0.5
# remove "review" from families
instance.data["families"] = [
fml for fml in instance.data["families"]
if "trimming" not in fml
]
args = [
ffmpeg_path,
"-ss", str(start / fps),
"-i", f"\"{video_file_path}\"",
"-t", str(dur / fps)
]
if ext in [".mov", ".mp4"]:
args.extend([
"-crf", "18",
"-pix_fmt", "yuv420p"])
elif ext in ".wav":
args.extend([
"-vn -acodec pcm_s16le",
"-ar 48000 -ac 2"
])
# add output path
args.append(f"\"{clip_trimed_path}\"")
self.log.info(f"Processing: {args}")
ffmpeg_args = " ".join(args)
output = pype.api.subprocess(ffmpeg_args, shell=True)
self.log.info(output)
repr = {
"name": ext[1:],
"ext": ext[1:],
"files": os.path.basename(clip_trimed_path),
"stagingDir": staging_dir,
"frameStart": int(instance.data["frameStart"]),
"frameEnd": int(instance.data["frameEnd"]),
"frameStartFtrack": int(instance.data["frameStartH"]),
"frameEndFtrack": int(instance.data["frameEndH"]),
"fps": fps,
}
if ext[1:] in ["mov", "mp4"]:
repr.update({
"thumbnail": True,
"tags": ["review", "ftrackreview", "delete"]})
instance.data["representations"].append(repr)
self.log.debug(f"Instance data: {pformat(instance.data)}")

View file

@ -15,6 +15,6 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
self.log.debug(
f"Instance: {instance}, Families: "
f"{[instance.data['family']] + instance.data['families']}")
check_file = instance.data["editorialVideoPath"]
check_file = instance.data["editorialSourcePath"]
msg = f"Missing \"{check_file}\"."
assert check_file, msg