Merge pull request #699 from pypeclub/feature/686-standalonepublisher-editorial-from-image-sequences

686 standalonepublisher editorial from image sequences
Milan Kolar 2020-11-20 12:14:12 +01:00 committed by GitHub
commit 14f348811d
10 changed files with 555 additions and 145 deletions

View file

@ -0,0 +1,21 @@
"""
Optional:
instance.data["remove"] -> mareker for removing
"""
import pyblish.api
class CollectClearInstances(pyblish.api.InstancePlugin):
"""Clear all marked instances"""
order = pyblish.api.CollectorOrder + 0.4999
label = "Clear Instances"
hosts = ["standalonepublisher"]
def process(self, instance):
self.log.debug(
f"Instance: `{instance}` | "
f"families: `{instance.data['families']}`")
if instance.data.get("remove"):
self.log.info(f"Removing: {instance}")
instance.context.remove(instance)
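
Note on the `remove` marker: this collector only cleans up; the flag itself is expected to be set by an earlier plugin. A minimal sketch of such a producer, assuming a hypothetical naming rule (the plugin name, order value and `_tmp` prefix are illustrative, not part of this PR):

import pyblish.api


class CollectMarkTemporaryInstances(pyblish.api.InstancePlugin):
    """Hypothetical collector flagging instances for removal."""

    # any order lower than CollectorOrder + 0.4999 runs before the cleanup
    order = pyblish.api.CollectorOrder + 0.1
    hosts = ["standalonepublisher"]

    def process(self, instance):
        # CollectClearInstances will drop anything carrying this flag
        if instance.data.get("subset", "").startswith("_tmp"):
            instance.data["remove"] = True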

View file

@ -1,3 +1,19 @@
"""
Optional:
presets -> extensions (
example of use:
[".mov", ".mp4"]
)
presets -> source_dir (
example of use:
"C:/pathToFolder"
"{root}/{project[name]}/inputs"
"{root[work]}/{project[name]}/inputs"
"./input"
"../input"
)
"""
import os
import opentimelineio as otio
import pyblish.api
@ -33,8 +49,10 @@ class CollectEditorial(pyblish.api.InstancePlugin):
    # presets
    extensions = [".mov", ".mp4"]
+    source_dir = None

    def process(self, instance):
+        root_dir = None
        # remove context test attribute
        if instance.context.data.get("subsetNamesCheck"):
            instance.context.data.pop("subsetNamesCheck")
@ -53,19 +71,42 @@ class CollectEditorial(pyblish.api.InstancePlugin):
        # get video file path
        video_path = None
        basename = os.path.splitext(os.path.basename(file_path))[0]
-        for f in os.listdir(staging_dir):
-            self.log.debug(f"__ test file: `{f}`")
-            # filter out by not sharing the same name
-            if os.path.splitext(f)[0] not in basename:
-                continue
-            # filter out by respected extensions
-            if os.path.splitext(f)[1] not in self.extensions:
-                continue
-            video_path = os.path.join(
-                staging_dir, f
-            )
-            self.log.debug(f"__ video_path: `{video_path}`")
-        instance.data["editorialVideoPath"] = video_path
+        if self.source_dir:
+            source_dir = self.source_dir.replace("\\", "/")
+            if ("./" in source_dir) or ("../" in source_dir):
+                # get current working dir
+                cwd = os.getcwd()
+                # set cwd to staging dir for absolute path solving
+                os.chdir(staging_dir)
+                root_dir = os.path.abspath(source_dir)
+                # set back original cwd
+                os.chdir(cwd)
+            elif "{" in source_dir:
+                root_dir = source_dir
+            else:
+                root_dir = os.path.normpath(source_dir)
+
+        if root_dir:
+            # search for source data will need to be done
+            instance.data["editorialSourceRoot"] = root_dir
+            instance.data["editorialSourcePath"] = None
+        else:
+            # source data are already found
+            for f in os.listdir(staging_dir):
+                # filter out by not sharing the same name
+                if os.path.splitext(f)[0] not in basename:
+                    continue
+                # filter out by respected extensions
+                if os.path.splitext(f)[1] not in self.extensions:
+                    continue
+                video_path = os.path.join(
+                    staging_dir, f
+                )
+                self.log.debug(f"__ video_path: `{video_path}`")
+            instance.data["editorialSourceRoot"] = staging_dir
+            instance.data["editorialSourcePath"] = video_path

        instance.data["stagingDir"] = staging_dir

        # get editorial sequence file into otio timeline object
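
To illustrate how the `source_dir` preset documented in the module docstring is resolved above, here is a small standalone sketch (the preset values are invented for the example):

import os
import tempfile

# stand-ins for the collector's values, for illustration only
staging_dir = tempfile.mkdtemp(prefix="pyblish_tmp_")
source_dir = "../input"                               # relative preset
template_dir = "{root[work]}/{project[name]}/inputs"  # templated preset

# relative presets are resolved against the staging dir
cwd = os.getcwd()
os.chdir(staging_dir)
root_dir = os.path.abspath(source_dir)
os.chdir(cwd)
print(root_dir)              # the "input" folder next to the staging dir

# templated presets are passed through untouched here; they are formatted
# later against anatomy data by the resources collector
print("{" in template_dir)   # True, so the string is kept as a template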

View file

@ -2,7 +2,7 @@ import pyblish.api
import re
import os
from avalon import io
+from copy import deepcopy

class CollectHierarchyInstance(pyblish.api.ContextPlugin):
    """Collecting hierarchy context from `parents` and `hierarchy` data
@ -60,7 +60,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
    def create_hierarchy(self, instance):
        parents = list()
-        hierarchy = ""
+        hierarchy = list()
        visual_hierarchy = [instance.context.data["assetEntity"]]
        while True:
            visual_parent = io.find_one(
@ -81,27 +81,74 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
            })

        if self.shot_add_hierarchy:
+            parent_template_patern = re.compile(r"\{([a-z]*?)\}")
            # fill the parents parts from presets
            shot_add_hierarchy = self.shot_add_hierarchy.copy()
            hierarchy_parents = shot_add_hierarchy["parents"].copy()
-            for parent in hierarchy_parents:
-                hierarchy_parents[parent] = hierarchy_parents[parent].format(
-                    **instance.data["anatomyData"])
+
+            # fill parent keys data template from anatomy data
+            for parent_key in hierarchy_parents:
+                hierarchy_parents[parent_key] = hierarchy_parents[
+                    parent_key].format(**instance.data["anatomyData"])
+
+            for _index, _parent in enumerate(
+                    shot_add_hierarchy["parents_path"].split("/")):
+                parent_filled = _parent.format(**hierarchy_parents)
+                parent_key = parent_template_patern.findall(_parent).pop()
+
+                # in case SP context is set to the same folder
+                if (_index == 0) and ("folder" in parent_key) \
+                        and (parents[-1]["entityName"] == parent_filled):
+                    self.log.debug(f" skiping : {parent_filled}")
+                    continue
+
+                # in case first parent is project then start parents from start
+                if (_index == 0) and ("project" in parent_key):
+                    self.log.debug("rebuilding parents from scratch")
+                    project_parent = parents[0]
+                    parents = [project_parent]
+                    self.log.debug(f"project_parent: {project_parent}")
+                    self.log.debug(f"parents: {parents}")
+                    continue

                prnt = self.convert_to_entity(
-                    parent, hierarchy_parents[parent])
+                    parent_key, parent_filled)
                parents.append(prnt)
+                hierarchy.append(parent_filled)

-            hierarchy = shot_add_hierarchy[
-                "parents_path"].format(**hierarchy_parents)
+            # convert hierarchy to string
+            hierarchy = "/".join(hierarchy)

+        # assign to instance data
        instance.data["hierarchy"] = hierarchy
        instance.data["parents"] = parents
+
+        # print
        self.log.debug(f"Hierarchy: {hierarchy}")
+        self.log.debug(f"parents: {parents}")

        if self.shot_add_tasks:
-            instance.data["tasks"] = self.shot_add_tasks
+            tasks_to_add = dict()
+            project_tasks = io.find_one({"type": "project"})["config"]["tasks"]
+            for task_name, task_data in self.shot_add_tasks.items():
+                try:
+                    if task_data["type"] in project_tasks.keys():
+                        tasks_to_add.update({task_name: task_data})
+                    else:
+                        raise KeyError(
+                            "Wrong FtrackTaskType `{}` for `{}` is not"
+                            " existing in `{}``".format(
+                                task_data["type"],
+                                task_name,
+                                list(project_tasks.keys())))
+                except KeyError as error:
+                    raise KeyError(
+                        "Wrong presets: `{0}`".format(error)
+                    )
+            instance.data["tasks"] = tasks_to_add
        else:
-            instance.data["tasks"] = list()
+            instance.data["tasks"] = dict()

        # updating hierarchy data
        instance.data["anatomyData"].update({
@ -117,7 +164,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
    def processing_instance(self, instance):
        self.log.info(f"_ instance: {instance}")
        # adding anatomyData for burnins
-        instance.data["anatomyData"] = instance.context.data["anatomyData"]
+        instance.data["anatomyData"] = deepcopy(
+            instance.context.data["anatomyData"])

        asset = instance.data["asset"]
        assets_shared = instance.context.data.get("assetsShared")
@ -133,9 +181,6 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
        shot_name = instance.data["asset"]
        self.log.debug(f"Shot Name: {shot_name}")

-        if instance.data["hierarchy"] not in shot_name:
-            self.log.warning("wrong parent")
-
        label = f"{shot_name} ({frame_start}-{frame_end})"
        instance.data["label"] = label
@ -150,7 +195,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
            "asset": instance.data["asset"],
            "hierarchy": instance.data["hierarchy"],
            "parents": instance.data["parents"],
-            "tasks": instance.data["tasks"]
+            "tasks": instance.data["tasks"],
+            "anatomyData": instance.data["anatomyData"]
        })
@ -194,6 +240,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
                instance.data["parents"] = s_asset_data["parents"]
                instance.data["hierarchy"] = s_asset_data["hierarchy"]
                instance.data["tasks"] = s_asset_data["tasks"]
+                instance.data["anatomyData"] = s_asset_data["anatomyData"]

            # generate hierarchy data only on shot instances
            if 'shot' not in instance.data.get('family', ''):
@ -224,7 +271,9 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
            in_info['tasks'] = instance.data['tasks']

+            from pprint import pformat
            parents = instance.data.get('parents', [])
+            self.log.debug(f"parents: {pformat(parents)}")

            actual = {name: in_info}
@ -240,4 +289,5 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
        # adding hierarchy context to instance
        context.data["hierarchyContext"] = final_context
+        self.log.debug(f"hierarchyContext: {pformat(final_context)}")
        self.log.info("Hierarchy instance collected")

View file

@ -23,6 +23,7 @@ class CollectInstanceData(pyblish.api.InstancePlugin):
    def process(self, instance):
        fps = instance.context.data["fps"]
        instance.data.update({
            "fps": fps
        })

View file

@ -0,0 +1,266 @@
import os
import re
import tempfile
import pyblish.api
from copy import deepcopy
import clique
class CollectInstanceResources(pyblish.api.InstancePlugin):
"""Collect instance's resources"""
# must be after `CollectInstances`
order = pyblish.api.CollectorOrder + 0.011
label = "Collect Instance Resources"
hosts = ["standalonepublisher"]
families = ["clip"]
def process(self, instance):
self.context = instance.context
self.log.info(f"Processing instance: {instance}")
self.new_instances = []
subset_files = dict()
subset_dirs = list()
anatomy = self.context.data["anatomy"]
anatomy_data = deepcopy(self.context.data["anatomyData"])
anatomy_data.update({"root": anatomy.roots})
subset = instance.data["subset"]
clip_name = instance.data["clipName"]
editorial_source_root = instance.data["editorialSourceRoot"]
editorial_source_path = instance.data["editorialSourcePath"]
# if `editorial_source_path` is set, the source clip is already known
if editorial_source_path:
# add `trimming` family so the `ExtractTrimVideoAudio` plugin
# can trim the mov/mp4 source covering the clip range
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data["stagingDir"] = staging_dir
instance.data["families"] += ["trimming"]
return
# if template pattern in path then fill it with `anatomy_data`
if "{" in editorial_source_root:
editorial_source_root = editorial_source_root.format(
**anatomy_data)
self.log.debug(f"root: {editorial_source_root}")
# loop `editorial_source_root` and find clip name in folders
# and look for any subset name alternatives
for root, dirs, _files in os.walk(editorial_source_root):
# search only for directories related to clip name
correct_clip_dir = None
for _d_search in dirs:
# avoid all non clip dirs
if _d_search not in clip_name:
continue
# found correct dir for clip
correct_clip_dir = _d_search
# continue if clip dir was not found
if not correct_clip_dir:
continue
clip_dir_path = os.path.join(root, correct_clip_dir)
subset_files_items = list()
# list content of clip dir and search for subset items
for subset_item in os.listdir(clip_dir_path):
# avoid all items which are not defined as subsets by name
if subset not in subset_item:
continue
subset_item_path = os.path.join(
clip_dir_path, subset_item)
# if it is dir store it to `subset_dirs` list
if os.path.isdir(subset_item_path):
subset_dirs.append(subset_item_path)
# if it is file then store it to `subset_files` list
if os.path.isfile(subset_item_path):
subset_files_items.append(subset_item_path)
if subset_files_items:
subset_files.update({clip_dir_path: subset_files_items})
# break the loop if correct_clip_dir was captured
# no need to carry on if correct folder was found
if correct_clip_dir:
break
if subset_dirs:
# look all dirs and check for subset name alternatives
for _dir in subset_dirs:
instance_data = deepcopy(
{k: v for k, v in instance.data.items()})
sub_dir = os.path.basename(_dir)
# if subset name is only alternative then create new instance
if sub_dir != subset:
instance_data = self.duplicate_instance(
instance_data, subset, sub_dir)
# create all representations
self.create_representations(
os.listdir(_dir), instance_data, _dir)
if sub_dir == subset:
self.new_instances.append(instance_data)
# instance.data.update(instance_data)
if subset_files:
unique_subset_names = list()
root_dir = list(subset_files.keys()).pop()
files_list = subset_files[root_dir]
search_patern = f"({subset}[A-Za-z0-9]+)(?=[\\._\\s])"
for _file in files_list:
patern = re.compile(search_patern)
match = patern.findall(_file)
if not match:
continue
match_subset = match.pop()
if match_subset in unique_subset_names:
continue
unique_subset_names.append(match_subset)
self.log.debug(f"unique_subset_names: {unique_subset_names}")
for _un_subs in unique_subset_names:
instance_data = self.duplicate_instance(
instance.data, subset, _un_subs)
# create all representations
self.create_representations(
[os.path.basename(f) for f in files_list
if _un_subs in f],
instance_data, root_dir)
# remove the original instance as it was used only
# as a template and has been duplicated
self.context.remove(instance)
# create all instances in self.new_instances into context
for new_instance in self.new_instances:
_new_instance = self.context.create_instance(
new_instance["name"])
_new_instance.data.update(new_instance)
def duplicate_instance(self, instance_data, subset, new_subset):
new_instance_data = dict()
for _key, _value in instance_data.items():
new_instance_data[_key] = _value
if not isinstance(_value, str):
continue
if subset in _value:
new_instance_data[_key] = _value.replace(
subset, new_subset)
self.log.info(f"Creating new instance: {new_instance_data['name']}")
self.new_instances.append(new_instance_data)
return new_instance_data
def create_representations(
self, files_list, instance_data, staging_dir):
""" Create representations from Collection object
"""
# collecting frames for later frame start/end reset
frames = list()
# break down the file list into collections and remainder
collections, remainder = clique.assemble(files_list)
# add staging_dir to instance_data
instance_data["stagingDir"] = staging_dir
# add representations to instance_data
instance_data["representations"] = list()
collection_head_name = None
# loop through collections and create representations
for _collection in collections:
ext = _collection.tail
collection_head_name = _collection.head
frame_start = list(_collection.indexes)[0]
frame_end = list(_collection.indexes)[-1]
repre_data = {
"frameStart": frame_start,
"frameEnd": frame_end,
"name": ext[1:],
"ext": ext[1:],
"files": [item for item in _collection],
"stagingDir": staging_dir
}
if "review" in instance_data["families"]:
repre_data.update({
"thumbnail": True,
"frameStartFtrack": frame_start,
"frameEndFtrack": frame_end,
"step": 1,
"fps": self.context.data.get("fps"),
"name": "review",
"tags": ["review", "ftrackreview", "delete"],
})
instance_data["representations"].append(repre_data)
# add to frames for frame range reset
frames.append(frame_start)
frames.append(frame_end)
# loop through remainder files and create representations
for _reminding_file in remainder:
ext = os.path.splitext(_reminding_file)[-1]
if ext not in instance_data["extensions"]:
continue
if collection_head_name and (
(collection_head_name + ext[1:]) not in _reminding_file
) and (ext in [".mp4", ".mov"]):
self.log.info(f"Skipping file: {_reminding_file}")
continue
frame_start = 1
frame_end = 1
repre_data = {
"name": ext[1:],
"ext": ext[1:],
"files": _reminding_file,
"stagingDir": staging_dir
}
# exception for thumbnail
if "thumb" in _reminding_file:
repre_data.update({
'name': "thumbnail",
'thumbnail': True
})
# exception for mp4 preview
if ".mp4" in _reminding_file:
frame_start = 0
frame_end = (
(instance_data["frameEnd"] - instance_data["frameStart"])
+ 1)
# add review ftrack family into families
for _family in ["review", "ftrack"]:
if _family not in instance_data["families"]:
instance_data["families"].append(_family)
repre_data.update({
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartFtrack": frame_start,
"frameEndFtrack": frame_end,
"step": 1,
"fps": self.context.data.get("fps"),
"name": "review",
"tags": ["review", "ftrackreview", "delete"],
})
# add to frames for frame range reset only if no collection
if not collections:
frames.append(frame_start)
frames.append(frame_end)
instance_data["representations"].append(repre_data)
# reset frame start / end
instance_data["frameStart"] = min(frames)
instance_data["frameEnd"] = max(frames)

View file

@ -1,15 +1,14 @@
import os
import opentimelineio as otio
-import tempfile
import pyblish.api
from pype import lib as plib

-class CollectClipInstances(pyblish.api.InstancePlugin):
-    """Collect Clips instances from editorial's OTIO sequence"""
+class CollectInstances(pyblish.api.InstancePlugin):
+    """Collect instances from editorial's OTIO sequence"""

    order = pyblish.api.CollectorOrder + 0.01
-    label = "Collect Clips"
+    label = "Collect Instances"
    hosts = ["standalonepublisher"]
    families = ["editorial"]
@ -18,31 +17,31 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
        "referenceMain": {
            "family": "review",
            "families": ["clip", "ftrack"],
-            # "ftrackFamily": "review",
-            "extension": ".mp4"
+            "extensions": [".mp4"]
        },
        "audioMain": {
            "family": "audio",
            "families": ["clip", "ftrack"],
-            # "ftrackFamily": "audio",
-            "extension": ".wav",
-            # "version": 1
+            "extensions": [".wav"],
        },
        "shotMain": {
            "family": "shot",
            "families": []
        }
    }

-    timeline_frame_offset = None  # if 900000 for edl default then -900000
+    timeline_frame_start = 900000  # standard edl default (10:00:00:00)
+    timeline_frame_offset = None
    custom_start_frame = None

    def process(self, instance):
-        staging_dir = os.path.normpath(
-            tempfile.mkdtemp(prefix="pyblish_tmp_")
-        )
        # get context
        context = instance.context
+        instance_data_filter = [
+            "editorialSourceRoot",
+            "editorialSourcePath"
+        ]

        # attribute for checking duplicity during creation
        if not context.data.get("assetNameCheck"):
            context.data["assetNameCheck"] = list()
@ -68,15 +67,19 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
        handle_start = int(asset_data["handleStart"])
        handle_end = int(asset_data["handleEnd"])

-        instances = []
        for track in tracks:
+            self.log.debug(f"track.name: {track.name}")
            try:
                track_start_frame = (
                    abs(track.source_range.start_time.value)
                )
+                self.log.debug(f"track_start_frame: {track_start_frame}")
+                track_start_frame -= self.timeline_frame_start
            except AttributeError:
                track_start_frame = 0

+            self.log.debug(f"track_start_frame: {track_start_frame}")
+
            for clip in track.each_child():
                if clip.name is None:
                    continue
@ -103,7 +106,10 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
                # frame ranges data
                clip_in = clip.range_in_parent().start_time.value
+                clip_in += track_start_frame
                clip_out = clip.range_in_parent().end_time_inclusive().value
+                clip_out += track_start_frame
+                self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}")

                # add offset in case there is any
                if self.timeline_frame_offset:
@ -131,14 +137,11 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
                # create shared new instance data
                instance_data = {
-                    "stagingDir": staging_dir,
                    # shared attributes
                    "asset": name,
                    "assetShareName": name,
-                    "editorialVideoPath": instance.data[
-                        "editorialVideoPath"],
                    "item": clip,
+                    "clipName": clip_name,

                    # parent time properities
                    "trackStartFrame": track_start_frame,
@ -167,6 +170,10 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
                    "frameEndH": frame_end + handle_end
                }

+                for data_key in instance_data_filter:
+                    instance_data.update({
+                        data_key: instance.data.get(data_key)})
+
                # adding subsets to context as instances
                for subset, properities in self.subsets.items():
                    # adding Review-able instance
@ -174,14 +181,20 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
                    subset_instance_data.update(properities)
                    subset_instance_data.update({
                        # unique attributes
-                        "name": f"{subset}_{name}",
-                        "label": f"{subset} {name} ({clip_in}-{clip_out})",
+                        "name": f"{name}_{subset}",
+                        "label": f"{name} {subset} ({clip_in}-{clip_out})",
                        "subset": subset
                    })
-                    instances.append(instance.context.create_instance(
-                        **subset_instance_data))
+                    # create new instance
+                    _instance = instance.context.create_instance(
+                        **subset_instance_data)
+                    self.log.debug(
+                        f"Instance: `{_instance}` | "
+                        f"families: `{subset_instance_data['families']}`")

                context.data["assetsShared"][name] = {
                    "_clipIn": clip_in,
                    "_clipOut": clip_out
                }
+
+                self.log.debug("Instance: `{}` | families: `{}`")

View file

@ -1,92 +0,0 @@
import os
import clique
import pype.api
from pprint import pformat
class ExtractShotData(pype.api.Extractor):
"""Extract shot "mov" and "wav" files."""
label = "Extract Shot Data"
hosts = ["standalonepublisher"]
families = ["clip"]
# presets
def process(self, instance):
representation = instance.data.get("representations")
self.log.debug(f"_ representation: {representation}")
if not representation:
instance.data["representations"] = list()
# get ffmpet path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
self.log.info("Staging dir set to: `{}`".format(staging_dir))
# Generate mov file.
fps = instance.data["fps"]
video_file_path = instance.data["editorialVideoPath"]
ext = instance.data.get("extension", ".mov")
clip_trimed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
#
# # check video file metadata
# input_data = plib.ffprobe_streams(video_file_path)[0]
# self.log.debug(f"__ input_data: `{input_data}`")
start = float(instance.data["clipInH"])
dur = float(instance.data["clipDurationH"])
if ext in ".wav":
start += 0.5
args = [
"\"{}\"".format(ffmpeg_path),
"-ss", str(start / fps),
"-i", f"\"{video_file_path}\"",
"-t", str(dur / fps)
]
if ext in [".mov", ".mp4"]:
args.extend([
"-crf", "18",
"-pix_fmt", "yuv420p"])
elif ext in ".wav":
args.extend([
"-vn -acodec pcm_s16le",
"-ar 48000 -ac 2"
])
# add output path
args.append(f"\"{clip_trimed_path}\"")
self.log.info(f"Processing: {args}")
ffmpeg_args = " ".join(args)
output = pype.api.subprocess(ffmpeg_args, shell=True)
self.log.info(output)
repr = {
"name": ext[1:],
"ext": ext[1:],
"files": os.path.basename(clip_trimed_path),
"stagingDir": staging_dir,
"frameStart": int(instance.data["frameStart"]),
"frameEnd": int(instance.data["frameEnd"]),
"frameStartFtrack": int(instance.data["frameStartH"]),
"frameEndFtrack": int(instance.data["frameEndH"]),
"fps": fps,
}
if ext[1:] in ["mov", "mp4"]:
repr.update({
"thumbnail": True,
"tags": ["review", "ftrackreview", "delete"]})
instance.data["representations"].append(repr)
self.log.debug(f"Instance data: {pformat(instance.data)}")

View file

@ -46,6 +46,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
        files_len = 1
        file = files

+        staging_dir = None
        is_jpeg = False
        if file.endswith(".jpeg") or file.endswith(".jpg"):
            is_jpeg = True
@ -106,7 +107,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
        thumbnail_repre.pop("thumbnail")

        filename = os.path.basename(full_thumbnail_path)
-        staging_dir = os.path.dirname(full_thumbnail_path)
+        staging_dir = staging_dir or os.path.dirname(full_thumbnail_path)

        # create new thumbnail representation
        representation = {
@ -121,4 +122,5 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
        if not is_jpeg:
            representation["tags"].append("delete")

+        self.log.info(f"New representation {representation}")
        instance.data["representations"].append(representation)

View file

@ -0,0 +1,105 @@
import os
import pyblish.api
import pype.api
from pprint import pformat
class ExtractTrimVideoAudio(pype.api.Extractor):
"""Trim with ffmpeg "mov" and "wav" files."""
# must be before `ExtractThumbnailSP`
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract Trim Video/Audio"
hosts = ["standalonepublisher"]
families = ["clip", "trimming"]
# make sure it is enabled only if both families are present
match = pyblish.api.Subset
# presets
def process(self, instance):
representation = instance.data.get("representations")
self.log.debug(f"_ representation: {representation}")
if not representation:
instance.data["representations"] = list()
# get ffmpeg path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
self.log.info("Staging dir set to: `{}`".format(staging_dir))
# Generate mov file.
fps = instance.data["fps"]
video_file_path = instance.data["editorialSourcePath"]
extensions = instance.data.get("extensions", [".mov"])
for ext in extensions:
self.log.info("Processing ext: `{}`".format(ext))
clip_trimed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
# # check video file metadata
# input_data = plib.ffprobe_streams(video_file_path)[0]
# self.log.debug(f"__ input_data: `{input_data}`")
start = float(instance.data["clipInH"])
dur = float(instance.data["clipDurationH"])
if ext == ".wav":
# offset start time to work around an ffmpeg bug
start += 0.5
# remove "trimming" from families
instance.data["families"] = [
fml for fml in instance.data["families"]
if "trimming" not in fml
]
args = [
ffmpeg_path,
"-ss", str(start / fps),
"-i", f"\"{video_file_path}\"",
"-t", str(dur / fps)
]
if ext in [".mov", ".mp4"]:
args.extend([
"-crf", "18",
"-pix_fmt", "yuv420p"])
elif ext in ".wav":
args.extend([
"-vn -acodec pcm_s16le",
"-ar 48000 -ac 2"
])
# add output path
args.append(f"\"{clip_trimed_path}\"")
self.log.info(f"Processing: {args}")
ffmpeg_args = " ".join(args)
output = pype.api.subprocess(ffmpeg_args, shell=True)
self.log.info(output)
repr = {
"name": ext[1:],
"ext": ext[1:],
"files": os.path.basename(clip_trimed_path),
"stagingDir": staging_dir,
"frameStart": int(instance.data["frameStart"]),
"frameEnd": int(instance.data["frameEnd"]),
"frameStartFtrack": int(instance.data["frameStartH"]),
"frameEndFtrack": int(instance.data["frameEndH"]),
"fps": fps,
}
if ext in [".mov", ".mp4"]:
repr.update({
"thumbnail": True,
"tags": ["review", "ftrackreview", "delete"]})
instance.data["representations"].append(repr)
self.log.debug(f"Instance data: {pformat(instance.data)}")

View file

@ -7,7 +7,10 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
label = "Validate Editorial Resources" label = "Validate Editorial Resources"
hosts = ["standalonepublisher"] hosts = ["standalonepublisher"]
families = ["clip"] families = ["clip", "trimming"]
# make sure it is enabled only if at least both families are available
match = pyblish.api.Subset
order = pype.api.ValidateContentsOrder order = pype.api.ValidateContentsOrder
@ -15,6 +18,6 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
        self.log.debug(
            f"Instance: {instance}, Families: "
            f"{[instance.data['family']] + instance.data['families']}")

-        check_file = instance.data["editorialVideoPath"]
+        check_file = instance.data["editorialSourcePath"]
        msg = f"Missing \"{check_file}\"."
        assert check_file, msg