feat(sp): add audio and shot instancing

Jakub Jezek 2020-08-05 14:05:16 +02:00
parent 8f57ae45fe
commit 8f9e0c9d28
13 changed files with 318 additions and 515 deletions


@@ -35,7 +35,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder - 0.04
label = 'Integrate Hierarchy To Ftrack'
families = ["clip", "shot"]
families = ["shot"]
optional = False
def process(self, context):


@@ -49,6 +49,9 @@ class ExtractBurnin(pype.api.Extractor):
fields = None
def process(self, instance):
representation = instance.data["representations"]
self.log.debug(f"_ representation: {representation}")
# ffmpeg doesn't support multipart exrs
if instance.data.get("multipartExr") is True:
instance_label = (


@@ -7,7 +7,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract Hierarchy To Avalon"
families = ["clip", "shot", "editorial"]
families = ["clip", "shot"]
def process(self, context):
if "hierarchyContext" not in context.data:


@@ -50,6 +50,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
to_height = 1080
def process(self, instance):
representation = instance.data["representations"]
self.log.debug(f"_ representation: {representation}")
# ffmpeg doesn't support multipart exrs
if instance.data.get("multipartExr") is True:
instance_label = (


@@ -0,0 +1,179 @@
import os
import opentimelineio as otio
import tempfile
import pyblish.api
from pype import lib as plib
class CollectClipInstances(pyblish.api.InstancePlugin):
"""Collect Clips instances from editorial's OTIO sequence"""
order = pyblish.api.CollectorOrder + 0.01
label = "Collect Clips"
hosts = ["standalonepublisher"]
families = ["editorial"]
# presets
subsets = {
"referenceMain": {
"family": "review",
"families": ["review", "ftrack"],
"ftrackFamily": "review",
"extension": ".mp4"
},
"audioMain": {
"family": "audio",
"families": ["ftrack"],
"ftrackFamily": "audio",
"extension": ".wav"
},
"shotMain": {
"family": "shot",
"families": []
}
}
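# NOTE: each preset above becomes one pyblish instance per clip, named
# "<subset>_<asset>" (see the per-subset loop at the end of process())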
start_frame_offset = None  # e.g. -900000 to cancel the EDL default sequence start of 900000
custom_start_frame = None
def process(self, instance):
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
# get context
context = instance.context
# create asset_names conversion table
if not context.data.get("assetsShared"):
self.log.debug("Created `assetsShared` in context")
context.data["assetsShared"] = dict()
# get timeline otio data
timeline = instance.data["otio_timeline"]
fps = plib.get_asset()["data"]["fps"]
tracks = timeline.each_child(
descended_from_type=otio.schema.track.Track
)
self.log.debug(f"__ tracks: `{tracks}`")
# get data from avalon
asset_entity = instance.context.data["assetEntity"]
asset_data = asset_entity["data"]
asset_name = asset_entity["name"]
self.log.debug(f"__ asset_entity: `{asset_entity}`")
# Timeline data.
handle_start = int(asset_data["handleStart"])
handle_end = int(asset_data["handleEnd"])
instances = []
for track in tracks:
self.log.debug(f"__ track: `{track}`")
try:
track_start_frame = (
abs(track.source_range.start_time.value)
)
except AttributeError:
track_start_frame = 0
self.log.debug(f"__ track: `{track}`")
for clip in track.each_child():
# skip all generators like black empty
if isinstance(
clip.media_reference,
otio.schema.GeneratorReference):
continue
# Transitions are ignored, because Clips have the full frame
# range.
if isinstance(clip, otio.schema.transition.Transition):
continue
if clip.name is None:
continue
# basic unique asset name
clip_name = os.path.splitext(clip.name)[0].lower()
name = f"{asset_name.split('_')[0]}_{clip_name}"
# frame ranges data
clip_in = clip.range_in_parent().start_time.value
clip_out = clip.range_in_parent().end_time_inclusive().value
clip_duration = clip.duration().value
source_in = clip.trimmed_range().start_time.value
source_out = source_in + clip_duration
source_in_h = source_in - handle_start
source_out_h = source_out + handle_end
clip_in_h = clip_in - handle_start
clip_out_h = clip_out + handle_end
# define starting frame for future shot
frame_start = self.custom_start_frame or clip_in
# add offset in case there is any
if self.start_frame_offset:
frame_start += self.start_frame_offset
frame_end = frame_start + clip_duration
# create shared new instance data
instance_data = {
"stagingDir": staging_dir,
# shared attributes
"asset": name,
"assetShareName": name,
"editorialVideoPath": instance.data[
"editorialVideoPath"],
"item": clip,
# parent time properties
"trackStartFrame": track_start_frame,
"handleStart": handle_start,
"handleEnd": handle_end,
"fps": fps,
# media source
"sourceIn": source_in,
"sourceOut": source_out,
"sourceInH": source_in_h,
"sourceOutH": source_out_h,
# timeline
"clipIn": clip_in,
"clipOut": clip_out,
"clipDuration": clip_duration,
"clipInH": clip_in_h,
"clipOutH": clip_out_h,
"clipDurationH": clip_duration + handle_start + handle_end,
# task
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartH": frame_start - handle_start,
"frameEndH": frame_end + handle_end
}
# adding subsets to context as instances
for subset, properties in self.subsets.items():
# create one instance per subset preset (review, audio, shot)
subset_instance_data = instance_data.copy()
subset_instance_data.update(properties)
subset_instance_data.update({
# unique attributes
"name": f"{subset}_{name}",
"label": f"{subset} {name} ({clip_in}-{clip_out})",
"subset": subset
})
instances.append(instance.context.create_instance(
**subset_instance_data))
self.log.debug(instance_data)
context.data["assetsShared"][name] = {
"_clipIn": clip_in,
"_clipOut": clip_out
}
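For orientation, a minimal worked example of the frame-range math above, assuming the default presets (custom_start_frame and start_frame_offset both None) and invented clip numbers:

# hypothetical clip: starts at timeline frame 100, runs 48 frames, handles 10/10
handle_start, handle_end = 10, 10
clip_in, clip_duration = 100, 48

clip_out = clip_in + clip_duration - 1      # 147 (end_time_inclusive)
clip_in_h = clip_in - handle_start          # 90
clip_out_h = clip_out + handle_end          # 157

frame_start = clip_in                       # 100, no custom start frame
frame_end = frame_start + clip_duration     # 148
frame_start_h = frame_start - handle_start  # 90  -> "frameStartH"
frame_end_h = frame_end + handle_end        # 158 -> "frameEndH"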


@@ -1,242 +0,0 @@
import os
import opentimelineio as otio
from bson import json_util
import pyblish.api
from pype import lib as plib
from avalon import io
class CollectClips(pyblish.api.InstancePlugin):
"""Collect Clips instances from editorial's OTIO sequence"""
order = pyblish.api.CollectorOrder + 0.01
label = "Collect Clips"
hosts = ["standalonepublisher"]
families = ["editorial"]
def process(self, instance):
# get context
context = instance.context
# create asset_names conversion table
if not context.data.get("assetsShared"):
self.log.debug("Created `assetsShared` in context")
context.data["assetsShared"] = dict()
# get timeline otio data
timeline = instance.data["otio_timeline"]
fps = plib.get_asset()["data"]["fps"]
tracks = timeline.each_child(
descended_from_type=otio.schema.track.Track
)
self.log.debug(f"__ tracks: `{tracks}`")
# get data from avalon
asset_entity = instance.context.data["assetEntity"]
asset_data = asset_entity["data"]
asset_name = asset_entity["name"]
self.log.debug(f"__ asset_entity: `{asset_entity}`")
# split selected context asset name
asset_name = asset_name.split("_")[0]
instances = []
for track in tracks:
self.log.debug(f"__ track: `{track}`")
try:
track_start_frame = (
abs(track.source_range.start_time.value)
)
except AttributeError:
track_start_frame = 0
self.log.debug(f"__ track: `{track}`")
for clip in track.each_child():
# skip all generators like black empty
if isinstance(
clip.media_reference,
otio.schema.GeneratorReference):
continue
# Transitions are ignored, because Clips have the full frame
# range.
if isinstance(clip, otio.schema.transition.Transition):
continue
if clip.name is None:
continue
clip_name = os.path.splitext(clip.name)[0].lower()
name = f"{asset_name}_{clip_name}"
source_in = clip.trimmed_range().start_time.value
clip_in = clip.range_in_parent().start_time.value
clip_out = clip.range_in_parent().end_time_inclusive().value
clip_duration = clip.duration().value
self.log.debug(f"__ source_in: `{source_in}`")
self.log.debug(f"__ clip_in: `{clip_in}`")
self.log.debug(f"__ clip_out: `{clip_out}`")
self.log.debug(f"__ clip_duration: `{clip_duration}`")
label = f"{name} (framerange: {clip_in}-{clip_out})"
new_instance_data = {
# shared attributes
"representations": [],
# timing properties
"trackStartFrame": track_start_frame,
"sourceIn": source_in,
"sourceOut": source_in + clip_duration,
"clipIn": clip_in,
"clipOut": clip_out,
"clipDuration": clip_duration,
"handleStart": int(asset_data["handleStart"]),
"handleEnd": int(asset_data["handleEnd"]),
"fps": fps
}
# adding Review-able instance
shot_instance_data = new_instance_data.copy()
shot_instance_data.update({
# unique attributes
"name": name,
"label": label,
"asset": name,
"subset": "plateRef",
"item": clip,
# instance properties
"family": "clip",
"families": ["review", "ftrack"],
"ftrackFamily": "review",
"editorialVideoPath": instance.data[
"editorialVideoPath"]
})
instances.append(
instance.context.create_instance(**shot_instance_data)
)
context.data["assetsShared"][name] = {
"_clipIn": clip_in,
"_clipOut": clip_out
}
# def process_old(self, instance):
# representation = instance.data["representations"][0]
# file_path = os.path.join(
# representation["stagingDir"], representation["files"]
# )
# instance.context.data["editorialPath"] = file_path
#
# extension = os.path.splitext(file_path)[1][1:]
# kwargs = {}
# if extension == "edl":
# # EDL has no frame rate embedded so needs explicit frame rate else
# # 24 is assumed.
# kwargs["rate"] = plib.get_asset()["data"]["fps"]
#
# timeline = otio.adapters.read_from_file(file_path, **kwargs)
# tracks = timeline.each_child(
# descended_from_type=otio.schema.track.Track
# )
# asset_entity = instance.context.data["assetEntity"]
# asset_name = asset_entity["name"]
#
# # Ask user for sequence start. Usually 10:00:00:00.
# sequence_start_frame = 900000
#
# # Project specific prefix naming. This needs to be replaced with some
# # options to be more flexible.
# asset_name = asset_name.split("_")[0]
#
# instances = []
# for track in tracks:
# track_start_frame = (
# abs(track.source_range.start_time.value) - sequence_start_frame
# )
# for child in track.each_child():
# # skip all generators like black empty
# if isinstance(
# child.media_reference,
# otio.schema.GeneratorReference):
# continue
#
# # Transitions are ignored, because Clips have the full frame
# # range.
# if isinstance(child, otio.schema.transition.Transition):
# continue
#
# if child.name is None:
# continue
#
# # Hardcoded to expect a shot name of "[name].[extension]"
# child_name = os.path.splitext(child.name)[0].lower()
# name = f"{asset_name}_{child_name}"
#
# frame_start = track_start_frame
# frame_start += child.range_in_parent().start_time.value
# frame_end = track_start_frame
# frame_end += child.range_in_parent().end_time_inclusive().value
#
# label = f"{name} (framerange: {frame_start}-{frame_end})"
# instances.append(
# instance.context.create_instance(**{
# "name": name,
# "label": label,
# "frameStart": frame_start,
# "frameEnd": frame_end,
# "family": "shot",
# "families": ["review", "ftrack"],
# "ftrackFamily": "review",
# "asset": name,
# "subset": "shotMain",
# "representations": [],
# "source": file_path
# })
# )
#
# visual_hierarchy = [asset_entity]
# while True:
# visual_parent = io.find_one(
# {"_id": visual_hierarchy[-1]["data"]["visualParent"]}
# )
# if visual_parent:
# visual_hierarchy.append(visual_parent)
# else:
# visual_hierarchy.append(instance.context.data["projectEntity"])
# break
#
# context_hierarchy = None
# for entity in visual_hierarchy:
# childs = {}
# if context_hierarchy:
# name = context_hierarchy.pop("name")
# childs = {name: context_hierarchy}
# else:
# for instance in instances:
# childs[instance.data["name"]] = {
# "childs": {},
# "entity_type": "Shot",
# "custom_attributes": {
# "frameStart": instance.data["frameStart"],
# "frameEnd": instance.data["frameEnd"]
# }
# }
#
# context_hierarchy = {
# "entity_type": entity["data"]["entityType"],
# "childs": childs,
# "name": entity["name"]
# }
#
# name = context_hierarchy.pop("name")
# context_hierarchy = {name: context_hierarchy}
# instance.context.data["hierarchyContext"] = context_hierarchy
# self.log.info(
# "Hierarchy:\n" +
# json_util.dumps(context_hierarchy, sort_keys=True, indent=4)
# )


@@ -62,6 +62,7 @@ class CollectEditorial(pyblish.api.InstancePlugin):
)
self.log.debug(f"__ video_path: `{video_path}`")
instance.data["editorialVideoPath"] = video_path
instance.data["stagingDir"] = staging_dir
# get editorial sequence file into otio timeline object
extension = os.path.splitext(file_path)[1]


@@ -1,56 +0,0 @@
import pyblish.api
class CollectClipFrameRanges(pyblish.api.InstancePlugin):
"""Collect all frame range data"""
order = pyblish.api.CollectorOrder + 0.101
label = "Collect Frame Ranges"
hosts = ["standalonepublisher"]
families = ["clip"]
# presets
start_frame_offset = None  # e.g. -900000 to cancel the EDL default sequence start of 900000
custom_start_frame = None
def process(self, instance):
data = dict()
# Timeline data.
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
source_in_h = instance.data("sourceInH",
instance.data("sourceIn") - handle_start)
source_out_h = instance.data("sourceOutH",
instance.data("sourceOut") + handle_end)
timeline_in = instance.data["clipIn"]
timeline_out = instance.data["clipOut"]
timeline_in_h = timeline_in - handle_start
timeline_out_h = timeline_out + handle_end
# define starting frame for future shot
frame_start = self.custom_start_frame or timeline_in
# add offset in case there is any
if self.start_frame_offset:
frame_start += self.start_frame_offset
frame_end = frame_start + (timeline_out - timeline_in)
data.update({
"sourceInH": source_in_h,
"sourceOutH": source_out_h,
"frameStart": frame_start,
"frameEnd": frame_end,
"clipInH": timeline_in_h,
"clipOutH": timeline_out_h,
"clipDurationH": instance.data.get(
"clipDuration") + handle_start + handle_end
}
)
self.log.debug("__ data: {}".format(data))
instance.data.update(data)


@@ -16,7 +16,7 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
label = "Collect Hierarchy Clip"
order = pyblish.api.CollectorOrder + 0.101
hosts = ["standalonepublisher"]
families = ["clip"]
families = ["shot"]
# presets
shot_rename_template = None
@@ -141,7 +141,6 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
# dealing with shared attributes across instances
# with the same asset name
if assets_shared.get(asset):
self.log.debug("Adding to shared assets: `{}`".format(
asset))
@@ -153,7 +152,6 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
"asset": instance.data["asset"],
"hierarchy": instance.data["hierarchy"],
"parents": instance.data["parents"],
"fps": instance.data["fps"],
"tasks": instance.data["tasks"]
})
@@ -185,35 +183,22 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
final_context = {}
for instance in instances:
if 'clip' not in instance.data.get('family', ''):
if 'editorial' in instance.data.get('family', ''):
continue
name = instance.data["asset"]
# get handles
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
# inject assetsShared to other plates types
# inject assetsShared to other instances with
# the same `assetShareName` attribute in data
assets_shared = context.data.get("assetsShared")
asset_shared_name = instance.data.get("assetShareName")
self.log.debug(f"_ assets_shared: {assets_shared}")
self.log.debug(f"_ asset_shared_name: {asset_shared_name}")
if assets_shared:
s_asset_data = assets_shared.get(name)
if s_asset_data:
self.log.debug("__ s_asset_data: {}".format(s_asset_data))
name = instance.data["asset"] = s_asset_data["asset"]
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
instance.data["fps"] = s_asset_data["fps"]
# adding frame start if any on instance
start_frame = s_asset_data.get("startingFrame")
if start_frame:
instance.data["frameStart"] = start_frame
instance.data["frameEnd"] = start_frame + (
instance.data["clipOut"] -
instance.data["clipIn"])
s_asset_data = assets_shared.get(asset_shared_name)
if s_asset_data:
self.log.debug("__ s_asset_data: {}".format(s_asset_data))
instance.data["asset"] = s_asset_data["asset"]
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
self.log.debug(
"__ instance.data[parents]: {}".format(
@@ -229,6 +214,17 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
"__ instance.data[name]: {}".format(instance.data["name"])
)
# generate hierarchy data only on shot instances
if 'shot' not in instance.data.get('family', ''):
continue
name = instance.data["asset"]
# get handles
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
in_info = {}
# assume that all remaining instances are shots
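A rough sketch of one context.data["assetsShared"] entry as it is passed between the collectors above (keys taken from the code, values hypothetical):

assets_shared_entry = {
    "_clipIn": 100,             # seeded by CollectClipInstances
    "_clipOut": 147,
    "asset": "ep01_sh010",      # added by CollectHierarchyInstance for shot instances
    "hierarchy": "shots/ep01",  # hypothetical hierarchy path
    "parents": [],              # parent entity dicts
    "tasks": ["compositing"],   # hypothetical task list
}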


@@ -1,37 +0,0 @@
from pyblish import api
class CollectShots(api.InstancePlugin):
"""Collect Shot from Clip."""
# Run just before CollectClipSubsets
order = api.CollectorOrder + 0.1021
label = "Collect Shots"
hosts = ["standalonepublisher"]
families = ["clip"]
def process(self, instance):
# Collect data.
data = {}
for key, value in instance.data.items():
data[key] = value
data["family"] = "shot"
data["families"] = []
data["subset"] = data["family"] + "Main"
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = (
"{} - {} - tasks:{}".format(
data["asset"],
data["subset"],
data["tasks"]
)
)
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)


@@ -1,148 +0,0 @@
import os
import clique
import pype.api
import pype.lib as plib
from pprint import pformat
class ExtractShot(pype.api.Extractor):
"""Extract shot "mov" and "wav" files."""
label = "Extract Shot"
hosts = ["standalonepublisher"]
families = ["clip"]
# presets
add_representation = None # ".jpeg"
add_audio = True
def process(self, instance):
# get ffmpeg path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
self.log.info("Staging dir set to: `{}`".format(staging_dir))
# Generate mov file.
fps = instance.data["fps"]
video_file_path = instance.data["editorialVideoPath"]
ext = os.path.splitext(os.path.basename(video_file_path))[-1]
clip_trimed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
# check video file metadata
input_data = plib.ffprobe_streams(video_file_path)[0]
self.log.debug(f"__ input_data: `{input_data}`")
args = [
ffmpeg_path,
"-ss", str(instance.data["clipIn"] / fps),
"-i", video_file_path,
"-t", str(
(instance.data["clipOut"] - instance.data["clipIn"] + 1) /
fps
),
"-crf", "18",
"-pix_fmt", "yuv420p",
clip_trimed_path
]
self.log.info(f"Processing: {args}")
ffmpeg_args = " ".join(args)
output = pype.api.subprocess(ffmpeg_args)
self.log.info(output)
instance.data["families"].remove("review")
instance.data["families"].append("clip")
instance.data["family"] = "review"
# frame ranges
frame_start = int(instance.data["frameStart"])
frame_end = int(instance.data["frameEnd"])
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
instance.data["representations"].append({
"name": ext[1:],
"ext": ext[1:],
"files": os.path.basename(clip_trimed_path),
"stagingDir": staging_dir,
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartFtrack": frame_start - handle_start,
"frameEndFtrack": frame_end - handle_end,
"fps": fps,
"thumbnail": True,
"tags": ["review", "ftrackreview", "delete"]
})
self.log.debug(f"Instance data: {pformat(instance.data)}")
if self.add_representation:
# Generate jpegs.
clip_img_sequence = os.path.join(
staging_dir, instance.data["name"] + ".%04d.jpeg"
)
args = [ffmpeg_path, "-i", clip_trimed_path, clip_img_sequence]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
# collect jpeg sequence if editorial data for publish
# are image sequence
collection = clique.Collection(
head=instance.data["name"] + ".", tail='.jpeg', padding=4
)
for f in os.listdir(staging_dir):
if collection.match(f):
collection.add(f)
instance.data["representations"].append({
"name": "jpeg",
"ext": "jpeg",
"files": list(collection),
"stagingDir": staging_dir
})
if self.add_audio:
audio_ext = ".wav"
# Generate wav file.
shot_wav = os.path.join(
staging_dir, instance.data["name"] + audio_ext)
# Collect data.
data = {}
for key, value in instance.data.items():
data[key] = value
data["family"] = "audio"
data["families"] = ["ftrack"]
data["subset"] = "audioMain"
data["source"] = shot_wav
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
data['asset'],
data["subset"],
audio_ext
)
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance = instance.context.create_instance(**data)
args = [ffmpeg_path, "-i", clip_trimed_path, shot_wav]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
instance.data["representations"] = [{
"name": "wav",
"ext": "wav",
"files": os.path.basename(shot_wav),
"stagingDir": staging_dir
}]


@@ -0,0 +1,104 @@
import os
import clique
import pype.api
import pype.lib as plib
from pprint import pformat
class ExtractShotData(pype.api.Extractor):
"""Extract shot "mov" and "wav" files."""
label = "Extract Shot Data"
hosts = ["standalonepublisher"]
families = ["review", "audio"]
# presets
add_representation = None # ".jpeg"
def process(self, instance):
representation = instance.data.get("representations")
self.log.debug(f"_ representation: {representation}")
if not representation:
instance.data["representations"] = list()
# get ffmpeg path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
self.log.info("Staging dir set to: `{}`".format(staging_dir))
# Generate mov file.
fps = instance.data["fps"]
video_file_path = instance.data["editorialVideoPath"]
ext = instance.data.get("extension", ".mov")
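# "extension" comes from the collector's subset presets
# (".mp4" for referenceMain, ".wav" for audioMain); ".mov" is only a fallback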
clip_trimed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
#
# # check video file metadata
# input_data = plib.ffprobe_streams(video_file_path)[0]
# self.log.debug(f"__ input_data: `{input_data}`")
args = [
ffmpeg_path,
"-ss", str(instance.data["clipInH"] / fps),
"-i", video_file_path,
"-t", str(instance.data["clipDurationH"] / fps),
"-crf", "18",
"-pix_fmt", "yuv420p",
clip_trimed_path
]
self.log.info(f"Processing: {args}")
ffmpeg_args = " ".join(args)
output = pype.api.subprocess(ffmpeg_args)
self.log.info(output)
repr = {
"name": ext[1:],
"ext": ext[1:],
"files": os.path.basename(clip_trimed_path),
"stagingDir": staging_dir,
"frameStart": int(instance.data["frameStart"]),
"frameEnd": int(instance.data["frameEnd"]),
"frameStartFtrack": int(instance.data["frameStartH"]),
"frameEndFtrack": int(instance.data["frameEndH"]),
"fps": fps,
}
if ext[1:] in ["mov", "mp4"]:
repr.update({
"thumbnail": True,
"tags": ["review", "ftrackreview", "delete"]})
instance.data["representations"].append(repr)
if self.add_representation:
# Generate jpegs.
clip_img_sequence = os.path.join(
staging_dir, instance.data["name"] + ".%04d.jpeg"
)
args = [ffmpeg_path, "-i", clip_trimed_path, clip_img_sequence]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
# collect jpeg sequence if editorial data for publish
# are image sequence
collection = clique.Collection(
head=instance.data["name"] + ".", tail='.jpeg', padding=4
)
for f in os.listdir(staging_dir):
if collection.match(f):
collection.add(f)
instance.data["representations"].append({
"name": "jpeg",
"ext": "jpeg",
"files": list(collection),
"stagingDir": staging_dir
})
self.log.debug(f"Instance data: {pformat(instance.data)}")


@@ -9,7 +9,7 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
label = "Validate Editorial Resources"
hosts = ["standalonepublisher"]
families = ["clip"]
families = ["audio", "review"]
order = pype.api.ValidateContentsOrder
def process(self, instance):