Audio on review.

Audio needs to be collected in the host plugin. Audio is a collection of dicts with "offset" and "filename" members.
This commit is contained in:
Toke Jepsen 2019-07-23 12:41:05 +01:00
parent 15a6cbb820
commit 5547cec38c
2 changed files with 71 additions and 2 deletions

View file

@ -53,7 +53,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
ext = "mov"
self.log.warning(
"`ext` attribute not in output profile. Setting to default ext: `mov`")
self.log.debug("instance.families: {}".format(instance.data['families']))
self.log.debug("profile.families: {}".format(profile['families']))
@ -114,6 +114,36 @@ class ExtractReview(pyblish.api.InstancePlugin):
input_args.append("-i {}".format(full_input_path))
for audio in instance.data.get("audio", []):
offset_frames = (
instance.data.get("startFrameReview") -
audio["offset"]
)
offset_seconds = offset_frames / fps
if offset_seconds > 0:
input_args.append("-ss")
else:
input_args.append("-itsoffset")
input_args.append(str(abs(offset_seconds)))
input_args.extend(
["-i", audio["filename"]]
)
# Need to merge the audio tracks if there is more
# than one input.
if len(instance.data["audio"]) > 1:
input_args.extend(
[
"-filter_complex",
"amerge",
"-ac",
"2"
]
)
output_args = []
# preset's output data
output_args.extend(profile.get('output', []))
@ -125,6 +155,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
output_args.append(
"-filter:v drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))
# In case audio is longer than video.
output_args.append("-shortest")
# output filename
output_args.append(full_output_path)
mov_args = [

View file

@ -1,4 +1,4 @@
from maya import cmds
from maya import cmds, mel
import pymel.core as pm
import pyblish.api
@ -74,3 +74,39 @@ class CollectReview(pyblish.api.InstancePlugin):
instance.data["families"] = ['ftrack']
cmds.setAttr(str(instance) + '.active', 1)
# Collect audio
playback_slider = mel.eval('$tmpVar=$gPlayBackSlider')
audio_name = cmds.timeControl(playback_slider, q=True, s=True)
display_sounds = cmds.timeControl(
playback_slider, q=True, displaySound=True
)
audio_nodes = []
if audio_name:
audio_nodes.append(pm.PyNode(audio_name))
if not audio_name and display_sounds:
start_frame = int(pm.playbackOptions(q=True, min=True))
end_frame = float(pm.playbackOptions(q=True, max=True))
frame_range = range(int(start_frame), int(end_frame))
for node in pm.ls(type="audio"):
# Check whether the frame range and the audio range intersect,
# to decide whether to include this audio node or not.
start_audio = node.offset.get()
end_audio = node.offset.get() + node.duration.get()
audio_range = range(int(start_audio), int(end_audio))
if bool(set(frame_range).intersection(audio_range)):
audio_nodes.append(node)
instance.data["audio"] = []
for node in audio_nodes:
instance.data["audio"].append(
{
"offset": node.offset.get(),
"filename": node.filename.get()
}
)