Merged in tokejepsen/pype/feature/review_audio (pull request #214)

Feature/review audio

Approved-by: Milan Kolar <milan@orbi.tools>
This commit is contained in:
Toke Jepsen 2019-07-31 03:07:51 +00:00 committed by Milan Kolar
commit 534ba6451a
4 changed files with 82 additions and 4 deletions

View file

@@ -109,10 +109,19 @@ class ExtractBurnin(pype.api.Extractor):
if os.path.exists(full_burnin_path):
repre_update = {
"files": movieFileBurnin,
"name": repre["name"]
"name": repre["name"],
"tags": [x for x in repre["tags"] if x != "delete"]
}
instance.data["representations"][i].update(repre_update)
# removing the source mov file
os.remove(full_movie_path)
self.log.debug("Removed: `{}`".format(full_movie_path))
# Remove any representations tagged for deletion.
for repre in instance.data["representations"]:
if "delete" in repre.get("tags", []):
self.log.debug("Removing representation: {}".format(repre))
instance.data["representations"].remove(repre)
self.log.debug(instance.data["representations"])

View file

@@ -109,12 +109,42 @@ class ExtractReview(pyblish.api.InstancePlugin):
# necessary input data
# adds start arg only if image sequence
if "mov" not in repre_new['ext']:
if isinstance(repre["files"], list):
input_args.append("-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
for audio in instance.data.get("audio", []):
offset_frames = (
instance.data.get("startFrameReview") -
audio["offset"]
)
offset_seconds = offset_frames / fps
if offset_seconds > 0:
input_args.append("-ss")
else:
input_args.append("-itsoffset")
input_args.append(str(abs(offset_seconds)))
input_args.extend(
["-i", audio["filename"]]
)
# Need to merge audio if there are more
# than 1 input.
if len(instance.data["audio"]) > 1:
input_args.extend(
[
"-filter_complex",
"amerge",
"-ac",
"2"
]
)
output_args = []
# preset's output data
output_args.extend(profile.get('output', []))
@@ -126,6 +156,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
output_args.append(
"-filter:v drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))
# In case audio is longer than video.
output_args.append("-shortest")
# output filename
output_args.append(full_output_path)
mov_args = [

View file

@@ -1,4 +1,4 @@
from maya import cmds
from maya import cmds, mel
import pymel.core as pm
import pyblish.api
@@ -76,3 +76,39 @@ class CollectReview(pyblish.api.InstancePlugin):
instance.data["families"] = ['ftrack']
cmds.setAttr(str(instance) + '.active', 1)
# Collect audio
playback_slider = mel.eval('$tmpVar=$gPlayBackSlider')
audio_name = cmds.timeControl(playback_slider, q=True, s=True)
display_sounds = cmds.timeControl(
playback_slider, q=True, displaySound=True
)
audio_nodes = []
if audio_name:
audio_nodes.append(pm.PyNode(audio_name))
if not audio_name and display_sounds:
start_frame = int(pm.playbackOptions(q=True, min=True))
end_frame = float(pm.playbackOptions(q=True, max=True))
frame_range = range(int(start_frame), int(end_frame))
for node in pm.ls(type="audio"):
# Check if frame range and audio range intersections,
# for whether to include this audio node or not.
start_audio = node.offset.get()
end_audio = node.offset.get() + node.duration.get()
audio_range = range(int(start_audio), int(end_audio))
if bool(set(frame_range).intersection(audio_range)):
audio_nodes.append(node)
instance.data["audio"] = []
for node in audio_nodes:
instance.data["audio"].append(
{
"offset": node.offset.get(),
"filename": node.filename.get()
}
)

View file

@@ -118,7 +118,7 @@ class ExtractQuicktime(pype.api.Extractor):
'endFrame': end,
'frameRate': fps,
'preview': True,
'tags': ['review']
'tags': ['review', 'delete']
}
instance.data["representations"].append(representation)