Merge branch 'develop' into enhancement/remove-qdesktop-usage

Jakub Trllo 2025-11-07 14:14:13 +01:00 committed by GitHub
commit b87a7615e5
GPG key ID: B5690EEEBB952194
2 changed files with 150 additions and 65 deletions

View file

@@ -52,7 +52,7 @@ class CollectAudio(pyblish.api.ContextPlugin):
             context, self.__class__
         ):
             # Skip instances that already have audio filled
-            if instance.data.get("audio"):
+            if "audio" in instance.data:
                 self.log.debug(
                     "Skipping Audio collection. It is already collected"
                 )
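The check changes from value truthiness to key presence. That distinction matters because the new CollectParentAudioInstanceAttribute collector in the second file pre-seeds sibling instances with data["audio"] = None, which the old .get() check would not treat as "already collected". A minimal sketch of the difference, using a plain dict as a stand-in for instance.data:

    # pre-seeded by an earlier collector as a marker, value intentionally None
    data = {"audio": None}

    print(bool(data.get("audio")))  # False -> old check would not skip the instance
    print("audio" in data)          # True  -> new check skips it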

View file

@@ -1,12 +1,83 @@
+import collections
+import hashlib
 import os
 import tempfile
+import uuid
+from pathlib import Path
 
 import pyblish
 
-from ayon_core.lib import (
-    get_ffmpeg_tool_args,
-    run_subprocess
-)
+from ayon_core.lib import get_ffmpeg_tool_args, run_subprocess
+
+
+def get_audio_instances(context):
+    """Return only instances which are having audio in families
+
+    Args:
+        context (pyblish.context): context of publisher
+
+    Returns:
+        list: list of selected instances
+    """
+    audio_instances = []
+    for instance in context:
+        if not instance.data.get("parent_instance_id"):
+            continue
+
+        if (
+            instance.data["productType"] == "audio"
+            or instance.data.get("reviewAudio")
+        ):
+            audio_instances.append(instance)
+
+    return audio_instances
+
+
+def map_instances_by_parent_id(context):
+    """Create a mapping of instances by their parent id
+
+    Args:
+        context (pyblish.context): context of publisher
+
+    Returns:
+        dict: mapping of instances by their parent id
+    """
+    instances_by_parent_id = collections.defaultdict(list)
+    for instance in context:
+        parent_instance_id = instance.data.get("parent_instance_id")
+        if not parent_instance_id:
+            continue
+        instances_by_parent_id[parent_instance_id].append(instance)
+
+    return instances_by_parent_id
+
+
+class CollectParentAudioInstanceAttribute(pyblish.api.ContextPlugin):
+    """Collect audio instance attribute"""
+
+    order = pyblish.api.CollectorOrder
+    label = "Collect Audio Instance Attribute"
+
+    def process(self, context):
+        audio_instances = get_audio_instances(context)
+        # no need to continue if no audio instances found
+        if not audio_instances:
+            return
+
+        # create mapped instances by parent id
+        instances_by_parent_id = map_instances_by_parent_id(context)
+
+        # distribute audio related attribute
+        for audio_instance in audio_instances:
+            parent_instance_id = audio_instance.data["parent_instance_id"]
+            for sibl_instance in instances_by_parent_id[parent_instance_id]:
+                # exclude the same audio instance
+                if sibl_instance.id == audio_instance.id:
+                    continue
+
+                self.log.info(
+                    "Adding audio to Sibling instance: "
+                    f"{sibl_instance.data['label']}"
+                )
+                sibl_instance.data["audio"] = None
 
 
 class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
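Both helpers are module-level functions so the new collector and the extractor below can share them. A minimal, standalone sketch of the grouping pattern used by map_instances_by_parent_id, with hypothetical dicts standing in for pyblish instances:

    import collections

    # hypothetical stand-ins for pyblish instances; only the data dict matters here
    instances = [
        {"id": "a", "data": {"parent_instance_id": "shot01", "productType": "audio"}},
        {"id": "b", "data": {"parent_instance_id": "shot01", "productType": "plate"}},
        {"id": "c", "data": {"parent_instance_id": None, "productType": "workfile"}},
    ]

    grouped = collections.defaultdict(list)
    for instance in instances:
        parent_id = instance["data"].get("parent_instance_id")
        if not parent_id:
            continue
        grouped[parent_id].append(instance)

    # each audio instance can now look up its siblings in a single dict access
    print([i["id"] for i in grouped["shot01"]])  # ['a', 'b']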
@@ -19,7 +90,8 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
     order = pyblish.api.ExtractorOrder - 0.44
     label = "Extract OTIO Audio Tracks"
     hosts = ["hiero", "resolve", "flame"]
+    temp_dir_path = None
 
     def process(self, context):
         """Convert otio audio track's content to audio representations
@@ -28,13 +100,14 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
             context (pyblish.Context): context of publisher
         """
         # split the long audio file to peces devided by isntances
-        audio_instances = self.get_audio_instances(context)
-        self.log.debug("Audio instances: {}".format(len(audio_instances)))
+        audio_instances = get_audio_instances(context)
 
-        if len(audio_instances) < 1:
-            self.log.info("No audio instances available")
+        # no need to continue if no audio instances found
+        if not audio_instances:
             return
 
+        self.log.debug("Audio instances: {}".format(len(audio_instances)))
+
         # get sequence
         otio_timeline = context.data["otioTimeline"]
@@ -44,8 +117,8 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
         if not audio_inputs:
             return
 
-        # temp file
-        audio_temp_fpath = self.create_temp_file("audio")
+        # Convert all available audio into single file for trimming
+        audio_temp_fpath = self.create_temp_file("timeline_audio_track")
 
         # create empty audio with longest duration
         empty = self.create_empty(audio_inputs)
@@ -59,19 +132,25 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
         # remove empty
         os.remove(empty["mediaPath"])
 
+        # create mapped instances by parent id
+        instances_by_parent_id = map_instances_by_parent_id(context)
+
         # cut instance framerange and add to representations
-        self.add_audio_to_instances(audio_temp_fpath, audio_instances)
+        self.add_audio_to_instances(
+            audio_temp_fpath, audio_instances, instances_by_parent_id)
 
         # remove full mixed audio file
         os.remove(audio_temp_fpath)
 
-    def add_audio_to_instances(self, audio_file, instances):
+    def add_audio_to_instances(
+            self, audio_file, audio_instances, instances_by_parent_id):
         created_files = []
 
-        for inst in instances:
-            name = inst.data["folderPath"]
+        for audio_instance in audio_instances:
+            folder_path = audio_instance.data["folderPath"]
+            file_suffix = folder_path.replace("/", "-")
 
-            recycling_file = [f for f in created_files if name in f]
+            recycling_file = [f for f in created_files if file_suffix in f]
 
-            audio_clip = inst.data["otioClip"]
+            audio_clip = audio_instance.data["otioClip"]
             audio_range = audio_clip.range_in_parent()
             duration = audio_range.duration.to_frames()
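Each instance's folder path is turned into a filename-safe suffix and then used as a substring key to recycle an already trimmed audio file instead of invoking ffmpeg again. A minimal sketch with hypothetical values:

    created_files = ["/tmp/AYON_audio_x/1a2b3c4d_shots-sq01-sh010.wav"]

    folder_path = "shots/sq01/sh010"             # stand-in for instance.data["folderPath"]
    file_suffix = folder_path.replace("/", "-")  # "shots-sq01-sh010"

    recycling_file = [f for f in created_files if file_suffix in f]
    if recycling_file:
        shot_audio_fpath = recycling_file.pop()  # reuse the existing trim
    else:
        shot_audio_fpath = None  # trim a new file with ffmpeg, see the next hunk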
@@ -84,68 +163,70 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
                 start_sec = relative_start_time.to_seconds()
                 duration_sec = audio_range.duration.to_seconds()
 
-                # temp audio file
-                audio_fpath = self.create_temp_file(name)
+                # shot related audio file
+                shot_audio_fpath = self.create_temp_file(file_suffix)
 
                 cmd = get_ffmpeg_tool_args(
                     "ffmpeg",
                     "-ss", str(start_sec),
                     "-t", str(duration_sec),
                     "-i", audio_file,
-                    audio_fpath
+                    shot_audio_fpath
                 )
 
                 # run subprocess
                 self.log.debug("Executing: {}".format(" ".join(cmd)))
                 run_subprocess(cmd, logger=self.log)
-            else:
-                audio_fpath = recycling_file.pop()
 
-            if "audio" in (
-                inst.data["families"] + [inst.data["productType"]]
-            ):
+                # add generated audio file to created files for recycling
+                if shot_audio_fpath not in created_files:
+                    created_files.append(shot_audio_fpath)
+            else:
+                shot_audio_fpath = recycling_file.pop()
+
+            # audio file needs to be published as representation
+            if audio_instance.data["productType"] == "audio":
                 # create empty representation attr
-                if "representations" not in inst.data:
-                    inst.data["representations"] = []
+                if "representations" not in audio_instance.data:
+                    audio_instance.data["representations"] = []
                 # add to representations
-                inst.data["representations"].append({
-                    "files": os.path.basename(audio_fpath),
+                audio_instance.data["representations"].append({
+                    "files": os.path.basename(shot_audio_fpath),
                     "name": "wav",
                     "ext": "wav",
-                    "stagingDir": os.path.dirname(audio_fpath),
+                    "stagingDir": os.path.dirname(shot_audio_fpath),
                     "frameStart": 0,
                     "frameEnd": duration
                 })
 
-            elif "reviewAudio" in inst.data.keys():
-                audio_attr = inst.data.get("audio") or []
+            # audio file needs to be reviewable too
+            elif "reviewAudio" in audio_instance.data.keys():
+                audio_attr = audio_instance.data.get("audio") or []
                 audio_attr.append({
-                    "filename": audio_fpath,
+                    "filename": shot_audio_fpath,
                     "offset": 0
                 })
-                inst.data["audio"] = audio_attr
+                audio_instance.data["audio"] = audio_attr
 
-            # add generated audio file to created files for recycling
-            if audio_fpath not in created_files:
-                created_files.append(audio_fpath)
-
-    def get_audio_instances(self, context):
-        """Return only instances which are having audio in families
-
-        Args:
-            context (pyblish.context): context of publisher
-
-        Returns:
-            list: list of selected instances
-        """
-        return [
-            _i for _i in context
-            # filter only those with audio product type or family
-            # and also with reviewAudio data key
-            if bool("audio" in (
-                _i.data.get("families", []) + [_i.data["productType"]])
-            ) or _i.data.get("reviewAudio")
-        ]
+            # Make sure if the audio instance is having siblink instances
+            # which needs audio for reviewable media so it is also added
+            # to its instance data
+            # Retrieve instance data from parent instance shot instance.
+            parent_instance_id = audio_instance.data["parent_instance_id"]
+            for sibl_instance in instances_by_parent_id[parent_instance_id]:
+                # exclude the same audio instance
+                if sibl_instance.id == audio_instance.id:
+                    continue
+
+                self.log.info(
+                    "Adding audio to Sibling instance: "
+                    f"{sibl_instance.data['label']}"
+                )
+                audio_attr = sibl_instance.data.get("audio") or []
+                audio_attr.append({
+                    "filename": shot_audio_fpath,
+                    "offset": 0
+                })
+                sibl_instance.data["audio"] = audio_attr
 
     def get_audio_track_items(self, otio_timeline):
         """Get all audio clips form OTIO audio tracks
@@ -321,19 +402,23 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
         os.remove(filters_tmp_filepath)
 
-    def create_temp_file(self, name):
+    def create_temp_file(self, file_suffix):
         """Create temp wav file
 
         Args:
-            name (str): name to be used in file name
+            file_suffix (str): name to be used in file name
 
         Returns:
             str: temp fpath
         """
-        name = name.replace("/", "_")
-        return os.path.normpath(
-            tempfile.mktemp(
-                prefix="pyblish_tmp_{}_".format(name),
-                suffix=".wav"
-            )
-        )
+        extension = ".wav"
+        # get 8 characters
+        hash = hashlib.md5(str(uuid.uuid4()).encode()).hexdigest()[:8]
+        file_name = f"{hash}_{file_suffix}{extension}"
+
+        if not self.temp_dir_path:
+            audio_temp_dir_path = tempfile.mkdtemp(prefix="AYON_audio_")
+            self.temp_dir_path = Path(audio_temp_dir_path)
+            self.temp_dir_path.mkdir(parents=True, exist_ok=True)
+
+        return (self.temp_dir_path / file_name).as_posix()
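create_temp_file now places every trimmed wav inside one lazily created mkdtemp directory and prefixes the name with eight random hex characters, instead of scattering tempfile.mktemp files with a pyblish_tmp_ prefix. A minimal standalone sketch of the same pattern:

    import hashlib
    import tempfile
    import uuid
    from pathlib import Path

    class TempAudioFiles:
        """Minimal stand-in for the plugin's lazy temp directory handling."""
        temp_dir_path = None

        def create_temp_file(self, file_suffix):
            # eight random hex characters keep repeated suffixes from colliding
            short_hash = hashlib.md5(str(uuid.uuid4()).encode()).hexdigest()[:8]
            file_name = f"{short_hash}_{file_suffix}.wav"

            # create the shared directory only once, on first use
            if not self.temp_dir_path:
                self.temp_dir_path = Path(tempfile.mkdtemp(prefix="AYON_audio_"))
            return (self.temp_dir_path / file_name).as_posix()

    tmp = TempAudioFiles()
    print(tmp.create_temp_file("shots-sq01-sh010"))
    # e.g. /tmp/AYON_audio_k3j2xq/1a2b3c4d_shots-sq01-sh010.wav

Keeping everything under a single directory makes cleanup simpler, and mkdtemp avoids the race-prone tempfile.mktemp call used before.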