diff --git a/openpype/hosts/hiero/otio/utils.py b/openpype/hosts/hiero/otio/utils.py
index f882a5d1f2..4c5d46bd51 100644
--- a/openpype/hosts/hiero/otio/utils.py
+++ b/openpype/hosts/hiero/otio/utils.py
@@ -68,7 +68,11 @@ def get_rate(item):
return None
num, den = item.framerate().toRational()
- rate = float(num) / float(den)
+
+ try:
+ rate = float(num) / float(den)
+ except ZeroDivisionError:
+ return None
if rate.is_integer():
return rate
diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py
index a1dee711b7..8cccdec99a 100644
--- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py
+++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py
@@ -24,7 +24,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
for track_item in selected_timeline_items:
- data = dict()
+ data = {}
clip_name = track_item.name()
# get openpype tag data
@@ -43,6 +43,11 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
tag_data["handleEnd"] = min(
tag_data["handleEnd"], int(track_item.handleOutLength()))
+ # add audio to families
+ with_audio = False
+ if tag_data.pop("audio"):
+ with_audio = True
+
# add tag data to instance data
data.update({
k: v for k, v in tag_data.items()
@@ -94,6 +99,17 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
+ if not with_audio:
+ return
+
+ # create audio subset instance
+ self.create_audio_instance(context, **data)
+
+ # add audioReview attribute to plate instance data
+ # if reviewTrack is on
+ if tag_data.get("reviewTrack") is not None:
+ instance.data["reviewAudio"] = True
+
def get_resolution_to_data(self, data, context):
assert data.get("otioClip"), "Missing `otioClip` data"
@@ -159,6 +175,46 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
+ def create_audio_instance(self, context, **data):
+ master_layer = data.get("heroTrack")
+
+ if not master_layer:
+ return
+
+ asset = data.get("asset")
+ item = data.get("item")
+ clip_name = item.name()
+
+ asset = data["asset"]
+ subset = "audioMain"
+
+ # insert family into families
+ family = "audio"
+
+ # form label
+ label = asset
+ if asset != clip_name:
+ label += " ({}) ".format(clip_name)
+ label += " {}".format(subset)
+ label += " [{}]".format(family)
+
+ data.update({
+ "name": "{}_{}".format(asset, subset),
+ "label": label,
+ "subset": subset,
+ "asset": asset,
+ "family": family,
+ "families": ["clip"]
+ })
+ # remove review track attr if any
+ data.pop("reviewTrack")
+
+ # create instance
+ instance = context.create_instance(**data)
+ self.log.info("Creating instance: {}".format(instance))
+ self.log.debug(
+ "_ instance.data: {}".format(pformat(instance.data)))
+
def get_otio_clip_instance_data(self, otio_timeline, track_item):
"""
Return otio objects for timeline, track and clip
diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py
index 457ceb1d56..838c5aa7a1 100644
--- a/openpype/lib/__init__.py
+++ b/openpype/lib/__init__.py
@@ -139,6 +139,7 @@ from .editorial import (
trim_media_range,
range_from_frames,
frames_to_secons,
+ frames_to_timecode,
make_sequence_collection
)
@@ -246,5 +247,6 @@ __all__ = [
"trim_media_range",
"range_from_frames",
"frames_to_secons",
+ "frames_to_timecode",
"make_sequence_collection"
]
diff --git a/openpype/lib/editorial.py b/openpype/lib/editorial.py
index 1dbc4d7954..bf9a0cb506 100644
--- a/openpype/lib/editorial.py
+++ b/openpype/lib/editorial.py
@@ -137,6 +137,11 @@ def frames_to_secons(frames, framerate):
return _ot.to_seconds(rt)
+def frames_to_timecode(frames, framerate):
+ rt = _ot.from_frames(frames, framerate)
+ return _ot.to_timecode(rt)
+
+
def make_sequence_collection(path, otio_range, metadata):
"""
Make collection from path otio range and otio metadata.
diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py b/openpype/plugins/publish/collect_otio_subset_resources.py
index d687c1920a..cebfc90630 100644
--- a/openpype/plugins/publish/collect_otio_subset_resources.py
+++ b/openpype/plugins/publish/collect_otio_subset_resources.py
@@ -22,6 +22,10 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
hosts = ["resolve", "hiero"]
def process(self, instance):
+
+ if "audio" in instance.data["family"]:
+ return
+
if not instance.data.get("representations"):
instance.data["representations"] = list()
version_data = dict()
diff --git a/openpype/plugins/publish/extract_otio_audio_tracks.py b/openpype/plugins/publish/extract_otio_audio_tracks.py
new file mode 100644
index 0000000000..43e40097f7
--- /dev/null
+++ b/openpype/plugins/publish/extract_otio_audio_tracks.py
@@ -0,0 +1,295 @@
+import os
+import pyblish
+import openpype.api
+from openpype.lib import (
+ get_ffmpeg_tool_path
+)
+import tempfile
+import opentimelineio as otio
+
+
+class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
+ """Extract Audio tracks from OTIO timeline.
+
+ Process will merge all found audio tracks into one long .wav file at first
+ stage. Then it will trim it into individual short audio files relative to
+ asset length and add it to each marked instance data representation. This
+ is influenced by instance data audio attribute """
+
+ order = pyblish.api.ExtractorOrder - 0.44
+ label = "Extract OTIO Audio Tracks"
+ hosts = ["hiero", "resolve"]
+
+ # FFmpeg tools paths
+ ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
+
+ def process(self, context):
+ """Convert otio audio track's content to audio representations
+
+ Args:
+ context (pyblish.Context): context of publisher
+ """
+ # split the long audio file into pieces divided by instances
+ audio_instances = self.get_audio_instances(context)
+ self.log.debug("Audio instances: {}".format(len(audio_instances)))
+
+ if len(audio_instances) < 1:
+ self.log.info("No audio instances available")
+ return
+
+ # get sequence
+ otio_timeline = context.data["otioTimeline"]
+
+ # temp file
+ audio_temp_fpath = self.create_temp_file("audio")
+
+ # get all audio inputs from otio timeline
+ audio_inputs = self.get_audio_track_items(otio_timeline)
+
+ # create empty audio with longest duration
+ empty = self.create_empty(audio_inputs)
+
+ # add empty to list of audio inputs
+ audio_inputs.insert(0, empty)
+
+ # create cmd
+ cmd = self.ffmpeg_path + " "
+ cmd += self.create_cmd(audio_inputs)
+ cmd += audio_temp_fpath
+
+ # run subprocess
+ self.log.debug("Executing: {}".format(cmd))
+ openpype.api.run_subprocess(
+ cmd, shell=True, logger=self.log
+ )
+
+ # remove empty
+ os.remove(empty["mediaPath"])
+
+ # cut instance framerange and add to representations
+ self.add_audio_to_instances(audio_temp_fpath, audio_instances)
+
+ # remove full mixed audio file
+ os.remove(audio_temp_fpath)
+
+ def add_audio_to_instances(self, audio_file, instances):
+ created_files = []
+ for inst in instances:
+ name = inst.data["asset"]
+
+ recycling_file = [f for f in created_files if name in f]
+
+ # frameranges
+ timeline_in_h = inst.data["clipInH"]
+ timeline_out_h = inst.data["clipOutH"]
+ fps = inst.data["fps"]
+
+ # create duration
+ duration = (timeline_out_h - timeline_in_h) + 1
+
+ # ffmpeg generates a new file only if it doesn't exist already
+ if not recycling_file:
+ # convert to seconds
+ start_sec = float(timeline_in_h / fps)
+ duration_sec = float(duration / fps)
+
+ # temp audio file
+ audio_fpath = self.create_temp_file(name)
+
+ cmd = " ".join([
+ self.ffmpeg_path,
+ "-ss {}".format(start_sec),
+ "-t {}".format(duration_sec),
+ "-i {}".format(audio_file),
+ audio_fpath
+ ])
+
+ # run subprocess
+ self.log.debug("Executing: {}".format(cmd))
+ openpype.api.run_subprocess(
+ cmd, shell=True, logger=self.log
+ )
+ else:
+ audio_fpath = recycling_file.pop()
+
+ if "audio" in (inst.data["families"] + [inst.data["family"]]):
+ # create empty representation attr
+ if "representations" not in inst.data:
+ inst.data["representations"] = []
+ # add to representations
+ inst.data["representations"].append({
+ "files": os.path.basename(audio_fpath),
+ "name": "wav",
+ "ext": "wav",
+ "stagingDir": os.path.dirname(audio_fpath),
+ "frameStart": 0,
+ "frameEnd": duration
+ })
+
+ elif "reviewAudio" in inst.data.keys():
+ audio_attr = inst.data.get("audio") or []
+ audio_attr.append({
+ "filename": audio_fpath,
+ "offset": 0
+ })
+ inst.data["audio"] = audio_attr
+
+ # add generated audio file to created files for recycling
+ if audio_fpath not in created_files:
+ created_files.append(audio_fpath)
+
+ def get_audio_instances(self, context):
+ """Return only instances which are having audio in families
+
+ Args:
+ context (pyblish.context): context of publisher
+
+ Returns:
+ list: list of selected instances
+ """
+ return [
+ _i for _i in context
+ # filter only those with audio family
+ # and also with reviewAudio data key
+ if bool("audio" in (
+ _i.data.get("families", []) + [_i.data["family"]])
+ ) or _i.data.get("reviewAudio")
+ ]
+
+ def get_audio_track_items(self, otio_timeline):
+ """Get all audio clips from OTIO audio tracks
+
+ Args:
+ otio_timeline (otio.schema.timeline): timeline object
+
+ Returns:
+ list: list of audio clip dictionaries
+ """
+ output = []
+ # go through all audio tracks
+ for otio_track in otio_timeline.tracks:
+ if "Audio" not in otio_track.kind:
+ continue
+ self.log.debug("_" * 50)
+ playhead = 0
+ for otio_clip in otio_track:
+ self.log.debug(otio_clip)
+ if isinstance(otio_clip, otio.schema.Gap):
+ playhead += otio_clip.source_range.duration.value
+ elif isinstance(otio_clip, otio.schema.Clip):
+ start = otio_clip.source_range.start_time.value
+ duration = otio_clip.source_range.duration.value
+ fps = otio_clip.source_range.start_time.rate
+ media_path = otio_clip.media_reference.target_url
+ input = {
+ "mediaPath": media_path,
+ "delayFrame": playhead,
+ "startFrame": start,
+ "durationFrame": duration,
+ "delayMilSec": int(float(playhead / fps) * 1000),
+ "startSec": float(start / fps),
+ "durationSec": float(duration / fps),
+ "fps": fps
+ }
+ if input not in output:
+ output.append(input)
+ self.log.debug("__ input: {}".format(input))
+ playhead += otio_clip.source_range.duration.value
+
+ return output
+
+ def create_empty(self, inputs):
+ """Create an empty audio file used as duration placeholder
+
+ Args:
+ inputs (list): list of audio clip dictionaries
+
+ Returns:
+ dict: audio clip dictionary
+ """
+ # temp file
+ empty_fpath = self.create_temp_file("empty")
+
+ # get all end frames
+ end_secs = [(_i["delayFrame"] + _i["durationFrame"]) / _i["fps"]
+ for _i in inputs]
+ # get the max of end frames
+ max_duration_sec = max(end_secs)
+
+ # create empty cmd
+ cmd = " ".join([
+ self.ffmpeg_path,
+ "-f lavfi",
+ "-i anullsrc=channel_layout=stereo:sample_rate=48000",
+ "-t {}".format(max_duration_sec),
+ empty_fpath
+ ])
+
+ # generate empty with ffmpeg
+ # run subprocess
+ self.log.debug("Executing: {}".format(cmd))
+
+ openpype.api.run_subprocess(
+ cmd, shell=True, logger=self.log
+ )
+
+ # return dict with output
+ return {
+ "mediaPath": empty_fpath,
+ "delayMilSec": 0,
+ "startSec": 0.00,
+ "durationSec": max_duration_sec
+ }
+
+ def create_cmd(self, inputs):
+ """Creating multiple input cmd string
+
+ Args:
+ inputs (list): list of input dicts. Order matters.
+
+ Returns:
+ str: the command body
+
+ """
+ # create cmd segments
+ _inputs = ""
+ _filters = "-filter_complex \""
+ _channels = ""
+ for index, input in enumerate(inputs):
+ input_format = input.copy()
+ input_format.update({"i": index})
+ _inputs += (
+ "-ss {startSec} "
+ "-t {durationSec} "
+ "-i \"{mediaPath}\" "
+ ).format(**input_format)
+
+ _filters += "[{i}]adelay={delayMilSec}:all=1[r{i}]; ".format(
+ **input_format)
+ _channels += "[r{}]".format(index)
+
+ # merge all cmd segments together
+ cmd = _inputs + _filters + _channels
+ cmd += str(
+ "amix=inputs={inputs}:duration=first:"
+ "dropout_transition=1000,volume={inputs}[a]\" "
+ ).format(inputs=len(inputs))
+ cmd += "-map \"[a]\" "
+
+ return cmd
+
+ def create_temp_file(self, name):
+ """Create temp wav file
+
+ Args:
+ name (str): name to be used in file name
+
+ Returns:
+ str: temp fpath
+ """
+ return os.path.normpath(
+ tempfile.mktemp(
+ prefix="pyblish_tmp_{}_".format(name),
+ suffix=".wav"
+ )
+ )
diff --git a/openpype/tools/standalonepublish/widgets/widget_family.py b/openpype/tools/standalonepublish/widgets/widget_family.py
index 50335e3109..86663c8ee0 100644
--- a/openpype/tools/standalonepublish/widgets/widget_family.py
+++ b/openpype/tools/standalonepublish/widgets/widget_family.py
@@ -255,9 +255,9 @@ class FamilyWidget(QtWidgets.QWidget):
defaults = list(plugin.defaults)
# Replace
- compare_regex = re.compile(
- subset_name.replace(user_input_text, "(.+)")
- )
+ compare_regex = re.compile(re.sub(
+ user_input_text, "(.+)", subset_name, flags=re.IGNORECASE
+ ))
subset_hints = set()
if user_input_text:
for _name in existing_subset_names:
diff --git a/website/docs/admin_hosts_resolve.md b/website/docs/admin_hosts_resolve.md
new file mode 100644
index 0000000000..d2e027205d
--- /dev/null
+++ b/website/docs/admin_hosts_resolve.md
@@ -0,0 +1,103 @@
+---
+id: admin_hosts_resolve
+title: DaVinci Resolve Setup
+sidebar_label: DaVinci Resolve
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## Resolve requirements
+Due to the way Resolve handles Python and Python scripts, there are a few required steps on any machine that will be using OpenPype with Resolve.
+
+### Installing Resolve's own python 3.6 interpreter.
+Resolve uses a hardcoded method to look for the python executable path. All of the following paths are defined automatically by the Python msi installer. We are using Python 3.6.2.
+
+