Merge branch 'develop' into feature/1159-maya-safer-handling-of-expected-render-output-names

Ondřej Samohel 2021-05-12 18:40:17 +02:00 committed by GitHub
commit 6e88935ea9
32 changed files with 694 additions and 6 deletions

@@ -68,7 +68,11 @@ def get_rate(item):
         return None
 
     num, den = item.framerate().toRational()
-    rate = float(num) / float(den)
+
+    try:
+        rate = float(num) / float(den)
+    except ZeroDivisionError:
+        return None
 
     if rate.is_integer():
         return rate

@@ -24,7 +24,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
 
         for track_item in selected_timeline_items:
-            data = dict()
+            data = {}
             clip_name = track_item.name()
 
             # get openpype tag data
@@ -43,6 +43,11 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
             tag_data["handleEnd"] = min(
                 tag_data["handleEnd"], int(track_item.handleOutLength()))
 
+            # add audio to families
+            with_audio = False
+            if tag_data.pop("audio"):
+                with_audio = True
+
             # add tag data to instance data
             data.update({
                 k: v for k, v in tag_data.items()
@@ -94,6 +99,17 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
             self.log.debug(
                 "_ instance.data: {}".format(pformat(instance.data)))
 
+            if not with_audio:
+                return
+
+            # create audio subset instance
+            self.create_audio_instance(context, **data)
+
+            # add audioReview attribute to plate instance data
+            # if reviewTrack is on
+            if tag_data.get("reviewTrack") is not None:
+                instance.data["reviewAudio"] = True
+
     def get_resolution_to_data(self, data, context):
         assert data.get("otioClip"), "Missing `otioClip` data"
@@ -159,6 +175,46 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
         self.log.debug(
             "_ instance.data: {}".format(pformat(instance.data)))
 
+    def create_audio_instance(self, context, **data):
+        master_layer = data.get("heroTrack")
+
+        if not master_layer:
+            return
+
+        asset = data.get("asset")
+        item = data.get("item")
+        clip_name = item.name()
+
+        asset = data["asset"]
+        subset = "audioMain"
+
+        # insert family into families
+        family = "audio"
+
+        # form label
+        label = asset
+        if asset != clip_name:
+            label += " ({}) ".format(clip_name)
+        label += " {}".format(subset)
+        label += " [{}]".format(family)
+
+        data.update({
+            "name": "{}_{}".format(asset, subset),
+            "label": label,
+            "subset": subset,
+            "asset": asset,
+            "family": family,
+            "families": ["clip"]
+        })
+
+        # remove review track attr if any
+        data.pop("reviewTrack")
+
+        # create instance
+        instance = context.create_instance(**data)
+        self.log.info("Creating instance: {}".format(instance))
+        self.log.debug(
+            "_ instance.data: {}".format(pformat(instance.data)))
+
     def get_otio_clip_instance_data(self, otio_timeline, track_item):
         """
         Return otio objects for timeline, track and clip
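
For a quick sense of the values `create_audio_instance` assembles, here is its name/label logic run with invented values (asset `sh010`, clip `clip01`):

```python
# Hypothetical values illustrating the name/label assembly above.
asset, clip_name, subset, family = "sh010", "clip01", "audioMain", "audio"

label = asset
if asset != clip_name:
    label += " ({}) ".format(clip_name)
label += " {}".format(subset)
label += " [{}]".format(family)

print("{}_{}".format(asset, subset))  # sh010_audioMain
print(label)  # sh010 (clip01)  audioMain [audio]
```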

@@ -139,6 +139,7 @@ from .editorial import (
     trim_media_range,
     range_from_frames,
     frames_to_secons,
+    frames_to_timecode,
     make_sequence_collection
 )
 
@@ -246,5 +247,6 @@ __all__ = [
     "trim_media_range",
     "range_from_frames",
     "frames_to_secons",
+    "frames_to_timecode",
     "make_sequence_collection"
 ]

@@ -137,6 +137,11 @@ def frames_to_secons(frames, framerate):
     return _ot.to_seconds(rt)
 
 
+def frames_to_timecode(frames, framerate):
+    rt = _ot.from_frames(frames, framerate)
+    return _ot.to_timecode(rt)
+
+
 def make_sequence_collection(path, otio_range, metadata):
     """
     Make collection from path otio range and otio metadata.

@@ -22,6 +22,10 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
     hosts = ["resolve", "hiero"]
 
     def process(self, instance):
+        if "audio" in instance.data["family"]:
+            return
+
         if not instance.data.get("representations"):
             instance.data["representations"] = list()
 
         version_data = dict()

@@ -0,0 +1,295 @@
import os
import pyblish
import openpype.api
from openpype.lib import (
    get_ffmpeg_tool_path
)
import tempfile
import opentimelineio as otio


class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
    """Extract audio tracks from an OTIO timeline.

    The process first merges all found audio tracks into one long .wav
    file. It then trims it into individual short audio files relative to
    asset length and adds them to each marked instance's representation
    data. This is influenced by the instance data `audio` attribute.
    """

    order = pyblish.api.ExtractorOrder - 0.44
    label = "Extract OTIO Audio Tracks"
    hosts = ["hiero", "resolve"]

    # FFmpeg tool path
    ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")

    def process(self, context):
        """Convert otio audio track's content to audio representations

        Args:
            context (pyblish.Context): context of publisher
        """
        # split the long audio file into pieces divided by instances
        audio_instances = self.get_audio_instances(context)
        self.log.debug("Audio instances: {}".format(len(audio_instances)))

        if len(audio_instances) < 1:
            self.log.info("No audio instances available")
            return

        # get sequence
        otio_timeline = context.data["otioTimeline"]

        # temp file
        audio_temp_fpath = self.create_temp_file("audio")

        # get all audio inputs from otio timeline
        audio_inputs = self.get_audio_track_items(otio_timeline)

        # create empty audio with longest duration
        empty = self.create_empty(audio_inputs)

        # add empty to list of audio inputs
        audio_inputs.insert(0, empty)

        # create cmd
        cmd = self.ffmpeg_path + " "
        cmd += self.create_cmd(audio_inputs)
        cmd += audio_temp_fpath

        # run subprocess
        self.log.debug("Executing: {}".format(cmd))
        openpype.api.run_subprocess(
            cmd, shell=True, logger=self.log
        )

        # remove empty
        os.remove(empty["mediaPath"])

        # cut instance frame ranges and add them to representations
        self.add_audio_to_instances(audio_temp_fpath, audio_instances)

        # remove full mixed audio file
        os.remove(audio_temp_fpath)

    def add_audio_to_instances(self, audio_file, instances):
        created_files = []
        for inst in instances:
            name = inst.data["asset"]

            recycling_file = [f for f in created_files if name in f]

            # frame ranges
            timeline_in_h = inst.data["clipInH"]
            timeline_out_h = inst.data["clipOutH"]
            fps = inst.data["fps"]

            # calculate duration
            duration = (timeline_out_h - timeline_in_h) + 1

            # ffmpeg generates a new file only if it doesn't exist already
            if not recycling_file:
                # convert to seconds
                start_sec = float(timeline_in_h / fps)
                duration_sec = float(duration / fps)

                # temp audio file
                audio_fpath = self.create_temp_file(name)

                cmd = " ".join([
                    self.ffmpeg_path,
                    "-ss {}".format(start_sec),
                    "-t {}".format(duration_sec),
                    "-i {}".format(audio_file),
                    audio_fpath
                ])

                # run subprocess
                self.log.debug("Executing: {}".format(cmd))
                openpype.api.run_subprocess(
                    cmd, shell=True, logger=self.log
                )
            else:
                audio_fpath = recycling_file.pop()

            if "audio" in (inst.data["families"] + [inst.data["family"]]):
                # create empty representation attr
                if "representations" not in inst.data:
                    inst.data["representations"] = []
                # add to representations
                inst.data["representations"].append({
                    "files": os.path.basename(audio_fpath),
                    "name": "wav",
                    "ext": "wav",
                    "stagingDir": os.path.dirname(audio_fpath),
                    "frameStart": 0,
                    "frameEnd": duration
                })
            elif "reviewAudio" in inst.data.keys():
                audio_attr = inst.data.get("audio") or []
                audio_attr.append({
                    "filename": audio_fpath,
                    "offset": 0
                })
                inst.data["audio"] = audio_attr

            # add generated audio file to created files for recycling
            if audio_fpath not in created_files:
                created_files.append(audio_fpath)

    def get_audio_instances(self, context):
        """Return only instances which have audio in families

        Args:
            context (pyblish.context): context of publisher

        Returns:
            list: list of selected instances
        """
        return [
            _i for _i in context
            # filter only those with audio family
            # or with the reviewAudio data key
            if bool("audio" in (
                _i.data.get("families", []) + [_i.data["family"]])
            ) or _i.data.get("reviewAudio")
        ]

    def get_audio_track_items(self, otio_timeline):
        """Get all audio clips from OTIO audio tracks

        Args:
            otio_timeline (otio.schema.timeline): timeline object

        Returns:
            list: list of audio clip dictionaries
        """
        output = []
        # go through all audio tracks
        for otio_track in otio_timeline.tracks:
            if "Audio" not in otio_track.kind:
                continue
            self.log.debug("_" * 50)
            playhead = 0
            for otio_clip in otio_track:
                self.log.debug(otio_clip)
                if isinstance(otio_clip, otio.schema.Gap):
                    playhead += otio_clip.source_range.duration.value
                elif isinstance(otio_clip, otio.schema.Clip):
                    start = otio_clip.source_range.start_time.value
                    duration = otio_clip.source_range.duration.value
                    fps = otio_clip.source_range.start_time.rate
                    media_path = otio_clip.media_reference.target_url
                    input = {
                        "mediaPath": media_path,
                        "delayFrame": playhead,
                        "startFrame": start,
                        "durationFrame": duration,
                        "delayMilSec": int(float(playhead / fps) * 1000),
                        "startSec": float(start / fps),
                        "durationSec": float(duration / fps),
                        "fps": fps
                    }
                    if input not in output:
                        output.append(input)
                        self.log.debug("__ input: {}".format(input))
                    playhead += otio_clip.source_range.duration.value

        return output

    def create_empty(self, inputs):
        """Create an empty audio file used as a duration placeholder

        Args:
            inputs (list): list of audio clip dictionaries

        Returns:
            dict: audio clip dictionary
        """
        # temp file
        empty_fpath = self.create_temp_file("empty")

        # get all end times in seconds
        end_secs = [(_i["delayFrame"] + _i["durationFrame"]) / _i["fps"]
                    for _i in inputs]
        # get the latest end time
        max_duration_sec = max(end_secs)

        # create empty cmd
        cmd = " ".join([
            self.ffmpeg_path,
            "-f lavfi",
            "-i anullsrc=channel_layout=stereo:sample_rate=48000",
            "-t {}".format(max_duration_sec),
            empty_fpath
        ])

        # generate empty audio with ffmpeg
        self.log.debug("Executing: {}".format(cmd))
        openpype.api.run_subprocess(
            cmd, shell=True, logger=self.log
        )

        # return dict with output
        return {
            "mediaPath": empty_fpath,
            "delayMilSec": 0,
            "startSec": 0.00,
            "durationSec": max_duration_sec
        }

    def create_cmd(self, inputs):
        """Create a multiple-input cmd string

        Args:
            inputs (list): list of input dicts. Order matters.

        Returns:
            str: the command body
        """
        # create cmd segments
        _inputs = ""
        _filters = "-filter_complex \""
        _channels = ""
        for index, input in enumerate(inputs):
            input_format = input.copy()
            input_format.update({"i": index})

            _inputs += (
                "-ss {startSec} "
                "-t {durationSec} "
                "-i \"{mediaPath}\" "
            ).format(**input_format)

            _filters += "[{i}]adelay={delayMilSec}:all=1[r{i}]; ".format(
                **input_format)
            _channels += "[r{}]".format(index)

        # merge all cmd segments together
        cmd = _inputs + _filters + _channels
        cmd += str(
            "amix=inputs={inputs}:duration=first:"
            "dropout_transition=1000,volume={inputs}[a]\" "
        ).format(inputs=len(inputs))
        cmd += "-map \"[a]\" "

        return cmd

    def create_temp_file(self, name):
        """Create a temp wav file

        Args:
            name (str): name to be used in the file name

        Returns:
            str: temp fpath
        """
        return os.path.normpath(
            tempfile.mktemp(
                prefix="pyblish_tmp_{}_".format(name),
                suffix=".wav"
            )
        )
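
For orientation, the mixdown command that `create_cmd` assembles for one silent placeholder plus a single clip starting one second into the timeline would look roughly like this (file names, durations and delays are invented for illustration):

```bash
ffmpeg \
  -ss 0 -t 12.0 -i "empty.wav" \
  -ss 0.0 -t 5.0 -i "/tmp/clip1.wav" \
  -filter_complex "[0]adelay=0:all=1[r0]; [1]adelay=1000:all=1[r1]; [r0][r1]amix=inputs=2:duration=first:dropout_transition=1000,volume=2[a]" \
  -map "[a]" audio_mix.wav
```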

@@ -255,9 +255,9 @@ class FamilyWidget(QtWidgets.QWidget):
             defaults = list(plugin.defaults)
 
             # Replace
-            compare_regex = re.compile(
-                subset_name.replace(user_input_text, "(.+)")
-            )
+            compare_regex = re.compile(re.sub(
+                user_input_text, "(.+)", subset_name, flags=re.IGNORECASE
+            ))
             subset_hints = set()
             if user_input_text:
                 for _name in existing_subset_names:
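
The switch from `str.replace` to `re.sub` makes the subset-hint matching case-insensitive. A minimal sketch of the difference, with invented values:

```python
import re

subset_name = "mainDefault"
user_input_text = "Main"

# Old: case-sensitive replace finds no "Main" in "mainDefault",
# so the compiled pattern stays the literal "mainDefault".
old = re.compile(subset_name.replace(user_input_text, "(.+)"))

# New: case-insensitive substitution yields "(.+)Default",
# which matches any existing subset ending in "Default".
new = re.compile(re.sub(
    user_input_text, "(.+)", subset_name, flags=re.IGNORECASE
))

print(old.pattern)  # mainDefault
print(new.pattern)  # (.+)Default
```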

@@ -0,0 +1,103 @@
---
id: admin_hosts_resolve
title: DaVinci Resolve Setup
sidebar_label: DaVinci Resolve
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

## Resolve requirements

Due to the way Resolve handles Python and Python scripts, a few required steps need to be done on any machine that will be using OpenPype with Resolve.

### Installing Resolve's own Python 3.6 interpreter

Resolve uses a hardcoded method to look for the Python executable path. All of the following paths are created automatically by the Python installer. We are using Python 3.6.2.

<Tabs
  groupId="platforms"
  defaultValue="win"
  values={[
    {label: 'Windows', value: 'win'},
    {label: 'Linux', value: 'linux'},
    {label: 'Mac', value: 'mac'},
  ]}>

<TabItem value="win">

`%LOCALAPPDATA%\Programs\Python\Python36`

</TabItem>
<TabItem value="linux">

`/opt/Python/3.6/bin`

</TabItem>
<TabItem value="mac">

`~/Library/Python/3.6/bin`

</TabItem>
</Tabs>
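
To quickly check that the interpreter is installed where Resolve expects it, you can ask it for its version (Windows path shown; use the Linux or Mac path from the tabs above):

```bash
%LOCALAPPDATA%\Programs\Python\Python36\python.exe --version
# Python 3.6.2
```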
### Installing PySide2 into Python 3.6 for correct GUI work

OpenPype uses its own window widget inside Resolve; for that reason PySide2 has to be installed into the Python 3.6 interpreter (as explained above).

<Tabs
  groupId="platforms"
  defaultValue="win"
  values={[
    {label: 'Windows', value: 'win'},
    {label: 'Linux', value: 'linux'},
    {label: 'Mac', value: 'mac'},
  ]}>

<TabItem value="win">

Paste into any terminal of your choice:

```bash
%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install PySide2
```

</TabItem>
<TabItem value="linux">

Paste into any terminal of your choice:

```bash
/opt/Python/3.6/bin/python -m pip install PySide2
```

</TabItem>
<TabItem value="mac">

Paste into any terminal of your choice:

```bash
~/Library/Python/3.6/bin/python -m pip install PySide2
```

</TabItem>
</Tabs>
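
To confirm PySide2 is importable from that interpreter, a quick check (Linux path shown; adjust for your platform):

```bash
/opt/Python/3.6/bin/python -c "import PySide2; print(PySide2.__version__)"
```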
<div class="row markdown">

### Set Resolve's Fusion settings for the Python 3.6 interpreter

<div class="col col--6 markdown">

As shown in the pictures below, go to the Fusion tab, then in the Fusion menu find Fusion Settings. Go to Fusion/Script, find Default Python Version, and switch it to Python 3.6.

</div>
<div class="col col--6 markdown">

![Create menu](assets/resolve_fusion_tab.png)
![Create menu](assets/resolve_fusion_menu.png)
![Create menu](assets/resolve_fusion_script_settings.png)

</div>
</div>

@@ -0,0 +1,216 @@
---
id: artist_hosts_resolve
title: DaVinci Resolve
sidebar_label: DaVinci Resolve
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

:::warning
Before you can start with the OpenPype tools in DaVinci Resolve, its own Python 3.6 interpreter and PySide2 have to be installed. See [Installation of python and pyside](#installation-of-python-and-pyside) for more information.
:::

## OpenPype global tools

- [Work Files](artist_tools.md#workfiles)
- [Create](artist_tools.md#creator)
- [Load](artist_tools.md#loader)
- [Manage (Inventory)](artist_tools.md#inventory)
- [Publish](artist_tools.md#publisher)

<div class="row markdown">

## Creating Shots from timeline items

Before a clip can be published with the [Publisher](artist_tools.md#publisher), the timeline item has to be marked with OpenPype metadata markers. This converts it into a publishable subset.

Let's do it step by step.

</div>

<div class="row markdown">

### Color clips before opening Create menu

Timeline video clips should be colored `Chocolate` for OpenPype to see them as selected for subset creation.

<div class="col col--6 markdown">

![Create menu](assets/resolve_select_clips_timeline_chocolate.png)

</div>
</div>

### Rename timeline track names

<div class="row markdown">
<div class="col col--6 markdown">

To work with dynamic subset names, which are based on track names, it is recommended to rename tracks after the type of plates their clips represent. Commonly used names are `main`, `review`, `fg01`, `fg02`, `bg`, `bg01`, etc. It is completely up to you, but we recommend always having at least a `main` plate. For example, if a clip is on the track **element** and the subset family is set to **plate**, then the resulting subset name will be **plateElement** (see the sketch after this section).
<br></br>

</div>
<div class="col col--6 markdown">

![Create menu](assets/resolve_creator_subset_name.png)
The name of the resulting *subset* can be seen in the **OpenPypeData** marker.
<br></br><br></br>

</div>
<div class="col col--6 markdown">

![Create menu](assets/resolve_remame_track_names.png)
A simple track setup using only the `main` and `review` track names.

</div>
<div class="col col--6 markdown">

![Create menu](assets/resolve_create_vertical_rename_timeline.png)
An example of used track names. The yellow frame highlights vertically aligned clips, which are going to be renamed and grouped together under one asset (shot) name. The concept of vertical renaming is explained later in [Vertical synchronization of subset attributes](#vertical-synchronization-of-subset-attributes).

</div>
</div>
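
A minimal sketch of the naming rule described above (illustrative only, not the actual OpenPype implementation):

```python
# Hypothetical: subset name = family + capitalized track name.
def subset_name(family, track):
    return family + track[0].upper() + track[1:]

print(subset_name("plate", "element"))  # plateElement
print(subset_name("plate", "main"))     # plateMain
```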
### Create menu...

<div class="row markdown">
<div class="col col--6 markdown">

After all clips intended to be converted to publishable instances are colored `Chocolate`, you can open the OpenPype menu.

</div>
<div class="col col--6 markdown">

![Create menu](assets/resolve_menu_openpype.png)

</div>
</div>

<div class="row markdown">
<div class="col col--6 markdown">

After the menu widget has opened (it can take a while, so please be patient), hit `Create ...`, set **Use selection** to active, and set the family to **Create Publishable Clips**.

The subset name can stay as it is; it is not going to be used, because each clip will generate its own name.

</div>
<div class="col col--6 markdown">

![Create menu](assets/resolve_create_clips.png)

</div>
</div>

<div class="row markdown">
<div class="col col--6 markdown">

The new window that opens lets you define various attributes for your future subsets and shots.

Set **Rename clips** to active if you wish to use different shot names in the pipeline than the original clip names conformed from EDL/XML.

- **Count sequence from** - start of the shot numbering, used when `#` appears in one of the keywords
- **Stepping number** - sequential gaps in the numbering

As you can see in the `{shot}` key within the *Shot Template Keywords* section, you can use the `#` symbol to define the padding of the sequence number and where it is going to be used (see the sketch after this section).

</div>
<div class="col col--6 markdown">

![Create menu](assets/resolve_create_renaming_clips.png)

</div>
</div>
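
An illustrative sketch of how `#` padding together with **Count sequence from** and **Stepping number** could resolve (hypothetical helper, not the actual OpenPype code):

```python
# Hypothetical: a run of "#" defines zero-padding; start/step drive numbering.
def number_clips(template, start, step, count):
    padding = template.count("#")
    return [
        template.replace("#" * padding, str(start + i * step).zfill(padding))
        for i in range(count)
    ]

print(number_clips("sh###", 10, 10, 3))  # ['sh010', 'sh020', 'sh030']
```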
<div class="row markdown">
<div class="col col--6 markdown">

Notice the relationship between the following sections. Keys from the **Shot Template Keywords** section are used to format the templates in the **Shot Hierarchy And Rename Settings** section.

**Shot parent hierarchy** forms the parents of the asset (shot); *the hidden root for this is the project folder*. For this example template we get the resulting string `shots/sq01`.

**Clip name template**, in the context of a clip sitting on the track named `main` in second position, resolves to `mainsq01sh020`. This is because the template hosts the `{_track_}` key, which inherits its name from the timeline track. The other allowed keys are listed below, and a formatting sketch follows this section:

- `{_sequence_}`: timeline name
- `{_clip_}`: clip name
- `{_trackIndex_}`: position of the track on the timeline, counted from the bottom
- `{_clipIndex_}`: clip position on the timeline, counted from the left

</div>
<div class="col col--6 markdown">

![Create menu](assets/resolve_create_template_filling.png)

</div>
</div>
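
A sketch of how the template keys could fill in, reproducing the example strings above (the template strings are invented for illustration):

```python
# Hypothetical templates and token values yielding the example results.
hierarchy_template = "shots/{sequence}"
clip_name_template = "{_track_}{_sequence_}{shot}"

tokens = {
    "sequence": "sq01",
    "_track_": "main",
    "_sequence_": "sq01",
    "shot": "sh020",
}

print(hierarchy_template.format(**tokens))  # shots/sq01
print(clip_name_template.format(**tokens))  # mainsq01sh020
```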
### Vertical synchronization of subset attributes

If you are only working with two tracks on the timeline, where the `main` track is used as plates for compositors and the `review` track holds mp4 clips for offlines and web preview, **Enable vertical sync** can be deactivated.

In a multiple-track scenario - as mentioned [here](#rename-timeline-track-names) - it is recommended to activate **Enable vertical sync** and set the hero (driving) track to *main*. This ensures that all clips corresponding to the same shot share the same publishing parameters.
<br></br>

<div class="row markdown">
<div class="col col--6 markdown">

![Create menu](assets/resolve_create_single_track_rename_hero_track.png)

</div>
<div class="col col--6 markdown">

![Create menu](assets/resolve_create_vertical_rename_creator_ui.png)

</div>
</div>

## Publishing Shots

<div class="row markdown">
<div class="col col--6 markdown">

Once all `Chocolate` colored clips have gone through the [creator](#create-menu), have been colored `Pink`, and a marker has been created for each of them, they have been successfully converted to publishable clips. Now we can run the **Publisher** - its button can be found in the OpenPype menu.
<br></br>

</div>
<div class="row markdown">
<div class="col col--6 markdown">

![Create menu](assets/resolve_publish_instance_review_main.png)
Notice that the main track clips and the review clips have been merged into one instance. And since the main clip is the `hero` clip, it also holds all new shot metadata. For that reason a second instance with the `shot` family is created for each. This instance creates the whole shot hierarchy and passes frame range attributes to the shot (asset).

</div>
</div>

<div class="row markdown">
<div class="col col--6 markdown">

![Create menu](assets/resolve_publish_instance_other_plateSubsets.png)
Also notice how the subset name is formed from a *track* name and the *subset family* from the previous steps.

It is also important to notice the asset name in *OpenPypeData* on the marker - the name is the same for all **vertically renamed** shots, as they have been grouped together. Unfortunately Resolve does not allow renaming the clips, so the only way to verify is to look at the marker's metadata.

</div>
</div>
</div>

22 binary image files not shown (new documentation screenshots, 4.1 KiB–173 KiB each).

@@ -22,6 +22,7 @@ module.exports = {
     "artist_hosts_blender",
     "artist_hosts_harmony",
     "artist_hosts_aftereffects",
+    "artist_hosts_resolve",
     "artist_hosts_photoshop",
     "artist_hosts_tvpaint",
     "artist_hosts_unreal",
@@ -83,7 +84,9 @@ module.exports = {
       label: "Integrations",
       items: [
         "admin_hosts_blender",
-        "admin_hosts_maya"
+        "admin_hosts_maya",
+        "admin_hosts_resolve"
       ],
     },
     {