diff --git a/openpype/hosts/hiero/otio/utils.py b/openpype/hosts/hiero/otio/utils.py index f882a5d1f2..4c5d46bd51 100644 --- a/openpype/hosts/hiero/otio/utils.py +++ b/openpype/hosts/hiero/otio/utils.py @@ -68,7 +68,11 @@ def get_rate(item): return None num, den = item.framerate().toRational() - rate = float(num) / float(den) + + try: + rate = float(num) / float(den) + except ZeroDivisionError: + return None if rate.is_integer(): return rate diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py index a1dee711b7..8cccdec99a 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py @@ -24,7 +24,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin): for track_item in selected_timeline_items: - data = dict() + data = {} clip_name = track_item.name() # get openpype tag data @@ -43,6 +43,11 @@ class PrecollectInstances(pyblish.api.ContextPlugin): tag_data["handleEnd"] = min( tag_data["handleEnd"], int(track_item.handleOutLength())) + # add audio to families + with_audio = False + if tag_data.pop("audio"): + with_audio = True + # add tag data to instance data data.update({ k: v for k, v in tag_data.items() @@ -94,6 +99,17 @@ class PrecollectInstances(pyblish.api.ContextPlugin): self.log.debug( "_ instance.data: {}".format(pformat(instance.data))) + if not with_audio: + return + + # create audio subset instance + self.create_audio_instance(context, **data) + + # add audioReview attribute to plate instance data + # if reviewTrack is on + if tag_data.get("reviewTrack") is not None: + instance.data["reviewAudio"] = True + def get_resolution_to_data(self, data, context): assert data.get("otioClip"), "Missing `otioClip` data" @@ -159,6 +175,46 @@ class PrecollectInstances(pyblish.api.ContextPlugin): self.log.debug( "_ instance.data: {}".format(pformat(instance.data))) + def create_audio_instance(self, context, **data): + master_layer = data.get("heroTrack") + + if not master_layer: + return + + asset = data.get("asset") + item = data.get("item") + clip_name = item.name() + + asset = data["asset"] + subset = "audioMain" + + # insert family into families + family = "audio" + + # form label + label = asset + if asset != clip_name: + label += " ({}) ".format(clip_name) + label += " {}".format(subset) + label += " [{}]".format(family) + + data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "subset": subset, + "asset": asset, + "family": family, + "families": ["clip"] + }) + # remove review track attr if any + data.pop("reviewTrack") + + # create instance + instance = context.create_instance(**data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) + def get_otio_clip_instance_data(self, otio_timeline, track_item): """ Return otio objects for timeline, track and clip diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index 457ceb1d56..838c5aa7a1 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -139,6 +139,7 @@ from .editorial import ( trim_media_range, range_from_frames, frames_to_secons, + frames_to_timecode, make_sequence_collection ) @@ -246,5 +247,6 @@ __all__ = [ "trim_media_range", "range_from_frames", "frames_to_secons", + "frames_to_timecode", "make_sequence_collection" ] diff --git a/openpype/lib/editorial.py b/openpype/lib/editorial.py index 1dbc4d7954..bf9a0cb506 100644 --- 
a/openpype/lib/editorial.py +++ b/openpype/lib/editorial.py @@ -137,6 +137,11 @@ def frames_to_secons(frames, framerate): return _ot.to_seconds(rt) +def frames_to_timecode(frames, framerate): + rt = _ot.from_frames(frames, framerate) + return _ot.to_timecode(rt) + + def make_sequence_collection(path, otio_range, metadata): """ Make collection from path otio range and otio metadata. diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py b/openpype/plugins/publish/collect_otio_subset_resources.py index d687c1920a..cebfc90630 100644 --- a/openpype/plugins/publish/collect_otio_subset_resources.py +++ b/openpype/plugins/publish/collect_otio_subset_resources.py @@ -22,6 +22,10 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): hosts = ["resolve", "hiero"] def process(self, instance): + + if "audio" in instance.data["family"]: + return + if not instance.data.get("representations"): instance.data["representations"] = list() version_data = dict() diff --git a/openpype/plugins/publish/extract_otio_audio_tracks.py b/openpype/plugins/publish/extract_otio_audio_tracks.py new file mode 100644 index 0000000000..43e40097f7 --- /dev/null +++ b/openpype/plugins/publish/extract_otio_audio_tracks.py @@ -0,0 +1,295 @@ +import os +import pyblish +import openpype.api +from openpype.lib import ( + get_ffmpeg_tool_path +) +import tempfile +import opentimelineio as otio + + +class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): + """Extract Audio tracks from OTIO timeline. + + Process will merge all found audio tracks into one long .wav file at frist + stage. Then it will trim it into individual short audio files relative to + asset length and add it to each marked instance data representation. This + is influenced by instance data audio attribute """ + + order = pyblish.api.ExtractorOrder - 0.44 + label = "Extract OTIO Audio Tracks" + hosts = ["hiero", "resolve"] + + # FFmpeg tools paths + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") + + def process(self, context): + """Convert otio audio track's content to audio representations + + Args: + context (pyblish.Context): context of publisher + """ + # split the long audio file to peces devided by isntances + audio_instances = self.get_audio_instances(context) + self.log.debug("Audio instances: {}".format(len(audio_instances))) + + if len(audio_instances) < 1: + self.log.info("No audio instances available") + return + + # get sequence + otio_timeline = context.data["otioTimeline"] + + # temp file + audio_temp_fpath = self.create_temp_file("audio") + + # get all audio inputs from otio timeline + audio_inputs = self.get_audio_track_items(otio_timeline) + + # create empty audio with longest duration + empty = self.create_empty(audio_inputs) + + # add empty to list of audio inputs + audio_inputs.insert(0, empty) + + # create cmd + cmd = self.ffmpeg_path + " " + cmd += self.create_cmd(audio_inputs) + cmd += audio_temp_fpath + + # run subprocess + self.log.debug("Executing: {}".format(cmd)) + openpype.api.run_subprocess( + cmd, shell=True, logger=self.log + ) + + # remove empty + os.remove(empty["mediaPath"]) + + # cut instance framerange and add to representations + self.add_audio_to_instances(audio_temp_fpath, audio_instances) + + # remove full mixed audio file + os.remove(audio_temp_fpath) + + def add_audio_to_instances(self, audio_file, instances): + created_files = [] + for inst in instances: + name = inst.data["asset"] + + recycling_file = [f for f in created_files if name in f] + + # frameranges + timeline_in_h = inst.data["clipInH"] + 
timeline_out_h = inst.data["clipOutH"] + fps = inst.data["fps"] + + # create duration + duration = (timeline_out_h - timeline_in_h) + 1 + + # ffmpeg generate new file only if doesnt exists already + if not recycling_file: + # convert to seconds + start_sec = float(timeline_in_h / fps) + duration_sec = float(duration / fps) + + # temp audio file + audio_fpath = self.create_temp_file(name) + + cmd = " ".join([ + self.ffmpeg_path, + "-ss {}".format(start_sec), + "-t {}".format(duration_sec), + "-i {}".format(audio_file), + audio_fpath + ]) + + # run subprocess + self.log.debug("Executing: {}".format(cmd)) + openpype.api.run_subprocess( + cmd, shell=True, logger=self.log + ) + else: + audio_fpath = recycling_file.pop() + + if "audio" in (inst.data["families"] + [inst.data["family"]]): + # create empty representation attr + if "representations" not in inst.data: + inst.data["representations"] = [] + # add to representations + inst.data["representations"].append({ + "files": os.path.basename(audio_fpath), + "name": "wav", + "ext": "wav", + "stagingDir": os.path.dirname(audio_fpath), + "frameStart": 0, + "frameEnd": duration + }) + + elif "reviewAudio" in inst.data.keys(): + audio_attr = inst.data.get("audio") or [] + audio_attr.append({ + "filename": audio_fpath, + "offset": 0 + }) + inst.data["audio"] = audio_attr + + # add generated audio file to created files for recycling + if audio_fpath not in created_files: + created_files.append(audio_fpath) + + def get_audio_instances(self, context): + """Return only instances which are having audio in families + + Args: + context (pyblish.context): context of publisher + + Returns: + list: list of selected instances + """ + return [ + _i for _i in context + # filter only those with audio family + # and also with reviewAudio data key + if bool("audio" in ( + _i.data.get("families", []) + [_i.data["family"]]) + ) or _i.data.get("reviewAudio") + ] + + def get_audio_track_items(self, otio_timeline): + """Get all audio clips form OTIO audio tracks + + Args: + otio_timeline (otio.schema.timeline): timeline object + + Returns: + list: list of audio clip dictionaries + """ + output = [] + # go trough all audio tracks + for otio_track in otio_timeline.tracks: + if "Audio" not in otio_track.kind: + continue + self.log.debug("_" * 50) + playhead = 0 + for otio_clip in otio_track: + self.log.debug(otio_clip) + if isinstance(otio_clip, otio.schema.Gap): + playhead += otio_clip.source_range.duration.value + elif isinstance(otio_clip, otio.schema.Clip): + start = otio_clip.source_range.start_time.value + duration = otio_clip.source_range.duration.value + fps = otio_clip.source_range.start_time.rate + media_path = otio_clip.media_reference.target_url + input = { + "mediaPath": media_path, + "delayFrame": playhead, + "startFrame": start, + "durationFrame": duration, + "delayMilSec": int(float(playhead / fps) * 1000), + "startSec": float(start / fps), + "durationSec": float(duration / fps), + "fps": fps + } + if input not in output: + output.append(input) + self.log.debug("__ input: {}".format(input)) + playhead += otio_clip.source_range.duration.value + + return output + + def create_empty(self, inputs): + """Create an empty audio file used as duration placeholder + + Args: + inputs (list): list of audio clip dictionaries + + Returns: + dict: audio clip dictionary + """ + # temp file + empty_fpath = self.create_temp_file("empty") + + # get all end frames + end_secs = [(_i["delayFrame"] + _i["durationFrame"]) / _i["fps"] + for _i in inputs] + # get the max of end frames 
+ max_duration_sec = max(end_secs) + + # create empty cmd + cmd = " ".join([ + self.ffmpeg_path, + "-f lavfi", + "-i anullsrc=channel_layout=stereo:sample_rate=48000", + "-t {}".format(max_duration_sec), + empty_fpath + ]) + + # generate empty audio with ffmpeg + # run subprocess + self.log.debug("Executing: {}".format(cmd)) + + openpype.api.run_subprocess( + cmd, shell=True, logger=self.log + ) + + # return dict with output + return { + "mediaPath": empty_fpath, + "delayMilSec": 0, + "startSec": 0.00, + "durationSec": max_duration_sec + } + + def create_cmd(self, inputs): + """Create a multi-input ffmpeg command string + + Args: + inputs (list): list of input dicts. Order matters. + + Returns: + str: the command body + + """ + # create cmd segments + _inputs = "" + _filters = "-filter_complex \"" + _channels = "" + for index, input in enumerate(inputs): + input_format = input.copy() + input_format.update({"i": index}) + _inputs += ( + "-ss {startSec} " + "-t {durationSec} " + "-i \"{mediaPath}\" " + ).format(**input_format) + + _filters += "[{i}]adelay={delayMilSec}:all=1[r{i}]; ".format( + **input_format) + _channels += "[r{}]".format(index) + + # merge all cmd segments together + cmd = _inputs + _filters + _channels + cmd += str( + "amix=inputs={inputs}:duration=first:" + "dropout_transition=1000,volume={inputs}[a]\" " + ).format(inputs=len(inputs)) + cmd += "-map \"[a]\" " + + return cmd + + def create_temp_file(self, name): + """Create a temp wav file + + Args: + name (str): name to be used in the file name + + Returns: + str: temp fpath + """ + return os.path.normpath( + tempfile.mktemp( + prefix="pyblish_tmp_{}_".format(name), + suffix=".wav" + ) + ) diff --git a/openpype/tools/standalonepublish/widgets/widget_family.py b/openpype/tools/standalonepublish/widgets/widget_family.py index 50335e3109..86663c8ee0 100644 --- a/openpype/tools/standalonepublish/widgets/widget_family.py +++ b/openpype/tools/standalonepublish/widgets/widget_family.py @@ -255,9 +255,9 @@ class FamilyWidget(QtWidgets.QWidget): defaults = list(plugin.defaults) # Replace - compare_regex = re.compile( - subset_name.replace(user_input_text, "(.+)") - ) + compare_regex = re.compile(re.sub( + user_input_text, "(.+)", subset_name, flags=re.IGNORECASE + )) subset_hints = set() if user_input_text: for _name in existing_subset_names: diff --git a/website/docs/admin_hosts_resolve.md b/website/docs/admin_hosts_resolve.md new file mode 100644 index 0000000000..d2e027205d --- /dev/null +++ b/website/docs/admin_hosts_resolve.md @@ -0,0 +1,103 @@ +--- +id: admin_hosts_resolve +title: DaVinci Resolve Setup +sidebar_label: DaVinci Resolve +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Resolve requirements +Due to the way Resolve handles Python and Python scripts, a few setup steps are required on any machine that will be using OpenPype with Resolve. + +### Installing Resolve's own Python 3.6 interpreter +Resolve uses a hardcoded method to look for the Python executable path. All of the following paths are defined automatically by the Python installer. We are using Python 3.6.2. + + + + + +`%LOCALAPPDATA%\Programs\Python\Python36` + + + + +`/opt/Python/3.6/bin` + + + + +`~/Library/Python/3.6/bin` + + + + + +### Installing PySide2 into Python 3.6 for correct GUI work + +OpenPype uses its own window widget inside Resolve; for that reason PySide2 has to be installed into the Python 3.6 interpreter (as explained above). 
+ + + + + +paste to any terminal of your choice + +```bash +%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install PySide2 +``` + + + + +paste to any terminal of your choice + +```bash +/opt/Python/3.6/bin/python -m pip install PySide2 +``` + + + + +paste to any terminal of your choice + +```bash +~/Library/Python/3.6/bin/python -m pip install PySide2 +``` + + + + +
+ +### Set Resolve's Fusion settings for the Python 3.6 interpreter + +
+ + +As shown in the pictures below, go to the Fusion tab and open Fusion Settings from the Fusion menu. Then go to Fusion/Script, find Default Python Version and switch it to Python 3.6. + +
+ +
+ +![Create menu](assets/resolve_fusion_tab.png) +![Create menu](assets/resolve_fusion_menu.png) +![Create menu](assets/resolve_fusion_script_settings.png) + +
+
\ No newline at end of file diff --git a/website/docs/artist_hosts_resolve.md b/website/docs/artist_hosts_resolve.md new file mode 100644 index 0000000000..be069eea79 --- /dev/null +++ b/website/docs/artist_hosts_resolve.md @@ -0,0 +1,216 @@ +--- +id: artist_hosts_resolve +title: DaVinci Resolve +sidebar_label: DaVinci Resolve +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::warning +Before you can start using OpenPype tools in DaVinci Resolve, Resolve's own Python 3.6 interpreter and PySide2 have to be installed. See [Installation of python and pyside](#installation-of-python-and-pyside) for more information. +::: + + + +## OpenPype global tools + +- [Work Files](artist_tools.md#workfiles) +- [Create](artist_tools.md#creator) +- [Load](artist_tools.md#loader) +- [Manage (Inventory)](artist_tools.md#inventory) +- [Publish](artist_tools.md#publisher) + 
+ +## Creating Shots from timeline items + +Before a clip can be published with the [Publisher](artist_tools.md#publisher), the timeline item has to be marked with OpenPype metadata markers. This way it is converted to a publishable subset. + +Let's do it step by step. + +
+ + +
+ +### Color clips before opening Create menu + + +Timeline video clips should be colored `Chocolate` for OpenPype to see them as selected for subset creation. + + +
+ +![Create menu](assets/resolve_select_clips_timeline_chocolate.png) + +
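If you prefer to color many clips by scripting rather than by hand, a sketch along these lines should work from Resolve's console. It assumes DaVinci Resolve's scripting entry point is available as the `resolve` object and that the clips sit on the first video track; treat it as a starting point, not an official OpenPype utility:

```python
# Sketch only: color every clip on video track 1 "Chocolate" via the Resolve API.
# Assumes the scripting entry point `resolve` (available in Resolve's console).
project = resolve.GetProjectManager().GetCurrentProject()
timeline = project.GetCurrentTimeline()

for item in timeline.GetItemListInTrack("video", 1):
    item.SetClipColor("Chocolate")  # mark the clip for OpenPype subset creation
    print(item.GetName(), item.GetClipColor())
```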
+
+ + +### Rename timeline track names + +
+ + +
+ +To be able to work with dynamic subset names, which are based on track names, it is recommended to rename the tracks to the type of plates their clips represent. Commonly used names are `main`, `review`, `fg01`, `fg02`, `bg`, `bg01`, etc. It is completely up to you, but we recommend always having at least a `main` plate. For example, if a clip is on the track **element** and the subset family is set to **plate**, the resulting subset name will be **plateElement** (see the sketch below). + +
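A minimal sketch of how such a subset name could be composed from the family and the track name. This only illustrates the naming convention described above; the function name and values are made up and it is not the actual OpenPype creator code:

```python
# Illustrative only -- a guess at the naming convention, not OpenPype's code.


def compose_subset_name(family, track_name):
    """Append the capitalized track name to the subset family."""
    return "{}{}".format(family, track_name[0].upper() + track_name[1:])


print(compose_subset_name("plate", "element"))  # plateElement
print(compose_subset_name("plate", "main"))     # plateMain
```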

+
+ +
+ +![Create menu](assets/resolve_creator_subset_name.png) +The name of the resulting *subset* can be seen in the **OpenPypeData** marker. +



+
+ +
+ +![Create menu](assets/resolve_remame_track_names.png) +Simple track setup where we are only using `main` and `review` track names. + +
+
+ +![Create menu](assets/resolve_create_vertical_rename_timeline.png) +An example of used track names. The yellow frame highlights vertically aligned clips, which are going to be renamed and grouped together under one asset (shot) name. The concept of vertical renaming is explained later in [Vertical Synchronization of Subset Attributes](#vertical-synchronization-of-subset-attributes). + +
+
+ + +### Create menu... + +
+
+ +After all clips that are intended to be converted to publishable instances are colored `Chocolate`, you can open the OpenPype menu. + +
+
+ +![Create menu](assets/resolve_menu_openpype.png) + +
+ +
+ +
+
+ +After the menu widget is opened (it can take a while, so please be patient), hit `Create ...`, set **Use selection** to active and set the family to **Create Publishable Clips**. + +The Subset name can stay as it is; it is not going to be used because each clip will generate its own name. + +
+
+ +![Create menu](assets/resolve_create_clips.png) + +
+
+ +
+
+ +The new window that opens lets you define various attributes for your future subsets and shots. + +Set **Rename clips** to active if you wish to use different shot names in the pipeline than the original clip names conformed from the EDL/XML. + +**Count sequence from** - start of the shot numbering if `#` is used in one of the keywords + +**Stepping number** - sequential gap between consecutive shot numbers + +As you can see in the `{shot}` key within the *Shot Template Keywords* section, you can use the `#` symbol to define the padding of the sequence number and where it is going to be used. + +
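To make the two numbering options concrete, here is a small illustrative sketch (not the actual Creator code) of how **Count sequence from**, **Stepping number** and the `#` padding could combine, assuming a shot keyword such as `sh###`:

```python
# Illustrative only -- an assumption about how the numbering options combine.


def expand_shot_token(token, count_from, stepping, clip_index):
    """Expand a '#'-padded token such as 'sh###' for the clip at `clip_index`."""
    padding = token.count("#")
    number = count_from + clip_index * stepping
    return token.replace("#" * padding, str(number).zfill(padding))


# Count sequence from = 10, Stepping number = 10
for index in range(3):
    print(expand_shot_token("sh###", 10, 10, index))
# sh010, sh020, sh030
```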
+
+ +![Create menu](assets/resolve_create_renaming_clips.png) + +
+
+ +
+
+ +Notice the relationship between the following sections. Keys from the **Shot Template Keywords** section are used to format the templates in the **Shot Hierarchy And Rename Settings** section. + +**Shot parent hierarchy** forms the parents of the asset (shot); *the hidden root for this is the project folder*. So for this example template we get the resulting string `shots/sq01`. + +**Clip name template**, in the context of a clip sitting on the track named `main` in the second position, resolves to `mainsq01sh020`. This is because the track key hosts `{_track_}`, which inherits its name from the timeline track name. Other allowed namespaces are (see the sketch after this list): +- `{_sequence_}`: timeline name +- `{_clip_}`: clip name +- `{_trackIndex_}`: position of the track on the timeline, counted from the bottom +- `{_clipIndex_}`: position of the clip on the timeline, counted from the left + +
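The template mechanics can be sketched in plain Python. The key names come from the list above, but the template strings and values are assumptions made for the example, so treat this as an illustration rather than the actual Resolve creator code:

```python
# Hypothetical values for the template keys listed above.
keys = {
    "_track_": "main",      # timeline track name
    "_sequence_": "sq01",   # timeline name
    "_clip_": "shot_010",   # original clip name
    "_trackIndex_": 2,      # track position, from the bottom
    "_clipIndex_": 2,       # clip position, from the left
    "sequence": "sq01",
    "shot": "sh020",        # '#' padding already expanded, see previous sketch
}

# Assumed templates, close to the ones shown in the Creator UI screenshots.
shot_parent_hierarchy = "shots/{sequence}".format(**keys)
clip_name = "{_track_}{sequence}{shot}".format(**keys)

print(shot_parent_hierarchy)  # shots/sq01
print(clip_name)              # mainsq01sh020
```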
+
+ +![Create menu](assets/resolve_create_template_filling.png) + +
+
+ +### Vertical synchronization of subset attributes + +If you are only working with two tracks on the timeline, where the `main` track is used as plates for compositors and the `review` track holds mp4 clips for offlines and web preview, **Enable vertical sync** can be deactivated. + +In a multiple-track scenario - as mentioned [here](#rename-timeline-track-names) - it is recommended to activate **Enable vertical sync** and set the hero (driving) track to *main*. This ensures that all clips corresponding to the same shot share the same publishing parameters. + +
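Conceptually, vertical sync groups clips that overlap the hero-track clip in time and copies the hero clip's publish attributes onto them. The sketch below illustrates that idea with made-up data and helper names; it is not OpenPype's implementation:

```python
# Conceptual sketch of "vertical sync" -- not the actual OpenPype code.


def overlaps(range_a, range_b):
    """Return True when two (start, end) frame ranges share at least one frame."""
    return range_a[0] <= range_b[1] and range_b[0] <= range_a[1]


hero_clips = [
    {"asset": "sq01sh010", "range": (0, 100), "attrs": {"handleStart": 10}},
    {"asset": "sq01sh020", "range": (101, 180), "attrs": {"handleStart": 5}},
]
review_clips = [{"range": (0, 100)}, {"range": (101, 180)}]

for clip in review_clips:
    for hero in hero_clips:
        if overlaps(clip["range"], hero["range"]):
            # the vertically aligned clip inherits the hero clip's shot name
            # and publishing attributes
            clip["asset"] = hero["asset"]
            clip.update(hero["attrs"])

print(review_clips)
```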

+ +
+ +
+ +![Create menu](assets/resolve_create_single_track_rename_hero_track.png) + +
+ +
+ +![Create menu](assets/resolve_create_vertical_rename_creator_ui.png) + +
+
+ + +## Publishing Shots + +
+
+ +Once all `Chocolate` colored clips have gone through the [creator](#create-menu), have been colored `Pink` and a marker has been created for each of them, they have been successfully converted to publishable clips. Now we can run the **Publisher** - its button can be found in the OpenPype menu. + +

+
+ +
+
+ +![Create menu](assets/resolve_publish_instance_review_main.png) +Notice that the main track clip and the review clip have been merged into one instance. And since it is the main `hero` clip, it also holds all the new shot metadata. For that reason a second instance with the `shot` family is also created for each of them. This instance creates the whole shot hierarchy and passes the frame range attributes to the shot (asset). + +
+
+ +
+
+ +![Create menu](assets/resolve_publish_instance_other_plateSubsets.png) +Also notice how the subset name is formed from the *track* name and the *subset family* from the previous steps. + +It is also important to notice the asset name in the *OpenPypeData* marker - the name is the same for all **vertically renamed** shots, as they have been grouped together. Unfortunately Resolve does not allow renaming the clips, so the only way to check is to look at the marker's metadata. + +
+
+ +
diff --git a/website/docs/assets/resolve_clip_instances_pink_with_marker_in_middle.png b/website/docs/assets/resolve_clip_instances_pink_with_marker_in_middle.png new file mode 100644 index 0000000000..403f6e9433 Binary files /dev/null and b/website/docs/assets/resolve_clip_instances_pink_with_marker_in_middle.png differ diff --git a/website/docs/assets/resolve_create_audio_resolution.png b/website/docs/assets/resolve_create_audio_resolution.png new file mode 100644 index 0000000000..af22c7467e Binary files /dev/null and b/website/docs/assets/resolve_create_audio_resolution.png differ diff --git a/website/docs/assets/resolve_create_clips.png b/website/docs/assets/resolve_create_clips.png new file mode 100644 index 0000000000..b589bfb61e Binary files /dev/null and b/website/docs/assets/resolve_create_clips.png differ diff --git a/website/docs/assets/resolve_create_object_naming_convention.png b/website/docs/assets/resolve_create_object_naming_convention.png new file mode 100644 index 0000000000..13de366ef6 Binary files /dev/null and b/website/docs/assets/resolve_create_object_naming_convention.png differ diff --git a/website/docs/assets/resolve_create_renaming_clips.png b/website/docs/assets/resolve_create_renaming_clips.png new file mode 100644 index 0000000000..20c303e50a Binary files /dev/null and b/website/docs/assets/resolve_create_renaming_clips.png differ diff --git a/website/docs/assets/resolve_create_single_track_rename_hero_track.png b/website/docs/assets/resolve_create_single_track_rename_hero_track.png new file mode 100644 index 0000000000..5f68258d1d Binary files /dev/null and b/website/docs/assets/resolve_create_single_track_rename_hero_track.png differ diff --git a/website/docs/assets/resolve_create_subset_name_review_track.png b/website/docs/assets/resolve_create_subset_name_review_track.png new file mode 100644 index 0000000000..4efbff8409 Binary files /dev/null and b/website/docs/assets/resolve_create_subset_name_review_track.png differ diff --git a/website/docs/assets/resolve_create_template_filling.png b/website/docs/assets/resolve_create_template_filling.png new file mode 100644 index 0000000000..faa8c51ee3 Binary files /dev/null and b/website/docs/assets/resolve_create_template_filling.png differ diff --git a/website/docs/assets/resolve_create_vertical_rename_creator_ui.png b/website/docs/assets/resolve_create_vertical_rename_creator_ui.png new file mode 100644 index 0000000000..e163844993 Binary files /dev/null and b/website/docs/assets/resolve_create_vertical_rename_creator_ui.png differ diff --git a/website/docs/assets/resolve_create_vertical_rename_timeline.png b/website/docs/assets/resolve_create_vertical_rename_timeline.png new file mode 100644 index 0000000000..3e57db4119 Binary files /dev/null and b/website/docs/assets/resolve_create_vertical_rename_timeline.png differ diff --git a/website/docs/assets/resolve_creator_clip_marker_do_not_change.png b/website/docs/assets/resolve_creator_clip_marker_do_not_change.png new file mode 100644 index 0000000000..11cc5c4618 Binary files /dev/null and b/website/docs/assets/resolve_creator_clip_marker_do_not_change.png differ diff --git a/website/docs/assets/resolve_creator_framestart_handles.png b/website/docs/assets/resolve_creator_framestart_handles.png new file mode 100644 index 0000000000..65328fe041 Binary files /dev/null and b/website/docs/assets/resolve_creator_framestart_handles.png differ diff --git a/website/docs/assets/resolve_creator_subset_name.png b/website/docs/assets/resolve_creator_subset_name.png new file 
mode 100644 index 0000000000..4a42c5af2c Binary files /dev/null and b/website/docs/assets/resolve_creator_subset_name.png differ diff --git a/website/docs/assets/resolve_fusion_menu.png b/website/docs/assets/resolve_fusion_menu.png new file mode 100644 index 0000000000..ae1939690c Binary files /dev/null and b/website/docs/assets/resolve_fusion_menu.png differ diff --git a/website/docs/assets/resolve_fusion_script_settings.png b/website/docs/assets/resolve_fusion_script_settings.png new file mode 100644 index 0000000000..6d903b3ef4 Binary files /dev/null and b/website/docs/assets/resolve_fusion_script_settings.png differ diff --git a/website/docs/assets/resolve_fusion_tab.png b/website/docs/assets/resolve_fusion_tab.png new file mode 100644 index 0000000000..657d53cb16 Binary files /dev/null and b/website/docs/assets/resolve_fusion_tab.png differ diff --git a/website/docs/assets/resolve_menu_openpype.png b/website/docs/assets/resolve_menu_openpype.png new file mode 100644 index 0000000000..9812858072 Binary files /dev/null and b/website/docs/assets/resolve_menu_openpype.png differ diff --git a/website/docs/assets/resolve_menu_openpype_opened.png b/website/docs/assets/resolve_menu_openpype_opened.png new file mode 100644 index 0000000000..9b0e35569b Binary files /dev/null and b/website/docs/assets/resolve_menu_openpype_opened.png differ diff --git a/website/docs/assets/resolve_publish_instance_other_plateSubsets.png b/website/docs/assets/resolve_publish_instance_other_plateSubsets.png new file mode 100644 index 0000000000..fd5f857da5 Binary files /dev/null and b/website/docs/assets/resolve_publish_instance_other_plateSubsets.png differ diff --git a/website/docs/assets/resolve_publish_instance_review_main.png b/website/docs/assets/resolve_publish_instance_review_main.png new file mode 100644 index 0000000000..0cf5ed3b99 Binary files /dev/null and b/website/docs/assets/resolve_publish_instance_review_main.png differ diff --git a/website/docs/assets/resolve_remame_track_names.png b/website/docs/assets/resolve_remame_track_names.png new file mode 100644 index 0000000000..01174ea644 Binary files /dev/null and b/website/docs/assets/resolve_remame_track_names.png differ diff --git a/website/docs/assets/resolve_select_clips_timeline_chocolate.png b/website/docs/assets/resolve_select_clips_timeline_chocolate.png new file mode 100644 index 0000000000..b4a682e83a Binary files /dev/null and b/website/docs/assets/resolve_select_clips_timeline_chocolate.png differ diff --git a/website/sidebars.js b/website/sidebars.js index aa9cb012de..ffe51075fe 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -22,6 +22,7 @@ module.exports = { "artist_hosts_blender", "artist_hosts_harmony", "artist_hosts_aftereffects", + "artist_hosts_resolve", "artist_hosts_photoshop", "artist_hosts_tvpaint", "artist_hosts_unreal", @@ -83,7 +84,9 @@ module.exports = { label: "Integrations", items: [ "admin_hosts_blender", - "admin_hosts_maya" + "admin_hosts_maya", + "admin_hosts_resolve" + ], }, {