From 3e53a45bfadc50ae5dd2167f714ebba657edeec2 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 12:20:27 +0100
Subject: [PATCH 01/11] Flame: collect timeline otio plugin

---
 .../plugins/publish/precollect_workfile.py | 26 +++++++++++++++++++
 1 file changed, 26 insertions(+)
 create mode 100644 openpype/hosts/flame/plugins/publish/precollect_workfile.py

diff --git a/openpype/hosts/flame/plugins/publish/precollect_workfile.py b/openpype/hosts/flame/plugins/publish/precollect_workfile.py
new file mode 100644
index 0000000000..0533d01e00
--- /dev/null
+++ b/openpype/hosts/flame/plugins/publish/precollect_workfile.py
@@ -0,0 +1,26 @@
+import pyblish.api
+import openpype.hosts.flame.api as opfapi
+from openpype.hosts.flame.otio import flame_export
+
+
+class PrecollecTimelineOCIO(pyblish.api.ContextPlugin):
+    """Inject the current working context into publish context"""
+
+    label = "Precollect Timeline OTIO"
+    order = pyblish.api.CollectorOrder - 0.5
+
+    def process(self, context):
+        project = opfapi.get_current_project()
+        sequence = opfapi.get_current_sequence(opfapi.CTX.selection)
+
+        # adding otio timeline to context
+        otio_timeline = flame_export.create_otio_timeline(sequence)
+
+        # update context with main project attributes
+        context.data.update({
+            "otioTimeline": otio_timeline,
+            "currentFile": "Flame/{}/{}".format(
+                project.name, sequence.name
+            ),
+            "fps": float(str(sequence.frame_rate)[:-4])
+        })

From 104b57120c64d3095c492848adca11a47a958749 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 12:32:35 +0100
Subject: [PATCH 02/11] Flame: collect instance in otio timeline plugin

---
 .../flame/plugins/publish/precollect_workfile.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/openpype/hosts/flame/plugins/publish/precollect_workfile.py b/openpype/hosts/flame/plugins/publish/precollect_workfile.py
index 0533d01e00..3497d19d15 100644
--- a/openpype/hosts/flame/plugins/publish/precollect_workfile.py
+++ b/openpype/hosts/flame/plugins/publish/precollect_workfile.py
@@ -1,4 +1,5 @@
 import pyblish.api
+import avalon.api as avalon
 import openpype.hosts.flame.api as opfapi
 from openpype.hosts.flame.otio import flame_export
 
@@ -10,12 +11,25 @@ class PrecollecTimelineOCIO(pyblish.api.ContextPlugin):
     order = pyblish.api.CollectorOrder - 0.5
 
     def process(self, context):
+        asset = avalon.Session["AVALON_ASSET"]
+        subset = "otioTimeline"
         project = opfapi.get_current_project()
         sequence = opfapi.get_current_sequence(opfapi.CTX.selection)
 
         # adding otio timeline to context
         otio_timeline = flame_export.create_otio_timeline(sequence)
 
+        instance_data = {
+            "name": "{}_{}".format(asset, subset),
+            "asset": asset,
+            "subset": "{}{}".format(asset, subset.capitalize()),
+            "family": "workfile"
+        }
+
+        # create instance with workfile
+        instance = context.create_instance(**instance_data)
+        self.log.info("Creating instance: {}".format(instance))
+
         # update context with main project attributes
         context.data.update({
             "otioTimeline": otio_timeline,
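Editorial note: the `fps` value collected in the plugin above comes from `float(str(sequence.frame_rate)[:-4])`, which assumes Flame reports the frame rate as a string ending in the four-character suffix " fps". A minimal sketch of that assumption (the example values are illustrative, not taken from a real Flame session):

```python
# Sketch only: mirrors the slice used by the collector above, assuming the
# frame rate string always ends with the four characters " fps".
def parse_flame_fps(frame_rate):
    return float(str(frame_rate)[:-4])

print(parse_flame_fps("25 fps"))      # 25.0
print(parse_flame_fps("23.976 fps"))  # 23.976
```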
From 9e70f67f4716d8af3956af3486ffc47256b9db96 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 12:32:53 +0100
Subject: [PATCH 03/11] Flame: extracting otio file

---
 .../plugins/publish/extract_otio_file.py | 43 +++++++++++++++++++
 1 file changed, 43 insertions(+)
 create mode 100644 openpype/hosts/flame/plugins/publish/extract_otio_file.py

diff --git a/openpype/hosts/flame/plugins/publish/extract_otio_file.py b/openpype/hosts/flame/plugins/publish/extract_otio_file.py
new file mode 100644
index 0000000000..7dd75974fc
--- /dev/null
+++ b/openpype/hosts/flame/plugins/publish/extract_otio_file.py
@@ -0,0 +1,43 @@
+import os
+import pyblish.api
+import openpype.api
+import opentimelineio as otio
+
+
+class ExtractOTIOFile(openpype.api.Extractor):
+    """
+    Extractor export OTIO file
+    """
+
+    label = "Extract OTIO file"
+    order = pyblish.api.ExtractorOrder - 0.45
+    families = ["workfile"]
+    hosts = ["flame"]
+
+    def process(self, instance):
+        # create representation data
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+
+        name = instance.data["name"]
+        staging_dir = self.staging_dir(instance)
+
+        otio_timeline = instance.context.data["otioTimeline"]
+        # create otio timeline representation
+        otio_file_name = name + ".otio"
+        otio_file_path = os.path.join(staging_dir, otio_file_name)
+
+        # export otio file to temp dir
+        otio.adapters.write_to_file(otio_timeline, otio_file_path)
+
+        representation_otio = {
+            'name': "otio",
+            'ext': "otio",
+            'files': otio_file_name,
+            "stagingDir": staging_dir,
+        }
+
+        instance.data["representations"].append(representation_otio)
+
+        self.log.info("Added OTIO file representation: {}".format(
+            representation_otio))
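Editorial note: the extractor above leans on the OpenTimelineIO adapter API to serialize the collected timeline; a minimal round-trip sketch (the timeline name and file path are illustrative):

```python
import opentimelineio as otio

# Build an empty timeline, write it through the .otio adapter, read it back.
timeline = otio.schema.Timeline(name="example_timeline")
otio.adapters.write_to_file(timeline, "/tmp/example_timeline.otio")
round_tripped = otio.adapters.read_from_file("/tmp/example_timeline.otio")
assert round_tripped.name == "example_timeline"
```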
From 32ceb9e9a98fa662bab525be4b8a007f4e8624f6 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 15:24:09 +0100
Subject: [PATCH 04/11] flame: enhancing code of api lib

---
 openpype/hosts/flame/api/lib.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py
index 2cc9fee173..787ecf4569 100644
--- a/openpype/hosts/flame/api/lib.py
+++ b/openpype/hosts/flame/api/lib.py
@@ -448,6 +448,8 @@ def get_sequence_segments(sequence, selected=False):
         for segment in track.segments:
             if segment.name.get_value() == "":
                 continue
+            if segment.hidden:
+                continue
             if (
                 selected is True
                 and segment.selected.get_value() is not True
@@ -522,7 +524,7 @@ def _get_shot_tokens_values(clip, tokens):
 
 
 def get_segment_attributes(segment):
-    if str(segment.name)[1:-1] == "":
+    if segment.name.get_value() == "":
         return None
 
     # Add timeline segment to tree

From 02af9b69a195dca87a109fbfd28880372f4feaf4 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 15:25:16 +0100
Subject: [PATCH 05/11] flame: adding flameSequence attribute to publishing context

---
 openpype/hosts/flame/plugins/publish/precollect_workfile.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/openpype/hosts/flame/plugins/publish/precollect_workfile.py b/openpype/hosts/flame/plugins/publish/precollect_workfile.py
index 3497d19d15..3d2ce97755 100644
--- a/openpype/hosts/flame/plugins/publish/precollect_workfile.py
+++ b/openpype/hosts/flame/plugins/publish/precollect_workfile.py
@@ -32,6 +32,7 @@ class PrecollecTimelineOCIO(pyblish.api.ContextPlugin):
 
         # update context with main project attributes
         context.data.update({
+            "flameSequence": sequence,
             "otioTimeline": otio_timeline,
             "currentFile": "Flame/{}/{}".format(
                 project.name, sequence.name

From 281ae76794f2c04ba9081c402b8632bb37b3cafc Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 15:55:18 +0100
Subject: [PATCH 06/11] flame: adding functions to lib and api

---
 openpype/hosts/flame/api/__init__.py | 10 ++-
 openpype/hosts/flame/api/lib.py | 114 +++++++++++++++++++++++++++
 2 files changed, 123 insertions(+), 1 deletion(-)

diff --git a/openpype/hosts/flame/api/__init__.py b/openpype/hosts/flame/api/__init__.py
index dc47488dc1..308682b884 100644
--- a/openpype/hosts/flame/api/__init__.py
+++ b/openpype/hosts/flame/api/__init__.py
@@ -23,7 +23,11 @@ from .lib import (
     get_sequence_segments,
     maintained_segment_selection,
     reset_segment_selection,
-    get_segment_attributes
+    get_segment_attributes,
+    get_clips_in_reels,
+    get_reformated_path,
+    get_frame_from_path,
+    get_padding_from_path
 )
 from .utils import (
     setup
@@ -80,6 +84,10 @@ __all__ = [
     "maintained_segment_selection",
     "reset_segment_selection",
     "get_segment_attributes",
+    "get_clips_in_reels",
+    "get_reformated_path",
+    "get_frame_from_path",
+    "get_padding_from_path",
 
     # pipeline
     "install",
diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py
index 787ecf4569..4404f7a612 100644
--- a/openpype/hosts/flame/api/lib.py
+++ b/openpype/hosts/flame/api/lib.py
@@ -537,6 +537,12 @@ def get_segment_attributes(segment):
         "PySegment": segment
     }
 
+    # head and tail with forward compatibility
+    if segment.head:
+        clip_data["segment_head"] = int(segment.head)
+    if segment.tail:
+        clip_data["segment_tail"] = int(segment.tail)
+
     # add all available shot tokens
     shot_tokens = _get_shot_tokens_values(segment, [
         "", "", "", "", "",
@@ -564,3 +570,111 @@ def get_segment_attributes(segment):
         clip_data["segment_timecodes"] = segment_attrs_data
 
     return clip_data
+
+
+def get_clips_in_reels(project):
+    output_clips = []
+    project_desktop = project.current_workspace.desktop
+
+    for reel_group in project_desktop.reel_groups:
+        for reel in reel_group.reels:
+            for clip in reel.clips:
+                clip_data = {
+                    "PyClip": clip,
+                    "fps": float(str(clip.frame_rate)[:-4])
+                }
+
+                attrs = [
+                    "name", "width", "height",
+                    "ratio", "sample_rate", "bit_depth"
+                ]
+
+                for attr in attrs:
+                    val = getattr(clip, attr)
+                    clip_data[attr] = val
+
+                version = clip.versions[-1]
+                track = version.tracks[-1]
+                for segment in track.segments:
+                    segment_data = get_segment_attributes(segment)
+                    clip_data.update(segment_data)
+
+                output_clips.append(clip_data)
+
+    return output_clips
+
+
+def get_reformated_path(path, padded=True):
+    """
+    Return fixed python expression path
+
+    Args:
+        path (str): path url or simple file name
+
+    Returns:
+        type: string with reformated path
+
+    Example:
+        get_reformated_path("plate.1001.exr") > plate.%04d.exr
+
+    """
+    padding = get_padding_from_path(path)
+    found = get_frame_from_path(path)
+
+    if not found:
+        log.info("Path is not sequence: {}".format(path))
+        return path
+
+    if padded:
+        path = path.replace(found, "%0{}d".format(padding))
+    else:
+        path = path.replace(found, "%d")
+
+    return path
+
+
+def get_padding_from_path(path):
+    """
+    Return padding number from Flame path style
+
+    Args:
+        path (str): path url or simple file name
+
+    Returns:
+        int: padding number
+
+    Example:
+        get_padding_from_path("plate.0001.exr") > 4
+
+    """
+    found = get_frame_from_path(path)
+
+    if found:
+        return len(found)
+    else:
+        return None
+
+
+def get_frame_from_path(path):
+    """
+    Return sequence number from Flame path style
+
+    Args:
+        path (str): path url or simple file name
+
+    Returns:
+        str: sequence frame number
+
+    Example:
+        get_frame_from_path("plate.0001.exr") > 0001
+
+    """
+    frame_pattern = re.compile(r"[._](\d+)[.]")
+
+    found = re.findall(frame_pattern, path)
+
+    if found:
+        return found.pop()
+    else:
+        return None
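Editorial note: a quick usage sketch of the three path helpers added above (the file names are illustrative). Note that `get_frame_from_path` hands back the frame as a string (via `re.findall`), which is exactly what the padding and replace logic rely on:

```python
path = "plate.0001.exr"

get_frame_from_path(path)                # "0001" (a string, not an int)
get_padding_from_path(path)              # 4
get_reformated_path(path)                # "plate.%04d.exr"
get_reformated_path(path, padded=False)  # "plate.%d.exr"

# A path without a frame number yields None, so get_reformated_path logs
# "Path is not sequence" and returns the path unchanged.
get_frame_from_path("single_file.exr")   # None
```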
From 50e1cbf31e38e7923aceba97da4d1d37eee7c47c Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 15:55:40 +0100
Subject: [PATCH 07/11] flame: adding flameProject to publishing context attributes

---
 openpype/hosts/flame/plugins/publish/precollect_workfile.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/openpype/hosts/flame/plugins/publish/precollect_workfile.py b/openpype/hosts/flame/plugins/publish/precollect_workfile.py
index 3d2ce97755..e7383ddec8 100644
--- a/openpype/hosts/flame/plugins/publish/precollect_workfile.py
+++ b/openpype/hosts/flame/plugins/publish/precollect_workfile.py
@@ -32,6 +32,7 @@ class PrecollecTimelineOCIO(pyblish.api.ContextPlugin):
 
         # update context with main project attributes
         context.data.update({
+            "flameProject": project,
             "flameSequence": sequence,
             "otioTimeline": otio_timeline,
             "currentFile": "Flame/{}/{}".format(

From 460048ef4c1a5b6c90ef8161f6394acb85a95d0c Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 15:58:12 +0100
Subject: [PATCH 08/11] flame: collect instances wip

---
 .../plugins/publish/precollect_instances.py | 251 ++++++++++++++++++
 1 file changed, 251 insertions(+)
 create mode 100644 openpype/hosts/flame/plugins/publish/precollect_instances.py

diff --git a/openpype/hosts/flame/plugins/publish/precollect_instances.py b/openpype/hosts/flame/plugins/publish/precollect_instances.py
new file mode 100644
index 0000000000..5f3b71eba4
--- /dev/null
+++ b/openpype/hosts/flame/plugins/publish/precollect_instances.py
@@ -0,0 +1,251 @@
+import pyblish
+# import openpype
+import openpype.hosts.flame.api as opfapi
+
+# # developer reload modules
+from pprint import pformat
+
+
+class PrecollectInstances(pyblish.api.ContextPlugin):
+    """Collect all Track items selection."""
+
+    order = pyblish.api.CollectorOrder - 0.49
+    label = "Precollect Instances"
+    hosts = ["flame"]
+
+    audio_track_items = []
+
+    def process(self, context):
+        project = context.data["flameProject"]
+        sequence = context.data["flameSequence"]
+        self.otio_timeline = context.data["otioTimeline"]
+        self.clips_in_reels = opfapi.get_clips_in_reels(project)
+
+        # return only actually selected and enabled segments
+        selected_segments = opfapi.get_sequence_segments(sequence, True)
+
+        # only return enabled segments
+        if not selected_segments:
+            selected_segments = opfapi.get_sequence_segments(
+                sequence)
+
+        self.log.info(
+            "Processing following segments: {}".format(
+                [s.name for s in selected_segments]))
+
+        # process all sellected timeline track items
+        for segment in selected_segments:
+
+            clip_data = opfapi.get_segment_attributes(segment)
+            clip_name = clip_data["segment_name"]
+            self.log.debug("clip_name: {}".format(clip_name))
+
+            # get openpype tag data
+            marker_data = opfapi.get_segment_data_marker(segment)
+            self.log.debug("__ marker_data: {}".format(pformat(marker_data)))
+
+            if not marker_data:
+                continue
+
+            if marker_data.get("id") != "pyblish.avalon.instance":
+                continue
+
+            file_path = clip_data["fpath"]
+            first_frame = opfapi.get_frame_from_path(file_path) or 0
+
+            # calculate head and tail with forward compatibility
+            head = clip_data.get("segment_head")
+            tail = clip_data.get("segment_tail")
+
+            if not head:
+                head = int(clip_data["source_in"]) - int(first_frame)
+            if not tail:
+                tail = int(
+                    clip_data["source_duration"] - (
+                        head + clip_data["record_duration"]
+                    )
+                )
+
+            # solve handles length
+            marker_data["handleStart"] = min(
+                marker_data["handleStart"], head)
+            marker_data["handleEnd"] = min(
+                marker_data["handleEnd"], tail)
+
+            # add audio to families
+            with_audio = False
+            if marker_data.pop("audio"):
+                with_audio = True
+
+            # add tag data to instance data
+            data = {
+                k: v for k, v in marker_data.items()
+                if k not in ("id", "applieswhole", "label")
+            }
+
+            asset = marker_data["asset"]
+            subset = marker_data["subset"]
+
+            # insert family into families
+            family = marker_data["family"]
+            families = [str(f) for f in marker_data["families"]]
+            families.insert(0, str(family))
+
+            # form label
+            label = asset
+            if asset != clip_name:
+                label += " ({})".format(clip_name)
+            label += " {}".format(subset)
+            label += " {}".format("[" + ", ".join(families) + "]")
+
+            data.update({
+                "name": "{}_{}".format(asset, subset),
+                "label": label,
+                "asset": asset,
+                "item": segment,
+                "families": families,
+                "publish": marker_data["publish"],
+                "fps": context.data["fps"],
+            })
+
+            # # otio clip data
+            # otio_data = self.get_otio_clip_instance_data(segment) or {}
+            # self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
+            # data.update(otio_data)
+            # self.log.debug("__ data: {}".format(pformat(data)))
+
+            # # add resolution
+            # self.get_resolution_to_data(data, context)
+
+            # create instance
+            instance = context.create_instance(**data)
+
+            # add colorspace data
+            instance.data.update({
+                "versionData": {
+                    "colorspace": clip_data["colour_space"],
+                }
+            })
+
+            # create shot instance for shot attributes create/update
+            self.create_shot_instance(context, clip_name, **data)
+
+            self.log.info("Creating instance: {}".format(instance))
+            self.log.info(
+                "_ instance.data: {}".format(pformat(instance.data)))
+
+            if not with_audio:
+                continue
+
+            # add audioReview attribute to plate instance data
+            # if reviewTrack is on
+            if marker_data.get("reviewTrack") is not None:
+                instance.data["reviewAudio"] = True
+
+    def get_resolution_to_data(self, data, context):
+        assert data.get("otioClip"), "Missing `otioClip` data"
+
+        # solve source resolution option
+        if data.get("sourceResolution", None):
+            otio_clip_metadata = data[
+                "otioClip"].media_reference.metadata
+            data.update({
+                "resolutionWidth": otio_clip_metadata[
+                    "openpype.source.width"],
+                "resolutionHeight": otio_clip_metadata[
+                    "openpype.source.height"],
+                "pixelAspect": otio_clip_metadata[
+                    "openpype.source.pixelAspect"]
+            })
+        else:
+            otio_tl_metadata = context.data["otioTimeline"].metadata
+            data.update({
+                "resolutionWidth": otio_tl_metadata["openpype.timeline.width"],
+                "resolutionHeight": otio_tl_metadata[
+                    "openpype.timeline.height"],
+                "pixelAspect": otio_tl_metadata[
+                    "openpype.timeline.pixelAspect"]
+            })
+
+    def create_shot_instance(self, context, clip_name, **data):
+        master_layer = data.get("heroTrack")
+        hierarchy_data = data.get("hierarchyData")
+        asset = data.get("asset")
+
+        if not master_layer:
+            return
+
+        if not hierarchy_data:
+            return
+
+        asset = data["asset"]
+        subset = "shotMain"
+
+        # insert family into families
+        family = "shot"
+
+        # form label
+        label = asset
+        if asset != clip_name:
+            label += " ({}) ".format(clip_name)
+        label += " {}".format(subset)
+        label += " [{}]".format(family)
+
+        data.update({
+            "name": "{}_{}".format(asset, subset),
+            "label": label,
+            "subset": subset,
+            "asset": asset,
+            "family": family,
+            "families": []
+        })
+
+        instance = context.create_instance(**data)
+        self.log.info("Creating instance: {}".format(instance))
+        self.log.debug(
+            "_ instance.data: {}".format(pformat(instance.data)))
+
+    # def get_otio_clip_instance_data(self, segment):
+    #     """
+    #     Return otio objects for timeline, track and clip
+
+    #     Args:
+    #         timeline_item_data (dict): timeline_item_data from list returned by
+    #             resolve.get_current_timeline_items()
+    #         otio_timeline (otio.schema.Timeline): otio object
+
+    #     Returns:
+    #         dict: otio clip object
+
+    #     """
+    #     ti_track_name = segment.parent().name()
+    #     timeline_range = self.create_otio_time_range_from_timeline_item_data(
+    #         segment)
+    #     for otio_clip in self.otio_timeline.each_clip():
+    #         track_name = otio_clip.parent().name
+    #         parent_range = otio_clip.range_in_parent()
+    #         if ti_track_name not in track_name:
+    #             continue
+    #         if otio_clip.name not in segment.name():
+    #             continue
+    #         if openpype.lib.is_overlapping_otio_ranges(
+    #                 parent_range, timeline_range, strict=True):
+
+    #             # add pypedata marker to otio_clip metadata
+    #             for marker in otio_clip.markers:
+    #                 if phiero.pype_tag_name in marker.name:
+    #                     otio_clip.metadata.update(marker.metadata)
+    #             return {"otioClip": otio_clip}
+
+    #     return None
+
+    # @staticmethod
+    # def create_otio_time_range_from_timeline_item_data(segment):
+    #     speed = segment.playbackSpeed()
+    #     timeline = phiero.get_current_sequence()
+    #     frame_start = int(segment.timelineIn())
+    #     frame_duration = int(segment.sourceDuration() / speed)
+    #     fps = timeline.framerate().toFloat()
+
+    #     return hiero_export.create_otio_time_range(
+    #         frame_start, frame_duration, fps)
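Editorial note: the head/tail fallback in the collector above can be illustrated with hypothetical numbers (they are not taken from a real project):

```python
# A segment cut from a plate whose file sequence starts at frame 1001.
first_frame = 1001      # get_frame_from_path("plate.1001.exr")
source_in = 1010        # clip_data["source_in"]
source_duration = 100   # clip_data["source_duration"]: length of the media
record_duration = 50    # clip_data["record_duration"]: length of the cut

head = source_in - first_frame                      # 9 frames before the cut
tail = source_duration - (head + record_duration)   # 41 frames after the cut

# The collector then clamps the handles requested in the marker data to what
# the media can actually provide:
handle_start = min(10, head)  # 9
handle_end = min(10, tail)    # 10
```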
From 4fa7eb25ffabc6f83a7af09400f42d3e61addbb3 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 16:32:00 +0100
Subject: [PATCH 09/11] flame: fix selection

---
 openpype/hosts/flame/api/lib.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py
index 4404f7a612..a409e731e3 100644
--- a/openpype/hosts/flame/api/lib.py
+++ b/openpype/hosts/flame/api/lib.py
@@ -448,7 +448,7 @@ def get_sequence_segments(sequence, selected=False):
         for segment in track.segments:
             if segment.name.get_value() == "":
                 continue
-            if segment.hidden:
+            if segment.hidden.get_value() is True:
                 continue
             if (
                 selected is True

From a326ab429040c799ac6b45683b326aba65da3fc4 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 17:01:39 +0100
Subject: [PATCH 10/11] flame: deactivating test plugin

---
 openpype/hosts/flame/plugins/publish/collect_test_selection.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/openpype/hosts/flame/plugins/publish/collect_test_selection.py b/openpype/hosts/flame/plugins/publish/collect_test_selection.py
index 0c75b3204f..84fd4fafe8 100644
--- a/openpype/hosts/flame/plugins/publish/collect_test_selection.py
+++ b/openpype/hosts/flame/plugins/publish/collect_test_selection.py
@@ -14,6 +14,7 @@ class CollectTestSelection(pyblish.api.ContextPlugin):
     order = pyblish.api.CollectorOrder
     label = "test selection"
     hosts = ["flame"]
+    active = False
 
     def process(self, context):
         self.log.info(
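Editorial note: patch 11 below switches both publish plugins to `opfapi.maintained_segment_selection`. Its implementation is not part of this diff; the sketch below only shows the generic context-manager shape such a helper typically follows, with placeholder callables standing in for the Flame selection API (they are not real OpenPype or Flame functions):

```python
import contextlib

@contextlib.contextmanager
def maintained_selection_sketch(sequence, read_selection, restore_selection):
    """Yield the current selection and restore it when the block exits."""
    saved = read_selection(sequence)        # placeholder: capture the selection
    try:
        yield saved                         # the publish plugins iterate this
    finally:
        restore_selection(sequence, saved)  # placeholder: put the selection back
```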
From 1669f1782b08c2906dc2c0a705e66bda8031e73c Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 11 Jan 2022 17:03:03 +0100
Subject: [PATCH 11/11] flame: adding maintained selection to publish plugins

---
 .../plugins/publish/precollect_instances.py | 198 ++++++++----------
 .../plugins/publish/precollect_workfile.py  |   3 +-
 2 files changed, 95 insertions(+), 106 deletions(-)

diff --git a/openpype/hosts/flame/plugins/publish/precollect_instances.py b/openpype/hosts/flame/plugins/publish/precollect_instances.py
index 5f3b71eba4..e302bc42a4 100644
--- a/openpype/hosts/flame/plugins/publish/precollect_instances.py
+++ b/openpype/hosts/flame/plugins/publish/precollect_instances.py
@@ -21,126 +21,114 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
         self.otio_timeline = context.data["otioTimeline"]
         self.clips_in_reels = opfapi.get_clips_in_reels(project)
 
-        # return only actually selected and enabled segments
-        selected_segments = opfapi.get_sequence_segments(sequence, True)
-
-        # only return enabled segments
-        if not selected_segments:
-            selected_segments = opfapi.get_sequence_segments(
-                sequence)
-
-        self.log.info(
-            "Processing following segments: {}".format(
-                [s.name for s in selected_segments]))
-
-        # process all sellected timeline track items
-        for segment in selected_segments:
-
-            clip_data = opfapi.get_segment_attributes(segment)
-            clip_name = clip_data["segment_name"]
-            self.log.debug("clip_name: {}".format(clip_name))
-
-            # get openpype tag data
-            marker_data = opfapi.get_segment_data_marker(segment)
-            self.log.debug("__ marker_data: {}".format(pformat(marker_data)))
-
-            if not marker_data:
-                continue
-
-            if marker_data.get("id") != "pyblish.avalon.instance":
-                continue
-
-            file_path = clip_data["fpath"]
-            first_frame = opfapi.get_frame_from_path(file_path) or 0
-
-            # calculate head and tail with forward compatibility
-            head = clip_data.get("segment_head")
-            tail = clip_data.get("segment_tail")
-
-            if not head:
-                head = int(clip_data["source_in"]) - int(first_frame)
-            if not tail:
-                tail = int(
-                    clip_data["source_duration"] - (
-                        head + clip_data["record_duration"]
-                    )
-                )
-
-            # solve handles length
-            marker_data["handleStart"] = min(
-                marker_data["handleStart"], head)
-            marker_data["handleEnd"] = min(
-                marker_data["handleEnd"], tail)
-
-            # add audio to families
-            with_audio = False
-            if marker_data.pop("audio"):
-                with_audio = True
-
-            # add tag data to instance data
-            data = {
-                k: v for k, v in marker_data.items()
-                if k not in ("id", "applieswhole", "label")
-            }
-
-            asset = marker_data["asset"]
-            subset = marker_data["subset"]
-
-            # insert family into families
-            family = marker_data["family"]
-            families = [str(f) for f in marker_data["families"]]
-            families.insert(0, str(family))
-
-            # form label
-            label = asset
-            if asset != clip_name:
-                label += " ({})".format(clip_name)
-            label += " {}".format(subset)
-            label += " {}".format("[" + ", ".join(families) + "]")
-
-            data.update({
-                "name": "{}_{}".format(asset, subset),
-                "label": label,
-                "asset": asset,
-                "item": segment,
-                "families": families,
-                "publish": marker_data["publish"],
-                "fps": context.data["fps"],
-            })
-
-            # # otio clip data
-            # otio_data = self.get_otio_clip_instance_data(segment) or {}
-            # self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
-            # data.update(otio_data)
-            # self.log.debug("__ data: {}".format(pformat(data)))
-
-            # # add resolution
-            # self.get_resolution_to_data(data, context)
-
-            # create instance
-            instance = context.create_instance(**data)
-
-            # add colorspace data
-            instance.data.update({
-                "versionData": {
-                    "colorspace": clip_data["colour_space"],
-                }
-            })
-
-            # create shot instance for shot attributes create/update
-            self.create_shot_instance(context, clip_name, **data)
-
-            self.log.info("Creating instance: {}".format(instance))
-            self.log.info(
-                "_ instance.data: {}".format(pformat(instance.data)))
-
-            if not with_audio:
-                continue
-
-            # add audioReview attribute to plate instance data
-            # if reviewTrack is on
-            if marker_data.get("reviewTrack") is not None:
-                instance.data["reviewAudio"] = True
+        # process all sellected
+        with opfapi.maintained_segment_selection(sequence) as selected_segments:
+            for segment in selected_segments:
+                clip_data = opfapi.get_segment_attributes(segment)
+                clip_name = clip_data["segment_name"]
+                self.log.debug("clip_name: {}".format(clip_name))
+
+                # get openpype tag data
+                marker_data = opfapi.get_segment_data_marker(segment)
+                self.log.debug("__ marker_data: {}".format(pformat(marker_data)))
+
+                if not marker_data:
+                    continue
+
+                if marker_data.get("id") != "pyblish.avalon.instance":
+                    continue
+
+                file_path = clip_data["fpath"]
+                first_frame = opfapi.get_frame_from_path(file_path) or 0
+
+                # calculate head and tail with forward compatibility
+                head = clip_data.get("segment_head")
+                tail = clip_data.get("segment_tail")
+
+                if not head:
+                    head = int(clip_data["source_in"]) - int(first_frame)
+                if not tail:
+                    tail = int(
+                        clip_data["source_duration"] - (
+                            head + clip_data["record_duration"]
+                        )
+                    )
+
+                # solve handles length
+                marker_data["handleStart"] = min(
+                    marker_data["handleStart"], head)
+                marker_data["handleEnd"] = min(
+                    marker_data["handleEnd"], tail)
+
+                # add audio to families
+                with_audio = False
+                if marker_data.pop("audio"):
+                    with_audio = True
+
+                # add tag data to instance data
+                data = {
+                    k: v for k, v in marker_data.items()
+                    if k not in ("id", "applieswhole", "label")
+                }
+
+                asset = marker_data["asset"]
+                subset = marker_data["subset"]
+
+                # insert family into families
+                family = marker_data["family"]
+                families = [str(f) for f in marker_data["families"]]
+                families.insert(0, str(family))
+
+                # form label
+                label = asset
+                if asset != clip_name:
+                    label += " ({})".format(clip_name)
+                label += " {}".format(subset)
+                label += " {}".format("[" + ", ".join(families) + "]")
+
+                data.update({
+                    "name": "{}_{}".format(asset, subset),
+                    "label": label,
+                    "asset": asset,
+                    "item": segment,
+                    "families": families,
+                    "publish": marker_data["publish"],
+                    "fps": context.data["fps"],
+                })
+
+                # # otio clip data
+                # otio_data = self.get_otio_clip_instance_data(segment) or {}
+                # self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
+                # data.update(otio_data)
+                # self.log.debug("__ data: {}".format(pformat(data)))
+
+                # # add resolution
+                # self.get_resolution_to_data(data, context)
+
+                # create instance
+                instance = context.create_instance(**data)
+
+                # add colorspace data
+                instance.data.update({
+                    "versionData": {
+                        "colorspace": clip_data["colour_space"],
+                    }
+                })
+
+                # create shot instance for shot attributes create/update
+                self.create_shot_instance(context, clip_name, **data)
+
+                self.log.info("Creating instance: {}".format(instance))
+                self.log.info(
+                    "_ instance.data: {}".format(pformat(instance.data)))
+
+                if not with_audio:
+                    continue
+
+                # add audioReview attribute to plate instance data
+                # if reviewTrack is on
+                if marker_data.get("reviewTrack") is not None:
+                    instance.data["reviewAudio"] = True
 
     def get_resolution_to_data(self, data, context):
         assert data.get("otioClip"), "Missing `otioClip` data"
diff --git a/openpype/hosts/flame/plugins/publish/precollect_workfile.py b/openpype/hosts/flame/plugins/publish/precollect_workfile.py
index e7383ddec8..aff85e22e6 100644
--- a/openpype/hosts/flame/plugins/publish/precollect_workfile.py
+++ b/openpype/hosts/flame/plugins/publish/precollect_workfile.py
@@ -17,7 +17,8 @@ class PrecollecTimelineOCIO(pyblish.api.ContextPlugin):
         sequence = opfapi.get_current_sequence(opfapi.CTX.selection)
 
         # adding otio timeline to context
-        otio_timeline = flame_export.create_otio_timeline(sequence)
+        with opfapi.maintained_segment_selection(sequence):
+            otio_timeline = flame_export.create_otio_timeline(sequence)
 
         instance_data = {
             "name": "{}_{}".format(asset, subset),
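Editorial note: the plugins in this series depend on pyblish order offsets to run in a fixed sequence; a small recap using the values taken from the patches above (pyblish runs plugins in ascending `order`):

```python
import pyblish.api

orders = {
    "PrecollecTimelineOCIO": pyblish.api.CollectorOrder - 0.5,   # PATCH 01
    "PrecollectInstances": pyblish.api.CollectorOrder - 0.49,    # PATCH 08
    "CollectTestSelection": pyblish.api.CollectorOrder,          # PATCH 10 (now inactive)
    "ExtractOTIOFile": pyblish.api.ExtractorOrder - 0.45,        # PATCH 03
}

# Print the plugins in the order pyblish would run them.
for name in sorted(orders, key=orders.get):
    print(name, orders[name])
```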