From 29de28cb5371ced19fdf35368ce8e4a9f4f8b074 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 15 Jul 2022 17:57:05 +0200
Subject: [PATCH] trayp: editorial publishing wip

---
 openpype/hosts/traypublisher/api/editorial.py |   1 +
 .../plugins/create/create_editorial.py        |  49 +++-
 .../plugins/publish/collect_clip_instances.py |  32 +++
 .../publish/collect_editorial_instances.py    |   8 +-
 .../publish/collect_editorial_resources.py    | 271 ++++++++++++++++++
 .../plugins/publish/collect_shot_instances.py | 163 +++++++++++
 .../publish/extract_trim_video_audio.py       |   2 +-
 .../plugins/publish/validate_asset_docs.py    |   4 +
 8 files changed, 516 insertions(+), 14 deletions(-)
 create mode 100644 openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py
 create mode 100644 openpype/hosts/traypublisher/plugins/publish/collect_editorial_resources.py
 create mode 100644 openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py
 rename openpype/{hosts/standalonepublisher => }/plugins/publish/extract_trim_video_audio.py (98%)

diff --git a/openpype/hosts/traypublisher/api/editorial.py b/openpype/hosts/traypublisher/api/editorial.py
index 713f1b5c6c..948e05ec61 100644
--- a/openpype/hosts/traypublisher/api/editorial.py
+++ b/openpype/hosts/traypublisher/api/editorial.py
@@ -4,6 +4,7 @@ from copy import deepcopy
 from openpype.client import get_asset_by_id
 from openpype.pipeline.create import CreatorError
 
+
 class ShotMetadataSover:
     """Collecting hierarchy context from `parents` and `hierarchy` data
     present in `clip` family instances coming from the request json data file
diff --git a/openpype/hosts/traypublisher/plugins/create/create_editorial.py b/openpype/hosts/traypublisher/plugins/create/create_editorial.py
index 8f7101385c..b87253a705 100644
--- a/openpype/hosts/traypublisher/plugins/create/create_editorial.py
+++ b/openpype/hosts/traypublisher/plugins/create/create_editorial.py
@@ -232,14 +232,10 @@ or updating already created. Publishing will create OTIO file.
     def _create_otio_instance(self, subset_name, data, pre_create_data):
         # get path of sequence
         file_path_data = pre_create_data["sequence_filepath_data"]
+        media_path_data = pre_create_data["media_filepaths_data"]
 
-        if len(file_path_data["filenames"]) == 0:
-            raise FileExistsError("File path was not added")
-
-        file_path = os.path.join(
-            file_path_data["directory"], file_path_data["filenames"][0])
-
-        self.log.info(f"file_path: {file_path}")
+        file_path = self._get_path_from_file_data(file_path_data)
+        media_path = self._get_path_from_file_data(media_path_data)
 
         # get editorial sequence file into otio timeline object
         extension = os.path.splitext(file_path)[1]
@@ -256,6 +252,7 @@ or updating already created. Publishing will create OTIO file.
         # Pass precreate data to creator attributes
         data.update({
             "sequenceFilePath": file_path,
+            "editorialSourcePath": media_path,
             "otioTimeline": otio.adapters.write_to_string(otio_timeline)
         })
 
@@ -263,6 +260,18 @@ or updating already created. Publishing will create OTIO file.
 
         return otio_timeline
 
+    def _get_path_from_file_data(self, file_path_data):
+        # TODO: just temporarily solving only one media file
+        if isinstance(file_path_data, list):
+            file_path_data = file_path_data.pop()
+
+        if len(file_path_data["filenames"]) == 0:
+            raise FileExistsError(
+                f"File path was not added: {file_path_data}")
+
+        return os.path.join(
+            file_path_data["directory"], file_path_data["filenames"][0])
+
     def _get_clip_instances(
         self,
         otio_timeline,
@@ -303,11 +312,14 @@ or updating already created. Publishing will create OTIO file.
             "instance_label": None,
             "instance_id": None
         }
-        self.log.info(
-            f"Creating subsets from presets: \n{pformat(family_presets)}")
+        self.log.info((
+            "Creating subsets from presets: \n"
+            f"{pformat(family_presets)}"
+        ))
 
         for _fpreset in family_presets:
             instance = self._make_subset_instance(
+                clip,
                 _fpreset,
                 deepcopy(base_instance_data),
                 parenting_data
@@ -316,6 +328,7 @@ or updating already created. Publishing will create OTIO file.
 
     def _make_subset_instance(
         self,
+        clip,
         _fpreset,
         future_instance_data,
         parenting_data
@@ -329,6 +342,8 @@ or updating already created. Publishing will create OTIO file.
 
         # add file extension filter only if it is not shot family
         if family == "shot":
+            future_instance_data["otioClip"] = (
+                otio.adapters.write_to_string(clip))
             c_instance = self.create_context.creators[
                 "editorial_shot"].create(
                     future_instance_data)
@@ -458,6 +473,7 @@ or updating already created. Publishing will create OTIO file.
             # TODO: should loockup shot name for update
             "asset": parent_asset_name,
             "task": "",
 
+            # parent time properties
             "trackStartFrame": track_start_frame,
             "timelineOffset": timeline_offset,
@@ -568,7 +584,20 @@ or updating already created. Publishing will create OTIO file.
                     ".fcpxml"
                 ],
                 allow_sequences=False,
-                label="Filepath",
+                single_item=True,
+                label="Sequence file",
+            ),
+            FileDef(
+                "media_filepaths_data",
+                folders=False,
+                extensions=[
+                    ".mov",
+                    ".mp4",
+                    ".wav"
+                ],
+                allow_sequences=False,
+                single_item=False,
+                label="Media files",
             ),
             # TODO: perhpas better would be timecode and fps input
             NumberDef(
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py
new file mode 100644
index 0000000000..e3dfb1512a
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py
@@ -0,0 +1,32 @@
+from pprint import pformat
+import pyblish.api
+
+
+class CollectClipInstance(pyblish.api.InstancePlugin):
+    """Collect clip instances and resolve their parents"""
+
+    label = "Collect Clip Instances"
+    order = pyblish.api.CollectorOrder
+
+    hosts = ["traypublisher"]
+    families = ["plate", "review", "audio"]
+
+    def process(self, instance):
+        creator_identifier = instance.data["creator_identifier"]
+        if "editorial" not in creator_identifier:
+            return
+
+        instance.data["families"].append("clip")
+
+        parent_instance_id = instance.data["parent_instance_id"]
+        edit_shared_data = instance.context.data["editorialSharedData"]
+        instance.data.update(
+            edit_shared_data[parent_instance_id]
+        )
+
+        if "editorialSourcePath" in instance.context.data.keys():
+            instance.data["editorialSourcePath"] = (
+                instance.context.data["editorialSourcePath"])
+            instance.data["families"].append("trimming")
+
+        self.log.debug(pformat(instance.data))
\ No newline at end of file
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py
index c088709a61..e181d0abe5 100644
--- a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py
@@ -4,11 +4,11 @@ import pyblish.api
 import opentimelineio as otio
 
 
-class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin):
+class CollectEditorialInstance(pyblish.api.InstancePlugin):
     """Collect data for instances created by settings creators."""
 
     label = "Collect Editorial Instances"
-    order = pyblish.api.CollectorOrder
+    order = pyblish.api.CollectorOrder - 0.1
 
     hosts = ["traypublisher"]
     families = ["editorial"]
@@ -27,6 +27,8 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin):
             otio_timeline_string)
 
         instance.context.data["otioTimeline"] = otio_timeline
+        instance.context.data["editorialSourcePath"] = (
+            instance.data["editorialSourcePath"])
 
         self.log.info(fpath)
 
@@ -41,6 +43,6 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin):
             "files": os.path.basename(fpath)
         })
 
-        self.log.debug("Created Simple Settings instance {}".format(
+        self.log.debug("Created Editorial Instance {}".format(
             pformat(instance.data)
         ))
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_resources.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_resources.py
new file mode 100644
index 0000000000..33a852e7a5
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_resources.py
@@ -0,0 +1,271 @@
+import os
+import re
+import tempfile
+import pyblish.api
+from copy import deepcopy
+import clique
+
+
+class CollectInstanceResources(pyblish.api.InstancePlugin):
+    """Collect instance's resources"""
+
+    # must be after `CollectInstances`
+    order = pyblish.api.CollectorOrder
+    label = "Collect Editorial Resources"
+    hosts = ["standalonepublisher"]
+    families = ["clip"]
+
+    def process(self, instance):
+        self.context = instance.context
+        self.log.info(f"Processing instance: {instance}")
+        self.new_instances = []
+        subset_files = dict()
+        subset_dirs = list()
+        anatomy = self.context.data["anatomy"]
+        anatomy_data = deepcopy(self.context.data["anatomyData"])
+        anatomy_data.update({"root": anatomy.roots})
+
+        subset = instance.data["subset"]
+        clip_name = instance.data["clipName"]
+
+        editorial_source_root = instance.data["editorialSourceRoot"]
+        editorial_source_path = instance.data["editorialSourcePath"]
+
+        # if `editorial_source_path` then loop through
+        if editorial_source_path:
+            # add family if mov or mp4 found which is longer for
+            # cutting `trimming` to enable `ExtractTrimmingVideoAudio` plugin
+            staging_dir = os.path.normpath(
+                tempfile.mkdtemp(prefix="pyblish_tmp_")
+            )
+            instance.data["stagingDir"] = staging_dir
+            instance.data["families"] += ["trimming"]
+            return
+
+        # if template pattern in path then fill it with `anatomy_data`
+        if "{" in editorial_source_root:
+            editorial_source_root = editorial_source_root.format(
+                **anatomy_data)
+
+        self.log.debug(f"root: {editorial_source_root}")
+        # loop `editorial_source_root` and find clip name in folders
+        # and look for any subset name alternatives
+        for root, dirs, _files in os.walk(editorial_source_root):
+            # search only for directories related to clip name
+            correct_clip_dir = None
+            for _d_search in dirs:
+                # avoid all non clip dirs
+                if _d_search not in clip_name:
+                    continue
+                # found correct dir for clip
+                correct_clip_dir = _d_search
+
+            # continue if clip dir was not found
+            if not correct_clip_dir:
+                continue
+
+            clip_dir_path = os.path.join(root, correct_clip_dir)
+            subset_files_items = list()
+            # list content of clip dir and search for subset items
+            for subset_item in os.listdir(clip_dir_path):
+                # avoid all items which are not defined as subsets by name
+                if subset not in subset_item:
+                    continue
+
+                subset_item_path = os.path.join(
+                    clip_dir_path, subset_item)
+                # if it is dir store it to `subset_dirs` list
+                if os.path.isdir(subset_item_path):
+                    subset_dirs.append(subset_item_path)
+
+                # if it is file then store it to `subset_files` list
+                if os.path.isfile(subset_item_path):
+                    subset_files_items.append(subset_item_path)
+
+            if subset_files_items:
+                subset_files.update({clip_dir_path: subset_files_items})
+
+            # break the loop if correct_clip_dir was captured
+            # no need to carry on if correct folder was found
+            if correct_clip_dir:
+                break
+
+        if subset_dirs:
+            # look all dirs and check for subset name alternatives
+            for _dir in subset_dirs:
+                instance_data = deepcopy(
+                    {k: v for k, v in instance.data.items()})
+                sub_dir = os.path.basename(_dir)
+                # if subset name is only alternative then create new instance
+                if sub_dir != subset:
+                    instance_data = self.duplicate_instance(
+                        instance_data, subset, sub_dir)
+
+                # create all representations
+                self.create_representations(
+                    os.listdir(_dir), instance_data, _dir)
+
+                if sub_dir == subset:
+                    self.new_instances.append(instance_data)
+                    # instance.data.update(instance_data)
+
+        if subset_files:
+            unique_subset_names = list()
+            root_dir = list(subset_files.keys()).pop()
+            files_list = subset_files[root_dir]
+            search_pattern = f"({subset}[A-Za-z0-9]+)(?=[\\._\\s])"
+            for _file in files_list:
+                pattern = re.compile(search_pattern)
+                match = pattern.findall(_file)
+                if not match:
+                    continue
+                match_subset = match.pop()
+                if match_subset in unique_subset_names:
+                    continue
+                unique_subset_names.append(match_subset)
+
+            self.log.debug(f"unique_subset_names: {unique_subset_names}")
+
+            for _un_subs in unique_subset_names:
+                instance_data = self.duplicate_instance(
+                    instance.data, subset, _un_subs)
+
+                # create all representations
+                self.create_representations(
+                    [os.path.basename(f) for f in files_list
+                     if _un_subs in f],
+                    instance_data, root_dir)
+
+        # remove the original instance as it had been used only
+        # as template and is duplicated
+        self.context.remove(instance)
+
+        # create all instances in self.new_instances into context
+        for new_instance in self.new_instances:
+            _new_instance = self.context.create_instance(
+                new_instance["name"])
+            _new_instance.data.update(new_instance)
+
+    def duplicate_instance(self, instance_data, subset, new_subset):
+
+        new_instance_data = dict()
+        for _key, _value in instance_data.items():
+            new_instance_data[_key] = _value
+            if not isinstance(_value, str):
+                continue
+            if subset in _value:
+                new_instance_data[_key] = _value.replace(
+                    subset, new_subset)
+
+        self.log.info(f"Creating new instance: {new_instance_data['name']}")
+        self.new_instances.append(new_instance_data)
+        return new_instance_data
+
+    def create_representations(
+            self, files_list, instance_data, staging_dir):
+        """ Create representations from Collection object
+        """
+        # collecting frames for later frame start/end reset
+        frames = list()
+        # break down Collection object to collections and remainders
+        collections, remainder = clique.assemble(files_list)
+        # add staging_dir to instance_data
+        instance_data["stagingDir"] = staging_dir
+        # add representations to instance_data
+        instance_data["representations"] = list()
+
+        collection_head_name = None
+        # loop through collections and create representations
+        for _collection in collections:
+            ext = _collection.tail[1:]
+            collection_head_name = _collection.head
+            frame_start = list(_collection.indexes)[0]
+            frame_end = list(_collection.indexes)[-1]
+            repre_data = {
+                "frameStart": frame_start,
+                "frameEnd": frame_end,
+                "name": ext,
+                "ext": ext,
+                "files": [item for item in _collection],
+                "stagingDir": staging_dir
+            }
+
+            if instance_data.get("keepSequence"):
+                repre_data_keep = deepcopy(repre_data)
+                instance_data["representations"].append(repre_data_keep)
+
+            if "review" in instance_data["families"]:
+                repre_data.update({
+                    "thumbnail": True,
+                    "frameStartFtrack": frame_start,
+                    "frameEndFtrack": frame_end,
+                    "step": 1,
+                    "fps": self.context.data.get("fps"),
+                    "name": "review",
+                    "tags": ["review", "ftrackreview", "delete"],
+                })
+            instance_data["representations"].append(repre_data)
+
+            # add to frames for frame range reset
+            frames.append(frame_start)
+            frames.append(frame_end)
+
+        # loop through remainders and create representations
+        for _reminding_file in remainder:
+            ext = os.path.splitext(_reminding_file)[-1][1:]
+            if ext not in instance_data["extensions"]:
+                continue
+            if collection_head_name and (
+                    (collection_head_name + ext) not in _reminding_file
+            ) and (ext in ["mp4", "mov"]):
+                self.log.info(f"Skipping file: {_reminding_file}")
+                continue
+            frame_start = 1
+            frame_end = 1
+
+            repre_data = {
+                "name": ext,
+                "ext": ext,
+                "files": _reminding_file,
+                "stagingDir": staging_dir
+            }
+
+            # exception for thumbnail
+            if "thumb" in _reminding_file:
+                repre_data.update({
+                    'name': "thumbnail",
+                    'thumbnail': True
+                })
+
+            # exception for mp4 preview
+            if ext in ["mp4", "mov"]:
+                frame_start = 0
+                frame_end = (
+                    (instance_data["frameEnd"] - instance_data["frameStart"])
+                    + 1)
+                # add review ftrack family into families
+                for _family in ["review", "ftrack"]:
+                    if _family not in instance_data["families"]:
+                        instance_data["families"].append(_family)
+                repre_data.update({
+                    "frameStart": frame_start,
+                    "frameEnd": frame_end,
+                    "frameStartFtrack": frame_start,
+                    "frameEndFtrack": frame_end,
+                    "step": 1,
+                    "fps": self.context.data.get("fps"),
+                    "name": "review",
+                    "thumbnail": True,
+                    "tags": ["review", "ftrackreview", "delete"],
+                })
+
+            # add to frames for frame range reset only if no collection
+            if not collections:
+                frames.append(frame_start)
+                frames.append(frame_end)
+
+            instance_data["representations"].append(repre_data)
+
+        # reset frame start / end
+        instance_data["frameStart"] = min(frames)
+        instance_data["frameEnd"] = max(frames)
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py
new file mode 100644
index 0000000000..5abafa498d
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py
@@ -0,0 +1,163 @@
+from pprint import pformat
+import pyblish.api
+import opentimelineio as otio
+
+
+class CollectShotInstance(pyblish.api.InstancePlugin):
+    """Collect shot instances and resolve their parents"""
+
+    label = "Collect Shot Instances"
+    order = pyblish.api.CollectorOrder - 0.09
+
+    hosts = ["traypublisher"]
+    families = ["shot"]
+
+    SHARED_KEYS = [
+        "asset",
+        "fps",
+        "frameStart",
+        "frameEnd",
+        "clipIn",
+        "clipOut",
+        "sourceIn",
+        "sourceOut"
+    ]
+
+    def process(self, instance):
+        self.log.debug(pformat(instance.data))
+
+        creator_identifier = instance.data["creator_identifier"]
+        if "editorial" not in creator_identifier:
+            return
+
+        # get otio clip object
+        otio_clip = self._get_otio_clip(instance)
+        instance.data["otioClip"] = otio_clip
+
+        # first solve the inputs from creator attr
+        data = self._solve_inputs_to_data(instance)
+        instance.data.update(data)
+
+        # distribute all shared keys to clips instances
+        self._distribute_shared_data(instance)
+        self._solve_hierarchy_context(instance)
+
+        self.log.debug(pformat(instance.data))
+
+    def _get_otio_clip(self, instance):
+        context = instance.context
+        # convert otio clip from string to object
+        otio_clip_string = instance.data.pop("otioClip")
+        otio_clip = otio.adapters.read_from_string(
+            otio_clip_string)
+
+        otio_timeline = context.data["otioTimeline"]
+
+        clips = [
+            clip for clip in otio_timeline.each_child(
+                descended_from_type=otio.schema.Clip)
+            if clip.name == otio_clip.name
+        ]
+        self.log.debug(otio_timeline.each_child(
+            descended_from_type=otio.schema.Clip))
+
+        otio_clip = clips.pop()
+        self.log.debug(f"__ otioclip.parent: {otio_clip.parent}")
+
+        return otio_clip
+
+    def _distribute_shared_data(self, instance):
+        context = instance.context
+
+        instance_id = instance.data["instance_id"]
+
+        if not context.data.get("editorialSharedData"):
+            context.data["editorialSharedData"] = {}
+
+        context.data["editorialSharedData"][instance_id] = {
+            _k: _v for _k, _v in instance.data.items()
+            if _k in self.SHARED_KEYS
+        }
+
+    def _solve_inputs_to_data(self, instance):
+        _cr_attrs = instance.data["creator_attributes"]
+        workfile_start_frame = _cr_attrs["workfile_start_frame"]
+        frame_start = _cr_attrs["frameStart"]
+        frame_end = _cr_attrs["frameEnd"]
+        frame_dur = frame_end - frame_start
+
+        return {
+            "asset": _cr_attrs["asset_name"],
+            "fps": float(_cr_attrs["fps"]),
+            "handleStart": _cr_attrs["handle_start"],
+            "handleEnd": _cr_attrs["handle_end"],
+            "frameStart": workfile_start_frame,
+            "frameEnd": workfile_start_frame + frame_dur,
+            "clipIn": _cr_attrs["clipIn"],
+            "clipOut": _cr_attrs["clipOut"],
+            "sourceIn": _cr_attrs["sourceIn"],
+            "sourceOut": _cr_attrs["sourceOut"],
+            "workfileFrameStart": workfile_start_frame
+        }
+
+    def _solve_hierarchy_context(self, instance):
+        context = instance.context
+
+        final_context = (
+            context.data["hierarchyContext"]
+            if context.data.get("hierarchyContext")
+            else {}
+        )
+
+        name = instance.data["asset"]
+
+        # get handles
+        handle_start = int(instance.data["handleStart"])
+        handle_end = int(instance.data["handleEnd"])
+
+        in_info = {
+            "entity_type": "Shot",
+            "custom_attributes": {
+                "handleStart": handle_start,
+                "handleEnd": handle_end,
+                "frameStart": instance.data["frameStart"],
+                "frameEnd": instance.data["frameEnd"],
+                "clipIn": instance.data["clipIn"],
+                "clipOut": instance.data["clipOut"],
+                "fps": instance.data["fps"]
+            },
+            "tasks": instance.data["tasks"]
+        }
+
+        parents = instance.data.get('parents', [])
+        self.log.debug(f"parents: {pformat(parents)}")
+
+        actual = {name: in_info}
+
+        for parent in reversed(parents):
+            parent_name = parent["entity_name"]
+            next_dict = {
+                parent_name: {
+                    "entity_type": parent["entity_type"],
+                    "childs": actual
+                }
+            }
+            actual = next_dict
+
+        final_context = self._update_dict(final_context, actual)
+
+        # adding hierarchy context to instance
+        context.data["hierarchyContext"] = final_context
+        self.log.debug(pformat(final_context))
+
+    def _update_dict(self, ex_dict, new_dict):
+        for key in ex_dict:
+            if key in new_dict and isinstance(ex_dict[key], dict):
+                new_dict[key] = self._update_dict(ex_dict[key], new_dict[key])
+            else:
+                if ex_dict.get(key) and new_dict.get(key):
+                    continue
+                else:
+                    new_dict[key] = ex_dict[key]
+
+        return new_dict
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py b/openpype/plugins/publish/extract_trim_video_audio.py
similarity index 98%
rename from openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py
rename to openpype/plugins/publish/extract_trim_video_audio.py
index 51dc84e9a2..b0c30283d9 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py
+++ b/openpype/plugins/publish/extract_trim_video_audio.py
@@ -14,7 +14,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
     # must be before `ExtractThumbnailSP`
     order = pyblish.api.ExtractorOrder - 0.01
     label = "Extract Trim Video/Audio"
-    hosts = ["standalonepublisher"]
+    hosts = ["standalonepublisher", "traypublisher"]
     families = ["clip", "trimming"]
 
     # make sure it is enabled only if at least both families are available
diff --git a/openpype/plugins/publish/validate_asset_docs.py b/openpype/plugins/publish/validate_asset_docs.py
index bc1f9b9e6c..daeb442f28 100644
--- a/openpype/plugins/publish/validate_asset_docs.py
+++ b/openpype/plugins/publish/validate_asset_docs.py
@@ -24,6 +24,10 @@ class ValidateAssetDocs(pyblish.api.InstancePlugin):
         if instance.data.get("assetEntity"):
             self.log.info("Instance has set asset document in its data.")
 
+        elif "editorial" in instance.data.get("creator_identifier", ""):
+            # skip editorial instances
+            self.log.info("Editorial instance does not need to be checked.")
+
         else:
             raise PublishValidationError((
                 "Instance \"{}\" doesn't have asset document "
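For reference, the collectors added by this patch hand data from shot instances to their child clip instances through context.data["editorialSharedData"]: CollectShotInstance stores the SHARED_KEYS subset of each shot's data under its instance id, and CollectClipInstance later merges that block into any clip instance carrying a matching "parent_instance_id". The following minimal sketch (not part of the patch) illustrates the intended flow with plain dicts standing in for pyblish context and instance objects; the ids and frame values are made up.

SHARED_KEYS = [
    "asset", "fps", "frameStart", "frameEnd",
    "clipIn", "clipOut", "sourceIn", "sourceOut"
]

# stand-in for pyblish context.data
context_data = {}

# CollectShotInstance side: publish only the shared subset of the shot data
shot_instance = {
    "instance_id": "shot-001",   # made-up id
    "asset": "sh010",
    "fps": 25.0,
    "frameStart": 1001,
    "frameEnd": 1050,
    "clipIn": 86400,
    "clipOut": 86449,
    "sourceIn": 0,
    "sourceOut": 49,
    "task": "edit",              # not in SHARED_KEYS, so it is not shared
}
context_data.setdefault("editorialSharedData", {})
context_data["editorialSharedData"][shot_instance["instance_id"]] = {
    key: value for key, value in shot_instance.items()
    if key in SHARED_KEYS
}

# CollectClipInstance side: a plate instance resolves its parent shot
# through "parent_instance_id" and copies the shared block into itself
plate_instance = {
    "parent_instance_id": "shot-001",
    "families": ["plate"],
}
plate_instance.update(
    context_data["editorialSharedData"][plate_instance["parent_instance_id"]]
)

print(plate_instance["asset"], plate_instance["frameStart"])  # sh010 1001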