diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index 920f3431a7..6bf0cd9754 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -2,6 +2,7 @@ import os
 import re
 import queue
 import collections
+import copy
 
 from pype.ftrack.lib.io_nonsingleton import DbConnector
 
@@ -756,19 +757,19 @@ class SyncEntitiesFactory:
             prepared_avalon_attr_ca_id = avalon_attrs_ca_id.get(attr_key)
             if prepared_attrs:
                 self.entities_dict[entity_id]["custom_attributes"] = (
-                    prepared_attrs.copy()
+                    copy.deepcopy(prepared_attrs)
                 )
             if prepared_attrs_ca_id:
                 self.entities_dict[entity_id]["custom_attributes_id"] = (
-                    prepared_attrs_ca_id.copy()
+                    copy.deepcopy(prepared_attrs_ca_id)
                 )
             if prepared_avalon_attr:
                 self.entities_dict[entity_id]["avalon_attrs"] = (
-                    prepared_avalon_attr.copy()
+                    copy.deepcopy(prepared_avalon_attr)
                 )
             if prepared_avalon_attr_ca_id:
                 self.entities_dict[entity_id]["avalon_attrs_id"] = (
-                    prepared_avalon_attr_ca_id.copy()
+                    copy.deepcopy(prepared_avalon_attr_ca_id)
                 )
 
         # TODO query custom attributes by entity_id
@@ -852,7 +853,7 @@ class SyncEntitiesFactory:
             # Skip project because has stored defaults at the moment
             if entity_dict["entity_type"] == "project":
                 continue
-            entity_dict["hier_attrs"] = prepare_dict.copy()
+            entity_dict["hier_attrs"] = copy.deepcopy(prepare_dict)
             for key, val in prepare_dict_avalon.items():
                 entity_dict["avalon_attrs"][key] = val
 
@@ -878,7 +879,13 @@ class SyncEntitiesFactory:
         for item in values["data"]:
             value = item["value"]
-            if value is None:
+            # WARNING It is not possible to propagate enumerated hierarchical
+            # attributes with multiselection 100% right. Unsetting all values
+            # will cause inheritance from the parent.
+            if (
+                value is None
+                or (isinstance(value, (tuple, list)) and not value)
+            ):
                 continue
             entity_id = item["entity_id"]
             key = attribute_key_by_id[item["configuration_id"]]
@@ -909,7 +916,7 @@ class SyncEntitiesFactory:
         while not hier_down_queue.empty():
             hier_values, parent_id = hier_down_queue.get()
             for child_id in self.entities_dict[parent_id]["children"]:
-                _hier_values = hier_values.copy()
+                _hier_values = copy.deepcopy(hier_values)
                 for key in attributes_by_key.keys():
                     if key.startswith("avalon_"):
                         store_key = "avalon_attrs"
@@ -1891,7 +1898,7 @@ class SyncEntitiesFactory:
         parents_queue.put((self.ft_project_id, [], False))
         while not parents_queue.empty():
             ftrack_id, parent_parents, changed = parents_queue.get()
-            _parents = parent_parents.copy()
+            _parents = copy.deepcopy(parent_parents)
            if ftrack_id not in hierarchy_changing_ids and not changed:
                 if ftrack_id != self.ft_project_id:
                     _parents.append(self.entities_dict[ftrack_id]["name"])
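
The `.copy()` → `copy.deepcopy()` swaps above matter because the prepared attribute mappings nest dictionaries one level down; `dict.copy()` is shallow, so every entity would share, and mutate, the same inner dicts. A minimal sketch of the difference, with a made-up `defaults` dict standing in for the prepared attributes:

```python
import copy

# `defaults` stands in for the prepared attribute dicts, which nest
# one level deep
defaults = {"avalon": {"fps": 25}}

shallow = defaults.copy()
shallow["avalon"]["fps"] = 24   # also changes defaults["avalon"]["fps"]

defaults = {"avalon": {"fps": 25}}
deep = copy.deepcopy(defaults)
deep["avalon"]["fps"] = 30      # defaults stays untouched
```

The same reasoning applies to `hier_values` and `parent_parents`, which are pushed through queues and mutated per child.
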
diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py
index 0aad3b2433..151b8882a3 100644
--- a/pype/plugins/ftrack/publish/collect_ftrack_api.py
+++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py
@@ -22,7 +22,7 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
         ftrack_log.setLevel(logging.WARNING)
 
         # Collect session
-        session = ftrack_api.Session()
+        session = ftrack_api.Session(auto_connect_event_hub=True)
         self.log.debug("Ftrack user: \"{0}\"".format(session.api_user))
         context.data["ftrackSession"] = session
 
diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py
index 71917946b8..6f65d6ce01 100644
--- a/pype/plugins/global/publish/extract_burnin.py
+++ b/pype/plugins/global/publish/extract_burnin.py
@@ -18,7 +18,7 @@ class ExtractBurnin(pype.api.Extractor):
     label = "Extract burnins"
     order = pyblish.api.ExtractorOrder + 0.03
     families = ["review", "burnin"]
-    hosts = ["nuke", "maya", "shell", "premiere"]
+    hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"]
     optional = True
 
     def process(self, instance):
diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index b0db2423d4..81a96586d5 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -20,15 +20,15 @@ class ExtractReview(pyblish.api.InstancePlugin):
     label = "Extract Review"
     order = pyblish.api.ExtractorOrder + 0.02
     families = ["review"]
-    hosts = ["nuke", "maya", "shell", "premiere"]
+    hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"]
     outputs = {}
     ext_filter = []
     to_width = 1920
     to_height = 1080
 
-    def process(self, instance):
+    def process(self, instance):
         output_profiles = self.outputs or {}
 
         inst_data = instance.data
@@ -82,6 +82,12 @@ class ExtractReview(pyblish.api.InstancePlugin):
             repre_new = repre.copy()
             ext = profile.get("ext", None)
             p_tags = profile.get('tags', [])
+
+            # append repre tags into profile tags
+            for t in tags:
+                if t not in p_tags:
+                    p_tags.append(t)
+
             self.log.info("p_tags: `{}`".format(p_tags))
 
             # adding control for presets to be sequence
@@ -175,7 +181,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
             frame_start_handle = frame_start - handle_start
             frame_end_handle = frame_end + handle_end
             if isinstance(repre["files"], list):
-                if frame_start_handle != repre.get("detectedStart", frame_start_handle):
+                if frame_start_handle != repre.get(
+                        "detectedStart", frame_start_handle):
                     frame_start_handle = repre.get("detectedStart")
 
                 # exclude handle if no handles defined
diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index 08c390d040..0cd46d8891 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -301,6 +301,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         sequence_repre = isinstance(files, list)
         repre_context = None
         if sequence_repre:
+            self.log.debug(
+                "files: {}".format(files))
             src_collections, remainder = clique.assemble(files)
             self.log.debug(
                 "src_tail_collections: {}".format(str(src_collections)))
@@ -347,6 +349,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                     anatomy.templates["render"].get("padding")
                 )
             )
+            index_frame_start = int(repre.get("frameStart"))
 
             # exception for slate workflow
diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py
index 1850df2d00..c70953d23f 100644
--- a/pype/plugins/nuke/publish/collect_writes.py
+++ b/pype/plugins/nuke/publish/collect_writes.py
@@ -115,7 +115,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
 
         # Add version data to instance
         version_data = {
-            "colorspace": node["colorspace"].value(),
+            "colorspace": node["colorspace"].value(),
         }
 
         instance.data["family"] = "write"
@@ -150,6 +150,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
             "deadlinePriority": deadlinePriority
         })
 
+        if "render" in families:
+            instance.data["family"] = "render2d"
+        if "render" not in families:
+            instance.data["families"].insert(0, "render")
+
         if "prerender" in families:
             instance.data.update({
                 "family": "prerender",
diff --git a/pype/plugins/nukestudio/_unused/collect_timecodes.py b/pype/plugins/nukestudio/_unused/collect_timecodes.py
new file mode 100644
index 0000000000..5ac07314a4
--- /dev/null
+++ b/pype/plugins/nukestudio/_unused/collect_timecodes.py
@@ -0,0 +1,90 @@
+import pyblish.api
+import opentimelineio.opentime as otio_ot
+
+
+class CollectClipTimecodes(pyblish.api.InstancePlugin):
+    """Collect time with OpenTimelineIO:
+        source_h(In,Out)[timecode, sec]
+        timeline(In,Out)[timecode, sec]
+    """
+
+    order = pyblish.api.CollectorOrder + 0.101
+    label = "Collect Timecodes"
+    hosts = ["nukestudio"]
+
+    def process(self, instance):
+
+        data = dict()
+        self.log.debug("__ instance.data: {}".format(instance.data))
+        # Timeline data.
+        handle_start = instance.data["handleStart"]
+        handle_end = instance.data["handleEnd"]
+
+        source_in_h = instance.data("sourceInH",
+                                    instance.data("sourceIn") - handle_start)
+        source_out_h = instance.data("sourceOutH",
+                                     instance.data("sourceOut") + handle_end)
+
+        timeline_in = instance.data["clipIn"]
+        timeline_out = instance.data["clipOut"]
+
+        # set frame start with tag or take it from timeline
+        frame_start = instance.data.get("startingFrame")
+
+        if not frame_start:
+            frame_start = timeline_in
+
+        source = instance.data.get("source")
+
+        otio_data = dict()
+        self.log.debug("__ source: `{}`".format(source))
+
+        rate_fps = instance.context.data["fps"]
+
+        otio_in_h_ratio = otio_ot.RationalTime(
+            value=(source.timecodeStart() + source_in_h),
+            rate=rate_fps)
+
+        otio_out_h_ratio = otio_ot.RationalTime(
+            value=(source.timecodeStart() + source_out_h),
+            rate=rate_fps)
+
+        otio_timeline_in_ratio = otio_ot.RationalTime(
+            value=int(
+                instance.data.get("timelineTimecodeStart", 0)) + timeline_in,
+            rate=rate_fps)
+
+        otio_timeline_out_ratio = otio_ot.RationalTime(
+            value=int(
+                instance.data.get("timelineTimecodeStart", 0)) + timeline_out,
+            rate=rate_fps)
+
+        otio_data.update({
+            "otioClipInHTimecode": otio_ot.to_timecode(otio_in_h_ratio),
+            "otioClipOutHTimecode": otio_ot.to_timecode(otio_out_h_ratio),
+            "otioClipInHSec": otio_ot.to_seconds(otio_in_h_ratio),
+            "otioClipOutHSec": otio_ot.to_seconds(otio_out_h_ratio),
+            "otioTimelineInTimecode": otio_ot.to_timecode(
+                otio_timeline_in_ratio),
+            "otioTimelineOutTimecode": otio_ot.to_timecode(
+                otio_timeline_out_ratio),
+            "otioTimelineInSec": otio_ot.to_seconds(otio_timeline_in_ratio),
+            "otioTimelineOutSec": otio_ot.to_seconds(otio_timeline_out_ratio)
+        })
+
+        data.update({
+            "otioData": otio_data,
+            "sourceTimecodeIn": otio_ot.to_timecode(otio_in_h_ratio),
+            "sourceTimecodeOut": otio_ot.to_timecode(otio_out_h_ratio)
+        })
+        instance.data.update(data)
+        self.log.debug("data: {}".format(instance.data))
diff --git a/pype/plugins/nukestudio/publish/collect_clip_resolution.py b/pype/plugins/nukestudio/publish/collect_clip_resolution.py
new file mode 100644
index 0000000000..b70f8f2f95
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_clip_resolution.py
@@ -0,0 +1,21 @@
+import pyblish.api
+
+
+class CollectClipResolution(pyblish.api.InstancePlugin):
+    """Collect clip geometry resolution"""
+
+    order = pyblish.api.CollectorOrder + 0.101
+    label = "Collect Clip Resolution"
+    hosts = ["nukestudio"]
+
+    def process(self, instance):
+        sequence = instance.context.data['activeSequence']
+        resolution_width = int(sequence.format().width())
+        resolution_height = int(sequence.format().height())
+        pixel_aspect = sequence.format().pixelAspect()
+
+        instance.data.update({
+            "resolutionWidth": resolution_width,
+            "resolutionHeight": resolution_height,
+            "pixelAspect": pixel_aspect
+        })
diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index 746df67485..d39e25bfc6 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ b/pype/plugins/nukestudio/publish/collect_clips.py
@@ -47,11 +47,42 @@ class CollectClips(api.ContextPlugin):
             track = item.parent()
             source = item.source().mediaSource()
             source_path = source.firstpath()
+            clip_in = int(item.timelineIn())
+            clip_out = int(item.timelineOut())
             file_head = source.filenameHead()
             file_info = next((f for f in source.fileinfos()), None)
-            source_first_frame = file_info.startFrame()
+            source_first_frame = int(file_info.startFrame())
             is_sequence = False
 
+            self.log.debug(
+                "__ assets_shared: {}".format(
+                    context.data["assetsShared"]))
+
+            # check for clips with the same range;
+            # this tests whether any vertically neighbouring
+            # clip has already been processed
+            clip_matching_with_range = next(
+                (k for k, v in context.data["assetsShared"].items()
+                 if (v.get("_clipIn", 0) == clip_in)
+                 and (v.get("_clipOut", 0) == clip_out)
+                 ), False)
+
+            # check if the clip name is the same as in the matched
+            # vertically neighbouring clip;
+            # if it is, the match is valid, so reset the variable to
+            # False so the wrong-name exception is not raised
+            if asset in str(clip_matching_with_range):
+                clip_matching_with_range = False
+
+            # raise the wrong-name exception if a conflict was found
+            assert (not clip_matching_with_range), (
+                "matching clip: {asset}"
+                " timeline range ({clip_in}:{clip_out})"
+                " conflicting with {clip_matching_with_range}"
+                " >> rename one of the clips so both names match <<"
+            ).format(
+                **locals())
+
             if not source.singleFile():
                 self.log.info("Single file")
                 is_sequence = True
@@ -89,32 +120,31 @@ class CollectClips(api.ContextPlugin):
             )
 
             data.update({
-                "name": "{0}_{1}".format(track.name(), item.name()),
-                "item": item,
-                "source": source,
-                "timecodeStart": str(source.timecodeStart()),
-                "timelineTimecodeStart": str(sequence.timecodeStart()),
-                "sourcePath": source_path,
-                "sourceFileHead": file_head,
-                "isSequence": is_sequence,
-                "track": track.name(),
-                "trackIndex": track_index,
-                "sourceFirst": source_first_frame,
-                "effects": effects,
-                "sourceIn": int(item.sourceIn()),
-                "sourceOut": int(item.sourceOut()),
-                "mediaDuration": (int(item.sourceOut()) -
-                                  int(item.sourceIn())) + 1,
-                "clipIn": int(item.timelineIn()),
-                "clipOut": int(item.timelineOut()),
-                "clipDuration": (
-                    int(item.timelineOut()) - int(
-                        item.timelineIn())) + 1,
-                "asset": asset,
-                "family": "clip",
-                "families": [],
-                "handleStart": projectdata.get("handleStart", 0),
-                "handleEnd": projectdata.get("handleEnd", 0)})
+                "name": "{0}_{1}".format(track.name(), item.name()),
+                "item": item,
+                "source": source,
+                "timecodeStart": str(source.timecodeStart()),
+                "timelineTimecodeStart": str(sequence.timecodeStart()),
+                "sourcePath": source_path,
+                "sourceFileHead": file_head,
+                "isSequence": is_sequence,
+                "track": track.name(),
+                "trackIndex": track_index,
+                "sourceFirst": source_first_frame,
+                "effects": effects,
+                "sourceIn": int(item.sourceIn()),
+                "sourceOut": int(item.sourceOut()),
+                "mediaDuration": int(source.duration()),
+                "clipIn": clip_in,
+                "clipOut": clip_out,
+                "clipDuration": (
+                    int(item.timelineOut()) - int(
+                        item.timelineIn())) + 1,
+                "asset": asset,
+                "family": "clip",
+                "families": [],
+                "handleStart": projectdata.get("handleStart", 0),
+                "handleEnd": projectdata.get("handleEnd", 0)})
 
             instance = context.create_instance(**data)
 
@@ -122,7 +152,10 @@ class CollectClips(api.ContextPlugin):
             self.log.info("Created instance.data: {}".format(instance.data))
             self.log.debug(">> effects: {}".format(instance.data["effects"]))
 
-            context.data["assetsShared"][asset] = dict()
+            context.data["assetsShared"][asset] = {
+                "_clipIn": clip_in,
+                "_clipOut": clip_out
+            }
 
         # from now we are collecting only subtrackitems on
         # track with no video items
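
The `assetsShared` bookkeeping added above lets collectors detect vertically stacked clips that occupy the same timeline range. A minimal sketch of the `next(...)` lookup pattern, with hypothetical `assetsShared` contents:

```python
# hypothetical assetsShared contents after two clips were collected
assets_shared = {
    "sh010": {"_clipIn": 100, "_clipOut": 148},
    "sh020": {"_clipIn": 149, "_clipOut": 200},
}

clip_in, clip_out = 100, 148

# first asset whose stored range equals this clip's range, else False
match = next(
    (k for k, v in assets_shared.items()
     if v.get("_clipIn", 0) == clip_in
     and v.get("_clipOut", 0) == clip_out),
    False)

assert match == "sh010"
```

Returning `False` as the default (rather than letting `next` raise `StopIteration`) keeps the common no-conflict path cheap and explicit.
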
{}".format(instance.data)) self.log.debug(">> effects: {}".format(instance.data["effects"])) - context.data["assetsShared"][asset] = dict() + context.data["assetsShared"][asset] = { + "_clipIn": clip_in, + "_clipOut": clip_out + } # from now we are collecting only subtrackitems on # track with no video items diff --git a/pype/plugins/nukestudio/publish/collect_frame_ranges.py b/pype/plugins/nukestudio/publish/collect_frame_ranges.py index 38224f683d..1cb5e5dd1e 100644 --- a/pype/plugins/nukestudio/publish/collect_frame_ranges.py +++ b/pype/plugins/nukestudio/publish/collect_frame_ranges.py @@ -35,14 +35,15 @@ class CollectClipFrameRanges(pyblish.api.InstancePlugin): frame_end = frame_start + (timeline_out - timeline_in) - data.update( - { - "sourceInH": source_in_h, - "sourceOutH": source_out_h, - "frameStart": frame_start, - "frameEnd": frame_end, - "clipInH": timeline_in_h, - "clipOutH": timeline_out_h + data.update({ + "sourceInH": source_in_h, + "sourceOutH": source_out_h, + "frameStart": frame_start, + "frameEnd": frame_end, + "clipInH": timeline_in_h, + "clipOutH": timeline_out_h, + "clipDurationH": instance.data.get( + "clipDuration") + handle_start + handle_end } ) self.log.debug("__ data: {}".format(data)) diff --git a/pype/plugins/nukestudio/publish/collect_framerate.py b/pype/plugins/nukestudio/publish/collect_framerate.py index a0fd4df599..694052f802 100644 --- a/pype/plugins/nukestudio/publish/collect_framerate.py +++ b/pype/plugins/nukestudio/publish/collect_framerate.py @@ -1,5 +1,6 @@ from pyblish import api + class CollectFramerate(api.ContextPlugin): """Collect framerate from selected sequence.""" @@ -9,4 +10,13 @@ class CollectFramerate(api.ContextPlugin): def process(self, context): sequence = context.data["activeSequence"] - context.data["fps"] = sequence.framerate().toFloat() + context.data["fps"] = self.get_rate(sequence) + + def get_rate(self, sequence): + num, den = sequence.framerate().toRational() + rate = float(num) / float(den) + + if rate.is_integer(): + return rate + + return round(rate, 3) diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index 5bc9bea7dd..38040f8c51 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -37,11 +37,13 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): assets_shared = context.data.get("assetsShared") tags = instance.data.get("tags", None) clip = instance.data["item"] - asset = instance.data.get("asset") + asset = instance.data["asset"] sequence = context.data['activeSequence'] - width = int(sequence.format().width()) - height = int(sequence.format().height()) - pixel_aspect = sequence.format().pixelAspect() + resolution_width = instance.data["resolutionWidth"] + resolution_height = instance.data["resolutionHeight"] + pixel_aspect = instance.data["pixelAspect"] + clip_in = instance.data["clipIn"] + clip_out = instance.data["clipOut"] fps = context.data["fps"] # build data for inner nukestudio project property @@ -72,6 +74,31 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): # and finding only hierarchical tag if "hierarchy" in t_type.lower(): + # Check for clips with the same range + # this is for testing if any vertically neighbouring + # clips has been already processed + match = next(( + k for k, v in assets_shared.items() + if (v["_clipIn"] == clip_in) + and (v["_clipOut"] == clip_out) + ), False) + + self.log.debug( + "__ 
assets_shared[match]: {}".format( + assets_shared[match])) + + # check if hierarchy key is present in matched + # vertically neighbouring clip + if not assets_shared[match].get("hierarchy"): + match = False + + # rise exception if multiple hierarchy tag found + assert not match, ( + "Two clips above each other with" + " hierarchy tag are not allowed" + " >> keep hierarchy tag only in one of them <<" + ) + d_metadata = dict() parents = list() @@ -82,7 +109,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): if "shot" in template.lower(): instance.data["asset"] = [ t for t in template.split('/')][-1] - template = "/".join([t for t in template.split('/')][0:-1]) + template = "/".join( + [t for t in template.split('/')][0:-1]) # take template from Tag.note and break it into parts template_split = template.split("/") @@ -149,8 +177,12 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): instance.data["hierarchy"] = hierarchy instance.data["parents"] = parents + self.log.info( + "clip: {asset}[{clip_in}:{clip_out}]".format( + **locals())) # adding to asset shared dict - self.log.debug("__ assets_shared: {}".format(assets_shared)) + self.log.debug( + "__ assets_shared: {}".format(assets_shared)) if assets_shared.get(asset): self.log.debug("Adding to shared assets: `{}`".format( asset)) @@ -162,11 +194,11 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): "asset": asset, "hierarchy": hierarchy, "parents": parents, - "resolutionWidth": width, - "resolutionHeight": height, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, "pixelAspect": pixel_aspect, "fps": fps, - "tasks": instance.data["tasks"] + "tasks": instance.data["tasks"] }) # adding frame start if any on instance @@ -175,8 +207,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): asset_shared.update({ "startingFrame": start_frame }) - - + self.log.debug( + "assets_shared: {assets_shared}".format(**locals())) class CollectHierarchyContext(pyblish.api.ContextPlugin): '''Collecting Hierarchy from instaces and building diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index 3e5ba51b60..770cef7e3f 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -64,15 +64,15 @@ class CollectPlates(api.InstancePlugin): # adding SourceResolution if Tag was present if instance.data.get("sourceResolution") and instance.data.get("main"): item = instance.data["item"] - width = int(item.source().mediaSource().width()) - height = int(item.source().mediaSource().height()) + resolution_width = int(item.source().mediaSource().width()) + resolution_height = int(item.source().mediaSource().height()) pixel_aspect = int(item.source().mediaSource().pixelAspect()) self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format( - width, height, pixel_aspect)) + resolution_width, resolution_height, pixel_aspect)) data.update({ - "width": width, - "height": height, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, "pixelAspect": pixel_aspect }) @@ -102,14 +102,6 @@ class CollectPlatesData(api.InstancePlugin): instance.data["representations"] = list() version_data = dict() - context = instance.context - anatomy = context.data.get("anatomy", None) - padding = int( - anatomy.templates["render"].get( - "frame_padding", - anatomy.templates["render"].get("padding") - ) - ) name = instance.data["subset"] source_path = instance.data["sourcePath"] 
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index 3e5ba51b60..770cef7e3f 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -64,15 +64,15 @@ class CollectPlates(api.InstancePlugin):
         # adding SourceResolution if Tag was present
         if instance.data.get("sourceResolution") and instance.data.get("main"):
             item = instance.data["item"]
-            width = int(item.source().mediaSource().width())
-            height = int(item.source().mediaSource().height())
+            resolution_width = int(item.source().mediaSource().width())
+            resolution_height = int(item.source().mediaSource().height())
             pixel_aspect = int(item.source().mediaSource().pixelAspect())
             self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format(
-                width, height, pixel_aspect))
+                resolution_width, resolution_height, pixel_aspect))
 
             data.update({
-                "width": width,
-                "height": height,
+                "resolutionWidth": resolution_width,
+                "resolutionHeight": resolution_height,
                 "pixelAspect": pixel_aspect
             })
 
@@ -102,14 +102,6 @@ class CollectPlatesData(api.InstancePlugin):
         instance.data["representations"] = list()
         version_data = dict()
-        context = instance.context
-        anatomy = context.data.get("anatomy", None)
-        padding = int(
-            anatomy.templates["render"].get(
-                "frame_padding",
-                anatomy.templates["render"].get("padding")
-            )
-        )
         name = instance.data["subset"]
         source_path = instance.data["sourcePath"]
@@ -154,6 +146,7 @@ class CollectPlatesData(api.InstancePlugin):
 
         source_first_frame = instance.data.get("sourceFirst")
         source_file_head = instance.data.get("sourceFileHead")
+        self.log.debug("source_first_frame: `{}`".format(source_first_frame))
 
         if instance.data.get("isSequence", False):
             self.log.info("Is sequence of files")
@@ -190,8 +183,7 @@ class CollectPlatesData(api.InstancePlugin):
             "frameEnd": instance.data["sourceOut"] - instance.data["sourceIn"] + 1,
             'step': 1,
             'fps': instance.context.data["fps"],
-            'preview': True,
-            'thumbnail': False,
+            'tags': ["preview"],
             'name': "preview",
             'ext': "mov",
         }
diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py
index c7fb5222b0..aa8c60767c 100644
--- a/pype/plugins/nukestudio/publish/collect_reviews.py
+++ b/pype/plugins/nukestudio/publish/collect_reviews.py
@@ -36,9 +36,10 @@ class CollectReviews(api.InstancePlugin):
             return
 
         if not track:
-            self.log.debug(
-                "Skipping \"{}\" because tag is not having `track` in metadata".format(instance)
-            )
+            self.log.debug((
+                "Skipping \"{}\" because tag does not have"
+                " `track` in metadata"
+            ).format(instance))
             return
 
         # add to representations
@@ -68,18 +69,17 @@ class CollectReviews(api.InstancePlugin):
                     rev_inst.data["name"]))
 
             if rev_inst is None:
-                raise RuntimeError(
-                    "TrackItem from track name `{}` has to be also selected".format(
-                        track)
-                )
+                raise RuntimeError((
+                    "TrackItem from track name `{}` has to"
+                    " be selected as well"
+                ).format(track))
+
             instance.data["families"].append("review")
 
             file_path = rev_inst.data.get("sourcePath")
             file_dir = os.path.dirname(file_path)
             file = os.path.basename(file_path)
             ext = os.path.splitext(file)[-1][1:]
-            handleStart = rev_inst.data.get("handleStart")
-            handleEnd = rev_inst.data.get("handleEnd")
 
         # change label
         instance.data["label"] = "{0} - {1} - ({2}) - review".format(
@@ -94,15 +94,35 @@ class CollectReviews(api.InstancePlugin):
             "stagingDir": file_dir,
             "frameStart": rev_inst.data.get("sourceIn"),
             "frameEnd": rev_inst.data.get("sourceOut"),
-            "frameStartFtrack": rev_inst.data.get("sourceIn") - handleStart,
-            "frameEndFtrack": rev_inst.data.get("sourceOut") + handleEnd,
+            "frameStartFtrack": rev_inst.data.get("sourceInH"),
+            "frameEndFtrack": rev_inst.data.get("sourceOutH"),
             "step": 1,
             "fps": rev_inst.data.get("fps"),
-            "preview": True,
-            "thumbnail": False,
             "name": "preview",
+            "tags": ["preview"],
             "ext": ext
         }
+
+        media_duration = instance.data.get("mediaDuration")
+        clip_duration_h = instance.data.get("clipDurationH")
+
+        if media_duration > clip_duration_h:
+            self.log.debug("Media duration is higher by: {}".format(
+                (media_duration - clip_duration_h)))
+            representation.update({
+                "frameStart": instance.data.get("sourceInH"),
+                "frameEnd": instance.data.get("sourceOutH"),
+                "tags": ["_cut-bigger", "delete"]
+            })
+        elif media_duration < clip_duration_h:
+            self.log.debug("Media duration is lower by: {}".format(
+                (media_duration - clip_duration_h)))
+            representation.update({
+                "frameStart": instance.data.get("sourceInH"),
+                "frameEnd": instance.data.get("sourceOutH"),
+                "tags": ["_cut-smaller", "delete"]
+            })
+
         instance.data["representations"].append(representation)
 
         self.log.debug("Added representation: {}".format(representation))
@@ -122,15 +142,18 @@ class CollectReviews(api.InstancePlugin):
         thumb_path = os.path.join(staging_dir, thumb_file)
         self.log.debug("__ thumb_path: {}".format(thumb_path))
 
-        thumb_frame = instance.data["sourceIn"] + ((instance.data["sourceOut"] - instance.data["sourceIn"])/2)
-
+        thumb_frame = instance.data["sourceIn"] + (
+            (instance.data["sourceOut"] - instance.data["sourceIn"]) / 2)
+
         self.log.debug("__ thumb_frame: {}".format(thumb_frame))
         thumbnail = item.thumbnail(thumb_frame).save(
             thumb_path,
             format='png'
         )
-        self.log.debug("__ sourceIn: `{}`".format(instance.data["sourceIn"]))
-        self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))
+        self.log.debug(
+            "__ sourceIn: `{}`".format(instance.data["sourceIn"]))
+        self.log.debug(
+            "__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))
 
         self.log.debug("__ thumbnail: {}".format(thumbnail))
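
The duration check above compares the source media length against the handle-extended clip duration (`clipDurationH`, added in collect_frame_ranges) and tags mismatching representations so the cut-up extractor that follows can rebuild them. A sketch of the decision, with hypothetical frame counts:

```python
# hypothetical durations, in frames
media_duration = 120    # length of the source media file
clip_duration_h = 100   # clip length including both handles

if media_duration > clip_duration_h:
    tags = ["_cut-bigger", "delete"]    # source must be trimmed down
elif media_duration < clip_duration_h:
    tags = ["_cut-smaller", "delete"]   # source must be padded out
else:
    tags = ["preview"]                  # lengths match, keep as-is

print(tags)  # ['_cut-bigger', 'delete']
```

The `delete` tag marks the original representation for removal once the cut-up version has been generated.
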
thumb_frame = instance.data["sourceIn"] + ( + (instance.data["sourceOut"] - instance.data["sourceIn"]) / 2) + self.log.debug("__ thumb_frame: {}".format(thumb_frame)) thumbnail = item.thumbnail(thumb_frame).save( thumb_path, format='png' ) - self.log.debug("__ sourceIn: `{}`".format(instance.data["sourceIn"])) - self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame)) + self.log.debug( + "__ sourceIn: `{}`".format(instance.data["sourceIn"])) + self.log.debug( + "__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame)) self.log.debug("__ thumbnail: {}".format(thumbnail)) diff --git a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py new file mode 100644 index 0000000000..a4fbf90bed --- /dev/null +++ b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py @@ -0,0 +1,245 @@ +import os +from pyblish import api +import pype + + +class ExtractReviewCutUpVideo(pype.api.Extractor): + """Cut up clips from long video file""" + + order = api.ExtractorOrder + # order = api.CollectorOrder + 0.1023 + label = "Extract Review CutUp Video" + hosts = ["nukestudio"] + families = ["review"] + + # presets + tags_addition = [] + + def process(self, instance): + inst_data = instance.data + asset = inst_data['asset'] + + # get representation and loop them + representations = inst_data["representations"] + + # get resolution default + resolution_width = inst_data["resolutionWidth"] + resolution_height = inst_data["resolutionHeight"] + + # frame range data + media_duration = inst_data["mediaDuration"] + + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") + + # filter out mov and img sequences + representations_new = representations[:] + for repre in representations: + input_args = list() + output_args = list() + + tags = repre.get("tags", []) + + # check if supported tags are in representation for activation + filter_tag = False + for tag in ["_cut-bigger", "_cut-smaller"]: + if tag in tags: + filter_tag = True + break + if not filter_tag: + continue + + self.log.debug("__ repre: {}".format(repre)) + + file = repre.get("files") + staging_dir = repre.get("stagingDir") + frame_start = repre.get("frameStart") + frame_end = repre.get("frameEnd") + fps = repre.get("fps") + ext = repre.get("ext") + + new_file_name = "{}_{}".format(asset, file) + + full_input_path = os.path.join( + staging_dir, file) + + full_output_dir = os.path.join( + staging_dir, "cuts") + + os.path.isdir(full_output_dir) or os.makedirs(full_output_dir) + + full_output_path = os.path.join( + full_output_dir, new_file_name) + + self.log.debug("__ full_input_path: {}".format(full_input_path)) + self.log.debug("__ full_output_path: {}".format(full_output_path)) + + # check if audio stream is in input video file + ffprob_cmd = ( + "{ffprobe_path} -i {full_input_path} -show_streams " + "-select_streams a -loglevel error" + ).format(**locals()) + self.log.debug("ffprob_cmd: {}".format(ffprob_cmd)) + audio_check_output = pype.api.subprocess(ffprob_cmd) + self.log.debug("audio_check_output: {}".format(audio_check_output)) + + # translate frame to sec + start_sec = float(frame_start) / fps + duration_sec = float(frame_end - frame_start + 1) / fps + + empty_add = None + + # check if not missing frames at start + if (start_sec < 0) or (media_duration < frame_end): + # for later swithing off `-c:v copy` output arg + empty_add = True + + # init empty variables + video_empty_start = 
video_layer_start = "" + audio_empty_start = audio_layer_start = "" + video_empty_end = video_layer_end = "" + audio_empty_end = audio_layer_end = "" + audio_input = audio_output = "" + v_inp_idx = 0 + concat_n = 1 + + # try to get video native resolution data + try: + resolution_output = pype.api.subprocess(( + "{ffprobe_path} -i {full_input_path} -v error " + "-select_streams v:0 -show_entries " + "stream=width,height -of csv=s=x:p=0" + ).format(**locals())) + + x, y = resolution_output.split("x") + resolution_width = int(x) + resolution_height = int(y) + except Exception as E: + self.log.warning( + "Video native resolution is untracable: {}".format(E)) + + if audio_check_output: + # adding input for empty audio + input_args.append("-f lavfi -i anullsrc") + + # define audio empty concat variables + audio_input = "[1:a]" + audio_output = ":a=1" + v_inp_idx = 1 + + # adding input for video black frame + input_args.append(( + "-f lavfi -i \"color=c=black:" + "s={resolution_width}x{resolution_height}:r={fps}\"" + ).format(**locals())) + + if (start_sec < 0): + # recalculate input video timing + empty_start_dur = abs(start_sec) + start_sec = 0 + duration_sec = float(frame_end - ( + frame_start + (empty_start_dur * fps)) + 1) / fps + + # define starting empty video concat variables + video_empty_start = ( + "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];" + ).format(**locals()) + video_layer_start = "[gv0]" + + if audio_check_output: + # define starting empty audio concat variables + audio_empty_start = ( + "[0]atrim=duration={empty_start_dur}[ga0];" + ).format(**locals()) + audio_layer_start = "[ga0]" + + # alter concat number of clips + concat_n += 1 + + # check if not missing frames at the end + if (media_duration < frame_end): + # recalculate timing + empty_end_dur = float(frame_end - media_duration + 1) / fps + duration_sec = float(media_duration - frame_start) / fps + + # define ending empty video concat variables + video_empty_end = ( + "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];" + ).format(**locals()) + video_layer_end = "[gv1]" + + if audio_check_output: + # define ending empty audio concat variables + audio_empty_end = ( + "[0]atrim=duration={empty_end_dur}[ga1];" + ).format(**locals()) + audio_layer_end = "[ga0]" + + # alter concat number of clips + concat_n += 1 + + # concatting black frame togather + output_args.append(( + "-filter_complex \"" + "{audio_empty_start}" + "{video_empty_start}" + "{audio_empty_end}" + "{video_empty_end}" + "{video_layer_start}{audio_layer_start}[1:v]{audio_input}" + "{video_layer_end}{audio_layer_end}" + "concat=n={concat_n}:v=1{audio_output}\"" + ).format(**locals())) + + # append ffmpeg input video clip + input_args.append("-ss {:0.2f}".format(start_sec)) + input_args.append("-t {:0.2f}".format(duration_sec)) + input_args.append("-i {}".format(full_input_path)) + + # add copy audio video codec if only shortening clip + if ("_cut-bigger" in tags) and (not empty_add): + output_args.append("-c:v copy") + + # make sure it is having no frame to frame comprassion + output_args.append("-intra") + + # output filename + output_args.append("-y") + output_args.append(full_output_path) + + mov_args = [ + ffmpeg_path, + " ".join(input_args), + " ".join(output_args) + ] + subprcs_cmd = " ".join(mov_args) + + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) + output = pype.api.subprocess(subprcs_cmd) + self.log.debug("Output: {}".format(output)) + + repre_new = { + "files": new_file_name, + "stagingDir": full_output_dir, + 
"frameStart": frame_start, + "frameEnd": frame_end, + "frameStartFtrack": frame_start, + "frameEndFtrack": frame_end, + "step": 1, + "fps": fps, + "name": "cut_up_preview", + "tags": ["review", "delete"] + self.tags_addition, + "ext": ext, + "anatomy_template": "publish" + } + + representations_new.append(repre_new) + + for repre in representations_new: + if ("delete" in repre.get("tags", [])) and ( + "cut_up_preview" not in repre["name"]): + representations_new.remove(repre) + + self.log.debug( + "Representations: {}".format(representations_new)) + instance.data["representations"] = representations_new