diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 87ff925a0a..a0f27f383f 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -17,6 +17,7 @@ class ExtractBurnin(pype.api.Extractor): label = "Quicktime with burnins" order = pyblish.api.ExtractorOrder + 0.03 families = ["review", "burnin"] + hosts = ["nuke", "maya", "shell"] optional = True def process(self, instance): diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 1b66b4e9d2..0eb6bfd694 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -19,6 +19,7 @@ class ExtractReview(pyblish.api.InstancePlugin): label = "Extract Review" order = pyblish.api.ExtractorOrder + 0.02 families = ["review"] + hosts = ["nuke", "maya", "shell"] def process(self, instance): # adding plugin attributes from presets @@ -53,7 +54,7 @@ class ExtractReview(pyblish.api.InstancePlugin): ext = "mov" self.log.warning( "`ext` attribute not in output profile. 
Setting to default ext: `mov`") - + self.log.debug("instance.families: {}".format(instance.data['families'])) self.log.debug("profile.families: {}".format(profile['families'])) diff --git a/pype/plugins/nuke/_load_unused/load_sequence.py b/pype/plugins/nuke/_load_unused/load_sequence.py deleted file mode 100644 index 695dd0b981..0000000000 --- a/pype/plugins/nuke/_load_unused/load_sequence.py +++ /dev/null @@ -1,252 +0,0 @@ -import os -import contextlib - -from avalon import api -import avalon.io as io - -from avalon.nuke import log -import nuke - - -@contextlib.contextmanager -def preserve_inputs(node, knobs): - """Preserve the node's inputs after context""" - - values = {} - for name in knobs: - try: - knob_value = node[name].vaule() - values[name] = knob_value - except ValueError: - log.warning("missing knob {} in node {}" - "{}".format(name, node['name'].value())) - - try: - yield - finally: - for name, value in values.items(): - node[name].setValue(value) - - -@contextlib.contextmanager -def preserve_trim(node): - """Preserve the relative trim of the Loader tool. - - This tries to preserve the loader's trim (trim in and trim out) after - the context by reapplying the "amount" it trims on the clip's length at - start and end. 
- - """ - # working script frame range - script_start = nuke.root()["start_frame"].value() - - start_at_frame = None - offset_frame = None - if node['frame_mode'].value() == "start at": - start_at_frame = node['frame'].value() - if node['frame_mode'].value() is "offset": - offset_frame = node['frame'].value() - - try: - yield - finally: - if start_at_frame: - node['frame_mode'].setValue("start at") - node['frame'].setValue(str(script_start)) - log.info("start frame of reader was set to" - "{}".format(script_start)) - - if offset_frame: - node['frame_mode'].setValue("offset") - node['frame'].setValue(str((script_start + offset_frame))) - log.info("start frame of reader was set to" - "{}".format(script_start)) - - -def loader_shift(node, frame, relative=True): - """Shift global in time by i preserving duration - - This moves the loader by i frames preserving global duration. When relative - is False it will shift the global in to the start frame. - - Args: - loader (tool): The fusion loader tool. - frame (int): The amount of frames to move. - relative (bool): When True the shift is relative, else the shift will - change the global in to frame. 
- - Returns: - int: The resulting relative frame change (how much it moved) - - """ - # working script frame range - script_start = nuke.root()["start_frame"].value() - - if node['frame_mode'].value() == "start at": - start_at_frame = node['frame'].value() - if node['frame_mode'].value() is "offset": - offset_frame = node['frame'].value() - - if relative: - shift = frame - else: - if start_at_frame: - shift = frame - if offset_frame: - shift = frame + offset_frame - - # Shifting global in will try to automatically compensate for the change - # in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those - # input values to "just shift" the clip - with preserve_inputs(node, knobs=["file", - "first", - "last", - "originfirst", - "originlast", - "frame_mode", - "frame"]): - - # GlobalIn cannot be set past GlobalOut or vice versa - # so we must apply them in the order of the shift. - if start_at_frame: - node['frame_mode'].setValue("start at") - node['frame'].setValue(str(script_start + shift)) - if offset_frame: - node['frame_mode'].setValue("offset") - node['frame'].setValue(str(shift)) - - return int(shift) - - -class LoadSequence(api.Loader): - """Load image sequence into Nuke""" - - families = ["write"] - representations = ["*"] - - label = "Load sequence" - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, data): - - from avalon.nuke import ( - containerise, - ls_img_sequence, - viewer_update_and_undo_stop - ) - log.info("here i am") - # Fallback to asset name when namespace is None - if namespace is None: - namespace = context['asset']['name'] - - # Use the first file for now - # TODO: fix path fname - file = ls_img_sequence(os.path.dirname(self.fname), one=True) - - # Create the Loader with the filename path set - with viewer_update_and_undo_stop(): - # TODO: it might be universal read to img/geo/camera - r = nuke.createNode( - "Read", - "name {}".format(self.name)) # TODO: does self.name exist? 
- r["file"].setValue(file['path']) - if len(file['frames']) is 1: - first = file['frames'][0][0] - last = file['frames'][0][1] - r["originfirst"].setValue(first) - r["first"].setValue(first) - r["originlast"].setValue(last) - r["last"].setValue(last) - else: - first = file['frames'][0][0] - last = file['frames'][:-1][1] - r["originfirst"].setValue(first) - r["first"].setValue(first) - r["originlast"].setValue(last) - r["last"].setValue(last) - log.warning("Missing frames in image sequence") - - # Set global in point to start frame (if in version.data) - start = context["version"]["data"].get("startFrame", None) - if start is not None: - loader_shift(r, start, relative=False) - - containerise(r, - name=name, - namespace=namespace, - context=context, - loader=self.__class__.__name__) - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - """Update the Loader's path - - Fusion automatically tries to reset some variables when changing - the loader's path to a new file. 
These automatic changes are to its - inputs: - - """ - - from avalon.nuke import ( - viewer_update_and_undo_stop, - ls_img_sequence, - update_container - ) - log.info("this i can see") - node = container["_tool"] - # TODO: prepare also for other readers img/geo/camera - assert node.Class() == "Reader", "Must be Reader" - - root = api.get_representation_path(representation) - file = ls_img_sequence(os.path.dirname(root), one=True) - - # Get start frame from version data - version = io.find_one({"type": "version", - "_id": representation["parent"]}) - start = version["data"].get("startFrame") - if start is None: - log.warning("Missing start frame for updated version" - "assuming starts at frame 0 for: " - "{} ({})".format(node['name'].value(), representation)) - start = 0 - - with viewer_update_and_undo_stop(): - - # Update the loader's path whilst preserving some values - with preserve_trim(node): - with preserve_inputs(node, - knobs=["file", - "first", - "last", - "originfirst", - "originlast", - "frame_mode", - "frame"]): - node["file"] = file["path"] - - # Set the global in to the start frame of the sequence - global_in_changed = loader_shift(node, start, relative=False) - if global_in_changed: - # Log this change to the user - log.debug("Changed '{}' global in:" - " {:d}".format(node['name'].value(), start)) - - # Update the imprinted representation - update_container( - node, - {"representation": str(representation["_id"])} - ) - - def remove(self, container): - - from avalon.nuke import viewer_update_and_undo_stop - - node = container["_tool"] - assert node.Class() == "Reader", "Must be Reader" - - with viewer_update_and_undo_stop(): - nuke.delete(node) diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py index cd45c77906..a1e0602620 100644 --- a/pype/plugins/nuke/load/load_mov.py +++ b/pype/plugins/nuke/load/load_mov.py @@ -75,7 +75,7 @@ def loader_shift(node, frame, relative=True): class LoadMov(api.Loader): """Load mov file 
into Nuke""" - families = ["write", "source", "plate", "render"] + families = ["write", "source", "plate", "render", "review"] representations = ["mov", "preview", "review", "mp4"] label = "Load mov" diff --git a/pype/plugins/nukestudio/publish/collect_workfile_version.py b/pype/plugins/nukestudio/_unused/collect_workfile_version.py similarity index 85% rename from pype/plugins/nukestudio/publish/collect_workfile_version.py rename to pype/plugins/nukestudio/_unused/collect_workfile_version.py index 3904c22f52..733fbfc44a 100644 --- a/pype/plugins/nukestudio/publish/collect_workfile_version.py +++ b/pype/plugins/nukestudio/_unused/collect_workfile_version.py @@ -11,5 +11,5 @@ class CollectWorkfileVersion(pyblish.api.ContextPlugin): project = context.data('activeProject') path = project.path() - context.data["version"] = pype.get_version_from_path(path) + context.data["version"] = int(pype.get_version_from_path(path)) self.log.info("version: {}".format(context.data["version"])) diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index a91558ae2c..952838e590 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -78,9 +78,8 @@ class CollectClips(api.ContextPlugin): "sourceFirst": source_first_frame, "sourceIn": int(item.sourceIn()), "sourceOut": int(item.sourceOut()), - "startFrame": int(item.timelineIn()), - "endFrame": int(item.timelineOut()), - "fps": float(item.sequence().framerate().toFloat()) + "timelineIn": int(item.timelineIn()), + "timelineOut": int(item.timelineOut()) } ) @@ -93,7 +92,7 @@ class CollectClips(api.ContextPlugin): "handles": 0, "handleStart": projectdata.get("handles", 0), "handleEnd": projectdata.get("handles", 0), - "version": version + "version": int(version) } ) instance = context.create_instance(**data) diff --git a/pype/plugins/nukestudio/publish/collect_frame_ranges.py 
b/pype/plugins/nukestudio/publish/collect_frame_ranges.py new file mode 100644 index 0000000000..22b99a9ed4 --- /dev/null +++ b/pype/plugins/nukestudio/publish/collect_frame_ranges.py @@ -0,0 +1,48 @@ +import pyblish.api + +class CollectClipFrameRanges(pyblish.api.InstancePlugin): + """Collect all frame range data: source(In,Out), timeline(In,Out), edit_(in, out), f(start, end)""" + + order = pyblish.api.CollectorOrder + 0.101 + label = "Collect Frame Ranges" + hosts = ["nukestudio"] + + def process(self, instance): + + data = dict() + + # Timeline data. + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + + source_in_h = instance.data["sourceIn"] - handle_start + source_out_h = instance.data["sourceOut"] + handle_end + + timeline_in = instance.data["timelineIn"] + timeline_out = instance.data["timelineOut"] + + timeline_in_h = timeline_in - handle_start + timeline_out_h = timeline_out + handle_end + + # set frame start with tag or take it from timeline + frame_start = instance.data.get("frameStart") + + if not frame_start: + frame_start = timeline_in + + frame_end = frame_start + (timeline_out - timeline_in) + + data.update( + { + "sourceInH": source_in_h, + "sourceOutH": source_out_h, + "startFrame": frame_start, + "endFrame": frame_end, + "timelineInH": timeline_in_h, + "timelineOutH": timeline_out_h, + "edit_in": timeline_in, + "edit_out": timeline_out + } + ) + self.log.debug("__ data: {}".format(data)) + instance.data.update(data) diff --git a/pype/plugins/nukestudio/publish/collect_framerate.py b/pype/plugins/nukestudio/publish/collect_framerate.py index 56b76b5011..a0fd4df599 100644 --- a/pype/plugins/nukestudio/publish/collect_framerate.py +++ b/pype/plugins/nukestudio/publish/collect_framerate.py @@ -9,4 +9,4 @@ class CollectFramerate(api.ContextPlugin): def process(self, context): sequence = context.data["activeSequence"] - context.data["framerate"] = sequence.framerate().toFloat() + context.data["fps"] = 
sequence.framerate().toFloat() diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index 28b007b109..92149b7fc8 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -55,7 +55,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): self.log.debug("__ tags: {}".format(tags)) if not tags: - return + continue # loop trough all tags for t in tags: @@ -148,13 +148,13 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): self.log.debug("__ assets_shared: {}".format(assets_shared)) if assets_shared.get(asset): self.log.debug("Adding to shared assets: `{}`".format( - instance.data["name"])) + asset)) asset_shared = assets_shared.get(asset) else: asset_shared = assets_shared[asset] asset_shared.update({ - "asset": instance.data["asset"], + "asset": asset, "hierarchy": hierarchy, "parents": parents, "tasks": instance.data["tasks"] @@ -220,7 +220,11 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): # adding frame start if any on instance start_frame = s_asset_data.get("frameStart") if start_frame: - instance.data["frameStart"] = start_frame + instance.data["startFrame"] = start_frame + instance.data["endFrame"] = start_frame + ( + instance.data["timelineOut"] - + instance.data["timelineIn"]) + self.log.debug( @@ -249,14 +253,14 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): # get custom attributes of the shot if instance.data.get("main"): in_info['custom_attributes'] = { - 'handles': int(instance.data.get('handles')), + 'handles': int(instance.data.get('handles', 0)), 'handle_start': handle_start, 'handle_end': handle_end, - 'fstart': int(instance.data["startFrame"]), - 'fend': int(instance.data["endFrame"]), - 'fps': instance.data["fps"], - "edit_in": int(instance.data["startFrame"]), - "edit_out": int(instance.data["endFrame"]) + 'fstart': 
instance.data["startFrame"], + 'fend': instance.data["endFrame"], + 'fps': instance.context.data["fps"], + "edit_in": instance.data["timelineIn"], + "edit_out": instance.data["timelineOut"] } # adding SourceResolution if Tag was present @@ -273,15 +277,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): "pixel_aspect": pixel_aspect }) - start_frame = instance.data.get("frameStart") - if start_frame: - in_info['custom_attributes'].update({ - 'fstart': start_frame, - 'fend': start_frame + ( - instance.data["endFrame"] - - instance.data["startFrame"]) - }) - in_info['tasks'] = instance.data['tasks'] parents = instance.data.get('parents', []) diff --git a/pype/plugins/nukestudio/publish/collect_leader_clip.py b/pype/plugins/nukestudio/publish/collect_leader_clip.py new file mode 100644 index 0000000000..62ef420316 --- /dev/null +++ b/pype/plugins/nukestudio/publish/collect_leader_clip.py @@ -0,0 +1,24 @@ +from pyblish import api + + +class CollectLeaderClip(api.InstancePlugin): + """Collect Leader clip from selected track items. Clip with hierarchy Tag is defining sharable data attributes between other clips with `subset` tags. 
So `handle_start/end`, `frame_start`, etc""" + + order = api.CollectorOrder + 0.0111 + label = "Collect Leader Clip" + hosts = ["nukestudio"] + families = ['clip'] + + def process(self, instance): + # gets tags + tags = instance.data["tags"] + + for t in tags: + t_metadata = dict(t["metadata"]) + t_type = t_metadata.get("tag.label", "") + self.log.info("`hierarhy`: `{}`".format(t_type)) + # gets only task family tags and collect labels + if "hierarchy" in t_type.lower(): + if not instance.data.get("main"): + instance.data["main"] = True + self.log.info("`Leader Clip` found in instance.name: `{}`".format(instance.data["name"])) diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index abd02bfa78..0341fbfa6e 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -57,43 +57,8 @@ class CollectPlates(api.InstancePlugin): data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[1] ) - # # Timeline data. - # handle_start = int(instance.data["handleStart"] + data["handles"]) - # handle_end = int(instance.data["handleEnd"] + data["handles"]) - # Timeline data. 
- handle_start = int(instance.data["handleStart"]) - handle_end = int(instance.data["handleEnd"]) - - source_in_h = data["sourceIn"] - handle_start - source_out_h = data["sourceOut"] + handle_end - - timeline_in = int(data["item"].timelineIn()) - timeline_out = int(data["item"].timelineOut()) - - timeline_frame_start = timeline_in - handle_start - timeline_frame_end = timeline_out + handle_end - - frame_start = instance.data.get("frameStart", 1) - frame_end = frame_start + (data["sourceOut"] - data["sourceIn"]) - - data.update( - { - "sourceFirst": data["sourceFirst"], - "sourceIn": data["sourceIn"], - "sourceOut": data["sourceOut"], - "sourceInH": source_in_h, - "sourceOutH": source_out_h, - "frameStart": frame_start, - "startFrame": frame_start, - "endFrame": frame_end, - "timelineIn": timeline_in, - "timelineOut": timeline_out, - "timelineInHandles": timeline_frame_start, - "timelineOutHandles": timeline_frame_end, - "handleStart": handle_start, - "handleEnd": handle_end - } - ) + if "review" in instance.data["families"]: + data["label"] += " - review" # adding SourceResolution if Tag was present if instance.data.get("sourceResolution") and instance.data.get("main"): @@ -110,9 +75,6 @@ class CollectPlates(api.InstancePlugin): self.log.debug("Creating instance with name: {}".format(data["name"])) instance.context.create_instance(**data) - # # remove original instance - # instance.context.remove(instance) - class CollectPlatesData(api.InstancePlugin): """Collect plates""" @@ -124,6 +86,12 @@ class CollectPlatesData(api.InstancePlugin): def process(self, instance): import os + if "review" in instance.data.get("track", ""): + self.log.debug( + "Skipping \"{}\" because its `review` track " + "\"plate\"".format(instance) + ) + return # add to representations if not instance.data.get("representations"): @@ -135,9 +103,6 @@ class CollectPlatesData(api.InstancePlugin): padding = int(anatomy.templates['render']['padding']) name = instance.data["subset"] - asset = 
instance.data["asset"] - track = instance.data["track"] - version = instance.data["version"] source_path = instance.data["sourcePath"] source_file = os.path.basename(source_path) @@ -154,56 +119,20 @@ class CollectPlatesData(api.InstancePlugin): item = instance.data["item"] - # get handles - handle_start = int(instance.data["handleStart"]) - handle_end = int(instance.data["handleEnd"]) + transfer_data = [ + "handleStart", "handleEnd", "sourceIn", "sourceOut", "startFrame", "endFrame", "sourceInH", "sourceOutH", "timelineIn", "timelineOut", "timelineInH", "timelineOutH", "asset", "track", "version" + ] - # get source frames - source_in = int(instance.data["sourceIn"]) - source_out = int(instance.data["sourceOut"]) - - # get source frames - frame_start = int(instance.data["startFrame"]) - frame_end = int(instance.data["endFrame"]) - - # get source frames - source_in_h = int(instance.data["sourceInH"]) - source_out_h = int(instance.data["sourceOutH"]) - - # get timeline frames - timeline_in = int(instance.data["timelineIn"]) - timeline_out = int(instance.data["timelineOut"]) - - # frame-ranges with handles - timeline_frame_start = int(instance.data["timelineInHandles"]) - timeline_frame_end = int(instance.data["timelineOutHandles"]) - - # get colorspace - colorspace = item.sourceMediaColourTransform() - - # get sequence from context, and fps - fps = instance.data["fps"] + # pass data to version + version_data.update({k: instance.data[k] for k in transfer_data}) # add to data of representation version_data.update({ - "handles": handle_start, - "handleStart": handle_start, - "handleEnd": handle_end, - "sourceIn": source_in, - "sourceOut": source_out, - "startFrame": frame_start, - "endFrame": frame_end, - "timelineIn": timeline_in, - "timelineOut": timeline_out, - "timelineInHandles": timeline_frame_start, - "timelineOutHandles": timeline_frame_end, - "fps": fps, - "colorspace": colorspace, + "handles": version_data['handleStart'], + "colorspace": 
item.sourceMediaColourTransform(), "families": [f for f in families if 'ftrack' not in f], - "asset": asset, "subset": name, - "track": track, - "version": int(version) + "fps": instance.context.data["fps"] }) instance.data["versionData"] = version_data @@ -220,10 +149,9 @@ class CollectPlatesData(api.InstancePlugin): padding=padding, ext=ext ) - self.log.debug("__ source_in_h: {}".format(source_in_h)) - self.log.debug("__ source_out_h: {}".format(source_out_h)) - start_frame = source_first_frame + source_in_h - duration = source_out_h - source_in_h + + start_frame = source_first_frame + instance.data["sourceInH"] + duration = instance.data["sourceOutH"] - instance.data["sourceInH"] end_frame = start_frame + duration files = [file % i for i in range(start_frame, (end_frame + 1), 1)] except Exception as e: @@ -231,8 +159,8 @@ class CollectPlatesData(api.InstancePlugin): head, ext = os.path.splitext(source_file) ext = ext[1:] files = source_file - start_frame = source_in_h - end_frame = source_out_h + start_frame = instance.data["sourceInH"] + end_frame = instance.data["sourceOutH"] mov_file = head + ".mov" mov_path = os.path.normpath(os.path.join(staging_dir, mov_file)) @@ -243,9 +171,9 @@ class CollectPlatesData(api.InstancePlugin): 'files': mov_file, 'stagingDir': staging_dir, 'startFrame': 0, - 'endFrame': source_out - source_in + 1, + 'endFrame': instance.data["sourceOut"] - instance.data["sourceIn"] + 1, 'step': 1, - 'frameRate': fps, + 'frameRate': instance.context.data["fps"], 'preview': True, 'thumbnail': False, 'name': "preview", @@ -258,8 +186,8 @@ class CollectPlatesData(api.InstancePlugin): thumb_file = head + ".png" thumb_path = os.path.join(staging_dir, thumb_file) - self.log.debug("__ thumb_path: {}".format(thumb_path)) - thumbnail = item.thumbnail(source_in).save( + + thumbnail = item.thumbnail(instance.data["sourceIn"]).save( thumb_path, format='png' ) @@ -281,8 +209,8 @@ class CollectPlatesData(api.InstancePlugin): 'stagingDir': staging_dir, 
'name': ext, 'ext': ext, - 'startFrame': frame_start - handle_start, - 'endFrame': frame_end + handle_end, + 'startFrame': instance.data["startFrame"] - instance.data["handleStart"], + 'endFrame': instance.data["endFrame"] + instance.data["handleEnd"], } instance.data["representations"].append(plates_representation) diff --git a/pype/plugins/nukestudio/publish/collect_remove_clip_instances.py b/pype/plugins/nukestudio/publish/collect_remove_clip_instances.py new file mode 100644 index 0000000000..d41dc50ab1 --- /dev/null +++ b/pype/plugins/nukestudio/publish/collect_remove_clip_instances.py @@ -0,0 +1,17 @@ +from pyblish import api + +class CollectClipSubsets(api.InstancePlugin): + """Collect Subsets from selected Clips, Tags, Preset.""" + + order = api.CollectorOrder + 0.103 + label = "Collect Remove Clip Instaces" + hosts = ["nukestudio"] + families = ['clip'] + + def process(self, instance): + context = instance.context + + # removing original instance + self.log.info("Removing instance.name: `{}`".format(instance.data["name"])) + + context.remove(instance) diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index 7b18c605a7..84b98b9445 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -13,7 +13,7 @@ class CollectReviews(api.InstancePlugin): """ # Run just before CollectSubsets - order = api.CollectorOrder + 0.1025 + order = api.CollectorOrder + 0.1022 label = "Collect Reviews" hosts = ["nukestudio"] families = ["clip"] @@ -41,30 +41,22 @@ class CollectReviews(api.InstancePlugin): ) return + # add to representations + if not instance.data.get("representations"): + instance.data["representations"] = list() + if track in instance.data["track"]: - self.log.debug("Track item on the track: {}".format( - instance.data["track"])) - # Collect data. 
- subset = "" - data = {} - for key, value in instance.data.iteritems(): - data[key] = value + self.log.debug("Review will work on `subset`: {}".format( + instance.data["subset"])) - data["family"] = family.lower() - data["ftrackFamily"] = "img" - data["families"] = ["ftrack"] + # change families + instance.data["family"] = "plate" + instance.data["families"] = ["review", "ftrack"] - data["subset"] = family.lower() + subset.title() - data["name"] = data["subset"] + "_" + data["asset"] + self.version_data(instance) + self.create_thumbnail(instance) - data["label"] = "{} - {}".format( - data['asset'], data["subset"] - ) - - data["source"] = data["sourcePath"] - - # self.log.debug("Creating instance with data: {}".format(data)) - instance.context.create_instance(**data) + rev_inst = instance else: self.log.debug("Track item on plateMain") @@ -80,35 +72,89 @@ class CollectReviews(api.InstancePlugin): "TrackItem from track name `{}` has to be also selected".format( track) ) - - # add to representations - if not instance.data.get("representations"): - instance.data["representations"] = list() - - self.log.debug("Instance review: {}".format(rev_inst.data["name"])) - - # getting file path parameters - file_path = rev_inst.data.get("sourcePath") - file_dir = os.path.dirname(file_path) - file = os.path.basename(file_path) - ext = os.path.splitext(file)[-1][1:] - - # adding annotation to lablel - instance.data["label"] += " + review (.{})".format(ext) instance.data["families"].append("review") - # adding representation for review mov - representation = { - "files": file, - "stagingDir": file_dir, - "startFrame": rev_inst.data.get("sourceIn"), - "endFrame": rev_inst.data.get("sourceOut"), - "step": 1, - "frameRate": rev_inst.data.get("fps"), - "preview": True, - "thumbnail": False, - "name": "preview", - "ext": ext - } - instance.data["representations"].append(representation) - self.log.debug("Added representation: {}".format(representation)) + file_path = 
rev_inst.data.get("sourcePath") + file_dir = os.path.dirname(file_path) + file = os.path.basename(file_path) + ext = os.path.splitext(file)[-1][1:] + + # change label + instance.data["label"] = "{0} - {1} - ({2}) - review".format( + instance.data['asset'], instance.data["subset"], ext + ) + + self.log.debug("Instance review: {}".format(rev_inst.data["name"])) + + + # adding representation for review mov + representation = { + "files": file, + "stagingDir": file_dir, + "startFrame": rev_inst.data.get("sourceIn"), + "endFrame": rev_inst.data.get("sourceOut"), + "step": 1, + "frameRate": rev_inst.data.get("fps"), + "preview": True, + "thumbnail": False, + "name": "preview", + "ext": ext + } + instance.data["representations"].append(representation) + + self.log.debug("Added representation: {}".format(representation)) + + def create_thumbnail(self, instance): + item = instance.data["item"] + source_in = instance.data["sourceIn"] + + source_path = instance.data["sourcePath"] + source_file = os.path.basename(source_path) + head, ext = os.path.splitext(source_file) + + # staging dir creation + staging_dir = os.path.dirname( + source_path) + + thumb_file = head + ".png" + thumb_path = os.path.join(staging_dir, thumb_file) + self.log.debug("__ thumb_path: {}".format(thumb_path)) + self.log.debug("__ source_in: {}".format(source_in)) + thumbnail = item.thumbnail(source_in).save( + thumb_path, + format='png' + ) + self.log.debug("__ thumbnail: {}".format(thumbnail)) + + thumb_representation = { + 'files': thumb_file, + 'stagingDir': staging_dir, + 'name': "thumbnail", + 'thumbnail': True, + 'ext': "png" + } + instance.data["representations"].append( + thumb_representation) + + def version_data(self, instance): + item = instance.data["item"] + + transfer_data = [ + "handleStart", "handleEnd", "sourceIn", "sourceOut", "startFrame", "endFrame", "sourceInH", "sourceOutH", "timelineIn", "timelineOut", "timelineInH", "timelineOutH", "asset", "track", "version" + ] + + version_data = 
dict() + # pass data to version + version_data.update({k: instance.data[k] for k in transfer_data}) + + # add to data of representation + version_data.update({ + "handles": version_data['handleStart'], + "colorspace": item.sourceMediaColourTransform(), + "families": instance.data["families"], + "subset": instance.data["subset"], + "fps": instance.context.data["fps"] + }) + instance.data["versionData"] = version_data + + instance.data["source"] = instance.data["sourcePath"] diff --git a/pype/plugins/nukestudio/publish/collect_selection.py b/pype/plugins/nukestudio/publish/collect_selection.py index e87f9d03ec..ec8d513de8 100644 --- a/pype/plugins/nukestudio/publish/collect_selection.py +++ b/pype/plugins/nukestudio/publish/collect_selection.py @@ -14,12 +14,12 @@ class CollectSelection(pyblish.api.ContextPlugin): self.log.debug("selection: {}".format(selection)) - if not selection: - self.log.debug( - "Nothing is selected. Collecting all items from sequence " - "\"{}\"".format(hiero.ui.activeSequence()) - ) - for track in hiero.ui.activeSequence().items(): - selection.extend(track.items()) + # if not selection: + # self.log.debug( + # "Nothing is selected. 
Collecting all items from sequence " + # "\"{}\"".format(hiero.ui.activeSequence()) + # ) + # for track in hiero.ui.activeSequence().items(): + # selection.extend(track.items()) context.data["selection"] = selection diff --git a/pype/plugins/nukestudio/publish/collect_shots.py b/pype/plugins/nukestudio/publish/collect_shots.py index 506020bbc3..9fc14536fb 100644 --- a/pype/plugins/nukestudio/publish/collect_shots.py +++ b/pype/plugins/nukestudio/publish/collect_shots.py @@ -5,7 +5,7 @@ class CollectShots(api.ContextPlugin): """Collect Shot from Clip.""" # Run just before CollectClipSubsets - order = api.CollectorOrder + 0.1025 + order = api.CollectorOrder + 0.1021 label = "Collect Shots" hosts = ["nukestudio"] families = ["clip"] @@ -25,55 +25,24 @@ class CollectShots(api.ContextPlugin): ) continue - if instance.data.get("main"): - # Collect data. - data = {} - for key, value in instance.data.iteritems(): - if key in "main": - continue - data[key] = value + # Collect data. + data = {} + for key, value in instance.data.iteritems(): + data[key] = value - data["family"] = "shot" - data["families"] = [] - data["frameStart"] = instance.data.get("frameStart", 1) + data["family"] = "shot" + data["families"] = [] - data["subset"] = data["family"] + "Main" + data["subset"] = data["family"] + "Main" - data["name"] = data["subset"] + "_" + data["asset"] + data["name"] = data["subset"] + "_" + data["asset"] - data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format( - data["tasks"], [x["name"] for x in data.get("assetbuilds", [])] - ) + data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format( + data["tasks"], [x["name"] for x in data.get("assetbuilds", [])] + ) - # Get handles. - data["handleStart"] = instance.data["handleStart"] - data["handleEnd"] = instance.data["handleEnd"] - - # Frame-ranges with handles. 
- data["sourceInH"] = data["sourceIn"] - data["handleStart"] - data["sourceOutH"] = data["sourceOut"] + data["handleEnd"] - - # Get timeline frames. - data["timelineIn"] = int(data["item"].timelineIn()) - data["timelineOut"] = int(data["item"].timelineOut()) - - # Frame-ranges with handles. - data["timelineInHandles"] = data["timelineIn"] - data["timelineInHandles"] -= data["handleStart"] - data["timelineOutHandles"] = data["timelineOut"] - data["timelineOutHandles"] += data["handleEnd"] - - # Creating comp frame range. - data["endFrame"] = ( - data["frameStart"] + (data["sourceOut"] - data["sourceIn"]) - ) - - # Get fps. - sequence = instance.context.data["activeSequence"] - data["fps"] = sequence.framerate() - - # Create instance. - self.log.debug("Creating instance with: {}".format(data["name"])) - instance.context.create_instance(**data) + # Create instance. + self.log.debug("Creating instance with: {}".format(data["name"])) + instance.context.create_instance(**data) self.log.debug("_ context: {}".format(context[:])) diff --git a/pype/plugins/nukestudio/publish/collect_subsets.py b/pype/plugins/nukestudio/publish/collect_subsets.py deleted file mode 100644 index 95476b4db7..0000000000 --- a/pype/plugins/nukestudio/publish/collect_subsets.py +++ /dev/null @@ -1,208 +0,0 @@ -from pyblish import api -from copy import deepcopy - - -class CollectClipSubsets(api.InstancePlugin): - """Collect Subsets from selected Clips, Tags, Preset.""" - - order = api.CollectorOrder + 0.103 - label = "Collect Subsets" - hosts = ["nukestudio"] - families = ['clip'] - - def process(self, instance): - context = instance.context - - asset_name = instance.data["asset"] - - # get all subsets from tags and match them with nks_presets > - # > looks to rules for tasks, subsets, representations - subsets_collection = self.get_subsets_from_presets(instance) - - # iterate trough subsets and create instances - for subset, attrs in subsets_collection.items(): - self.log.info((subset, attrs)) - # 
create families - item = instance.data["item"] - family = instance.data["family"] - families = attrs["families"] + [str(subset)] - task = attrs["task"] - subset = "{0}{1}".format( - subset, - instance.data.get("subsetType") or "Default") - instance_name = "{0}_{1}_{2}".format(asset_name, task, subset) - self.log.info("Creating instance with name: {}".format( - instance_name)) - - # get handles - handles = int(instance.data["handles"]) - handle_start = int(instance.data["handleStart"] + handles) - handle_end = int(instance.data["handleEnd"] + handles) - - # get source frames - source_first = int(instance.data["sourceFirst"]) - source_in = int(instance.data["sourceIn"]) - source_out = int(instance.data["sourceOut"]) - - # frame-ranges with handles - source_in_h = source_in - handle_start - source_out_h = source_out + handle_end - - # get timeline frames - timeline_in = int(item.timelineIn()) - timeline_out = int(item.timelineOut()) - - # frame-ranges with handles - timeline_frame_start = timeline_in - handle_start - timeline_frame_end = timeline_out + handle_end - - # creating comp frame range - frame_start = instance.data["frameStart"] - frame_end = frame_start + (source_out - source_in) - - # get sequence from context, and fps - sequence = context.data["activeSequence"] - fps = sequence.framerate() - - context.create_instance( - name=instance_name, - subset=subset, - asset=asset_name, - track=instance.data.get("track"), - item=item, - task=task, - sourcePath=instance.data.get("sourcePath"), - family=family, - families=families, - sourceFirst=source_first, - sourceIn=source_in, - sourceOut=source_out, - sourceInH=source_in_h, - sourceOutH=source_out_h, - frameStart=frame_start, - startFrame=frame_start, - endFrame=frame_end, - timelineIn=timeline_in, - timelineOut=timeline_out, - timelineInHandles=timeline_frame_start, - timelineOutHandles=timeline_frame_end, - fps=fps, - handles=instance.data["handles"], - handleStart=handle_start, - handleEnd=handle_end, - 
attributes=attrs, - version=instance.data["version"], - hierarchy=instance.data.get("hierarchy", None), - parents=instance.data.get("parents", None), - publish=True - ) - - # removing original instance - context.remove(instance) - - def get_subsets_from_presets(self, instance): - - family = instance.data["family"] - # get presets and tags - tag_tasks = instance.data["tasks"] - presets = instance.context.data['presets'] - nks_presets = presets[instance.context.data['host']] - family_default_preset = nks_presets["asset_default"].get(family) - - if family_default_preset: - frame_start = family_default_preset.get("fstart", 1) - instance.data["frameStart"] = int(frame_start) - - # get specific presets - pr_host_tasks = deepcopy( - nks_presets["rules_tasks"]).get("hostTasks", None) - - subsets_collect = dict() - # iterate tags and collect subset properities from presets - for task in tag_tasks: - self.log.info("__ task: {}".format(task)) - try: - # get host for task - host = None - host = [h for h, tasks in pr_host_tasks.items() - if task in tasks][0] - except IndexError: - pass - - try: - # get subsets for task - subsets = None - #subsets = pr_host_subsets[host] - except KeyError: - pass - - if not subsets: - continue - - # get subsets for task - for sub in subsets: - # get specific presets - pr_subsets = deepcopy(nks_presets["rules_subsets"]) - pr_representations = deepcopy( - nks_presets["rules_representations"]) - - # initialise collection dictionary - subs_data = dict() - - # gets subset properities - subs_data[sub] = None - subs_data[sub] = pr_subsets.get(sub, None) - - # gets representation if in keys - if subs_data[sub] and ( - "representation" in subs_data[sub].keys() - ): - repr_name = subs_data[sub]["representation"] - - # owerwrite representation key with values from preset - subs_data[sub]["representation"] = pr_representations[ - repr_name - ] - subs_data[sub]["representation"]["name"] = repr_name - - # gets nodes and presets data if in keys - # gets nodes 
if any - if subs_data[sub] and ( - "nodes" in subs_data[sub].keys() - ): - # iterate trough each node - for k in subs_data[sub]["nodes"]: - pr_node = k - pr_family = subs_data[sub]["nodes"][k]["family"] - - # create attribute dict for later filling - subs_data[sub]["nodes"][k]["attributes"] = dict() - - # iterate presets for the node - for p, path in subs_data[sub]["nodes"][k][ - "presets"].items(): - - # adds node type and family for preset path - nPath = path + [pr_node, pr_family] - - # create basic iternode to be wolked trough until - # found presets at the end - iternode = presets[p] - for part in nPath: - iternode = iternode[part] - - iternode = {k: v for k, v in iternode.items() - if not k.startswith("_")} - # adds found preset to attributes of the node - subs_data[sub]["nodes"][k][ - "attributes"].update(iternode) - - # removes preset key - subs_data[sub]["nodes"][k].pop("presets") - - # add all into dictionary - self.log.info("__ subs_data[sub]: {}".format(subs_data[sub])) - subs_data[sub]["task"] = task.lower() - subsets_collect.update(subs_data) - - return subsets_collect diff --git a/pype/plugins/nukestudio/publish/collect_tag_main.py b/pype/plugins/nukestudio/publish/collect_tag_main.py deleted file mode 100644 index 36d9b95554..0000000000 --- a/pype/plugins/nukestudio/publish/collect_tag_main.py +++ /dev/null @@ -1,32 +0,0 @@ -from pyblish import api - - -class CollectClipTagTypes(api.InstancePlugin): - """Collect Types from Tags of selected track items.""" - - order = api.CollectorOrder + 0.012 - label = "Collect main flag" - hosts = ["nukestudio"] - families = ['clip'] - - def process(self, instance): - # gets tags - tags = instance.data["tags"] - - for t in tags: - t_metadata = dict(t["metadata"]) - t_family = t_metadata.get("tag.family", "") - - # gets only task family tags and collect labels - if "plate" in t_family: - t_subset = t_metadata.get("tag.subset", "") - subset_name = "{0}{1}".format( - t_family, - t_subset.capitalize()) - - if 
"plateMain" in subset_name:
-                if not instance.data.get("main"):
-                    instance.data["main"] = True
-                self.log.info("`plateMain` found in instance.name: `{}`".format(
-                    instance.data["name"]))
-        return
diff --git a/pype/plugins/nukestudio/publish/collect_tag_subsets.py b/pype/plugins/nukestudio/publish/collect_tag_subsets.py
new file mode 100644
index 0000000000..0d42000896
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_tag_subsets.py
@@ -0,0 +1,28 @@
+from pyblish import api
+
+
+class CollectClipSubsetsTags(api.InstancePlugin):
+    """Collect Subsets from Tags of selected track items."""
+
+    order = api.CollectorOrder + 0.012
+    label = "Collect Tags Subsets"
+    hosts = ["nukestudio"]
+    families = ['clip']
+
+    def process(self, instance):
+        # tags attached to this track item
+        tags = instance.data["tags"]
+
+        for t in tags:
+            t_metadata = dict(t["metadata"])
+            t_family = t_metadata.get("tag.family", None)
+            t_subset = t_metadata.get("tag.subset", None)
+
+            # build a subset name only when both tags are present
+            if t_subset and t_family:
+                subset_name = "{0}{1}".format(
+                    t_family,
+                    t_subset.capitalize())
+                instance.data['subset'] = subset_name
+
+                self.log.info("`subset`: {0} found in `instance.name`: `{1}`".format(subset_name, instance.data["name"]))