diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py
index 8bfc4c8ee5..99005a9452 100644
--- a/pype/maya/__init__.py
+++ b/pype/maya/__init__.py
@@ -114,16 +114,7 @@ def on_init(_):
     )
     safe_deferred(override_component_mask_commands)
 
-    launch_workfiles = True
-    try:
-        presets = config.get_presets()
-        launch_workfiles = presets['tools']['workfiles']['start_on_app_launch']
-    except KeyError:
-        log.info(
-            "Workfiles app start on launch configuration was not found."
-            " Defaulting to False."
-        )
-        launch_workfiles = False
+    launch_workfiles = os.environ.get("WORKFILES_STARTUP")
 
     if launch_workfiles:
         safe_deferred(launch_workfiles_app)
diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
index 492271d844..a815d9ff9b 100644
--- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
+++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
@@ -26,7 +26,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
         'render': 'render',
         'nukescript': 'comp',
         'review': 'mov',
-        'plates': 'img'
+        'plate': 'img'
     }
 
     def process(self, instance):
diff --git a/pype/plugins/global/publish/collect_assumed_destination.py b/pype/plugins/global/publish/collect_assumed_destination.py
index a04933b8c6..46c68bc199 100644
--- a/pype/plugins/global/publish/collect_assumed_destination.py
+++ b/pype/plugins/global/publish/collect_assumed_destination.py
@@ -9,7 +9,7 @@ class CollectAssumedDestination(pyblish.api.ContextPlugin):
     label = "Collect Assumed Destination"
     order = pyblish.api.CollectorOrder + 0.498
 
-    exclude_families = ["clip"]
+    exclude_families = ["plate"]
 
     def process(self, context):
diff --git a/pype/plugins/global/publish/collect_presets.py b/pype/plugins/global/publish/collect_presets.py
index 58bb26e261..312b6b008a 100644
--- a/pype/plugins/global/publish/collect_presets.py
+++ b/pype/plugins/global/publish/collect_presets.py
@@ -18,12 +18,14 @@ class CollectPresets(api.ContextPlugin):
             presets["colorspace"] = presets["colorspace"][p_init["colorspace"]]
             presets["dataflow"] = presets["dataflow"][p_init["dataflow"]]
         except KeyError:
-            log.warning("No projects custom preset available...")
+            self.log.warning("No projects custom preset available...")
             presets["colorspace"] = presets["colorspace"]["default"]
             presets["dataflow"] = presets["dataflow"]["default"]
-            log.info("Presets `colorspace` and `dataflow` loaded from `default`...")
+            self.log.info(
+                "Presets `colorspace` and `dataflow` loaded from `default`..."
+            )
 
         context.data["presets"] = presets
-
+        self.log.info(context.data["presets"])
         return
diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index c330654e41..f5422990c2 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -60,7 +60,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                 "nukescript",
                 "render",
                 "write",
-                "plates"
+                "rig",
+                "plate"
                 ]
 
     exclude_families = ["clip"]
@@ -217,7 +218,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         if 'transfers' not in instance.data:
             instance.data['transfers'] = []
 
-        for idx, repre in enumerate(repres):
+        for idx, repre in enumerate(instance.data["representations"]):
 
             # Collection
             #   _______
@@ -237,7 +238,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             template = os.path.normpath(
                 anatomy.templates[template_name]["path"])
 
-
             if isinstance(files, list):
                 src_collections, remainder = clique.assemble(files)
                 self.log.debug(
@@ -265,7 +265,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                 dst_head = dst_collection.format("{head}")
                 dst_tail = dst_collection.format("{tail}")
 
-                repres[idx]['published_path'] = dst_collection.format()
+                repre['published_path'] = dst_collection.format()
 
                 for i in src_collection.indexes:
                     src_padding = src_collection.format("{padding}") % i
@@ -283,7 +283,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                     hashes = '#' * len(dst_padding)
                     dst = "{0}{1}{2}".format(dst_head, hashes, dst_tail)
 
-
             else:
                 # Single file
                 #  _______
@@ -308,7 +307,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                 dst = anatomy_filled[template_name]["path"]
 
                 instance.data["transfers"].append([src, dst])
-                repres[idx]['published_path'] = dst
+
+                repre['published_path'] = dst
+                self.log.debug("__ dst: {}".format(dst))
 
             representation = {
                 "schema": "pype:representation-2.0",
@@ -339,8 +340,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             instance.data['destination_list'] = destination_list
             representations.append(representation)
 
+        for rep in instance.data["representations"]:
+            self.log.debug("__ represNAME: {}".format(rep['name']))
+            self.log.debug("__ represPATH: {}".format(rep['published_path']))
         io.insert_many(representations)
-        self.log.debug("Representation: {}".format(representations))
+        # self.log.debug("Representation: {}".format(representations))
         self.log.info("Registered {} items".format(len(representations)))
diff --git a/pype/plugins/maya/load/load_rig.py b/pype/plugins/maya/load/load_rig.py
index 66b086c861..9358d941db 100644
--- a/pype/plugins/maya/load/load_rig.py
+++ b/pype/plugins/maya/load/load_rig.py
@@ -36,8 +36,7 @@ class RigLoader(pype.maya.plugin.ReferenceLoader):
                                 groupReference=True,
                                 groupName=groupName)
 
-        cmds.makeIdentity(groupName, apply=False, rotate=True,
-                          translate=True, scale=True)
+        cmds.xform(groupName, pivots=(0, 0, 0))
 
         presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
         colors = presets['plugins']['maya']['load']['colors']
diff --git a/pype/plugins/maya/publish/validate_rig_controllers.py b/pype/plugins/maya/publish/validate_rig_controllers.py
index 15d74012d4..a9072e9d31 100644
--- a/pype/plugins/maya/publish/validate_rig_controllers.py
+++ b/pype/plugins/maya/publish/validate_rig_controllers.py
@@ -163,6 +163,10 @@ class ValidateRigControllers(pyblish.api.InstancePlugin):
             if locked:
                 continue
 
+            # Ignore proxy connections.
+            if cmds.addAttr(plug, query=True, usedAsProxy=True):
+                continue
+
             # Check for incoming connections
             if cmds.listConnections(plug, source=True, destination=False):
                 invalid.append(plug)
diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py
index 45e476554a..a757d929c5 100644
--- a/pype/plugins/nuke/load/load_sequence.py
+++ b/pype/plugins/nuke/load/load_sequence.py
@@ -75,8 +75,8 @@ def loader_shift(node, frame, relative=True):
 class LoadSequence(api.Loader):
     """Load image sequence into Nuke"""
 
-    families = ["write", "source", "plates"]
-    representations = ["exr", "dpx"]
+    families = ["write", "source", "plate"]
+    representations = ["exr", "dpx"]
 
     label = "Load sequence"
     order = -10
diff --git a/pype/plugins/nukestudio/publish/extract_plates.py b/pype/plugins/nukestudio/_unused/extract_plates.py
similarity index 100%
rename from pype/plugins/nukestudio/publish/extract_plates.py
rename to pype/plugins/nukestudio/_unused/extract_plates.py
diff --git a/pype/plugins/nukestudio/publish/extract_plates_waiting.py b/pype/plugins/nukestudio/_unused/extract_plates_waiting.py
similarity index 100%
rename from pype/plugins/nukestudio/publish/extract_plates_waiting.py
rename to pype/plugins/nukestudio/_unused/extract_plates_waiting.py
diff --git a/pype/plugins/nukestudio/publish/validate_viewer_lut.py b/pype/plugins/nukestudio/_unused/validate_viewer_lut.py
similarity index 100%
rename from pype/plugins/nukestudio/publish/validate_viewer_lut.py
rename to pype/plugins/nukestudio/_unused/validate_viewer_lut.py
diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index e0bc7ed99f..af542af7a5 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ b/pype/plugins/nukestudio/publish/collect_clips.py
@@ -1,10 +1,11 @@
+import os
 from pyblish import api
 
 
 class CollectClips(api.ContextPlugin):
     """Collect all Track items selection."""
 
-    order = api.CollectorOrder
+    order = api.CollectorOrder + 0.01
     label = "Collect Clips"
     hosts = ["nukestudio"]
 
@@ -26,11 +27,19 @@ class CollectClips(api.ContextPlugin):
                 source = item.source().mediaSource()
                 source_path = source.firstpath()
                 instance_name = "{0}_{1}".format(track.name(), item.name())
+
+                try:
+                    head, padding, ext = os.path.basename(source_path).split('.')
+                    source_first_frame = int(padding)
+                except:
+                    source_first_frame = 0
+
                 data[instance_name] = {
                     "item": item,
                     "source": source,
                     "sourcePath": source_path,
                     "track": track.name(),
+                    "sourceFirst": source_first_frame,
                     "sourceIn": int(item.sourceIn()),
                     "sourceOut": int(item.sourceOut()),
                     "startFrame": int(item.timelineIn()),
@@ -47,6 +56,7 @@ class CollectClips(api.ContextPlugin):
                     sourcePath=value["sourcePath"],
                     family=family,
                     families=[],
+                    sourceFirst=value["sourceFirst"],
                     sourceIn=value["sourceIn"],
                     sourceOut=value["sourceOut"],
                     startFrame=value["startFrame"],
diff --git a/pype/plugins/nukestudio/publish/collect_current_file.py b/pype/plugins/nukestudio/publish/collect_current_file.py
index 35da05e296..0c194e8d3d 100644
--- a/pype/plugins/nukestudio/publish/collect_current_file.py
+++ b/pype/plugins/nukestudio/publish/collect_current_file.py
@@ -6,6 +6,7 @@ class CollectCurrentFile(pyblish.api.ContextPlugin):
 
     order = pyblish.api.CollectorOrder - 0.1
 
+
     def process(self, context):
         """Todo, inject the current working file"""
diff --git a/pype/plugins/nukestudio/publish/collect_handles.py b/pype/plugins/nukestudio/publish/collect_handles.py
index c148550a1a..81565b69b8 100644
--- a/pype/plugins/nukestudio/publish/collect_handles.py
+++ b/pype/plugins/nukestudio/publish/collect_handles.py
@@ -1,46 +1,44 @@
 import json
 from pyblish import api
 
 
-class CollectClipHandles(api.InstancePlugin):
-    """Collect Handles from selected track items."""
-    order = api.CollectorOrder + 0.006
+class CollectClipHandles(api.ContextPlugin):
+    """Collect Handles from all instances and add to assetShared."""
+
+    order = api.CollectorOrder + 0.101
     label = "Collect Handles"
     hosts = ["nukestudio"]
     families = ['clip']
 
-    def process(self, instance):
-        # gets tags
-        tags = instance.data["tags"]
+    def process(self, context):
+        assets_shared = context.data.get("assetsShared")
+        assert assets_shared, "Context data missing `assetsShared` key"
 
-        for t in tags:
-            t_metadata = dict(t["metadata"])
-            t_family = t_metadata.get("tag.family", "")
+        # find all main types instances and add its handles to asset shared
+        instances = context[:]
+        for instance in instances:
+            # get handles
+            handles = int(instance.data["handles"])
+            handle_start = int(instance.data["handleStart"])
+            handle_end = int(instance.data["handleEnd"])
 
-            # gets only task family tags and collect labels
-            if "handles" in t_family:
-                # gets value of handles
-                t_value = int(t_metadata.get("tag.value", ""))
+            if instance.data.get("main"):
+                name = instance.data["asset"]
+                if assets_shared.get(name):
+                    self.log.debug("Adding to shared assets: `{}`".format(
+                        instance.data["name"]))
+                    assets_shared[name].update({
+                        "handles": handles,
+                        "handleStart": handle_start,
+                        "handleEnd": handle_end
+                    })
 
-                # gets arguments if there are any
-                t_args = t_metadata.get("tag.args", "")
-
-                # distribute handles
-                if not t_args:
-                    # add handles to both sides
-                    instance.data['handles'] = t_value
-                    self.log.info("Collected Handles: `{}`".format(
-                        instance.data['handles']))
-                else:
-                    t_args = json.loads(t_args.replace("'", "\""))
-                    # add in start
-                    if 'start' in t_args['where']:
-                        instance.data["handleStart"] += t_value
-                        self.log.info("Collected Handle Start: `{}`".format(
-                            instance.data["handleStart"]))
-
-                    # add in end
-                    if 'end' in t_args['where']:
-                        instance.data["handleEnd"] += t_value
-                        self.log.info("Collected Handle End: `{}`".format(
-                            instance.data["handleEnd"]))
+        for instance in instances:
+            if not instance.data.get("main"):
+                self.log.debug("Synchronize handles on: `{}`".format(
+                    instance.data["name"]))
+                name = instance.data["asset"]
+                s_asset_data = assets_shared.get(name)
+                instance.data["handles"] = s_asset_data["handles"]
+                instance.data["handleStart"] = s_asset_data["handleStart"]
+                instance.data["handleEnd"] = s_asset_data["handleEnd"]
diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
index 4426127eb4..13d49bf6fe 100644
--- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
+++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
@@ -13,7 +13,7 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
     """
 
     label = "Collect Hierarchy Clip"
-    order = pyblish.api.CollectorOrder + 0.1
+    order = pyblish.api.CollectorOrder + 0.101
     families = ["clip"]
 
     def convert_to_entity(self, key, value):
@@ -38,8 +38,8 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
         asset = instance.data.get("asset")
 
         # create asset_names conversion table
-        if not context.data.get("assetsSharedHierarchy"):
-            context.data["assetsSharedHierarchy"] = dict()
+        if not context.data.get("assetsShared"):
+            context.data["assetsShared"] = dict()
 
         # build data for inner nukestudio project property
         data = {
@@ -126,7 +126,7 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
             assert not hd, "Only one Hierarchy Tag is \
                 allowed. Clip: `{}`".format(asset)
 
-        assetsSharedHierarchy = {
+        assetsShared = {
             asset: {
                 "asset": instance.data["asset"],
                 "hierarchy": hierarchy,
@@ -135,8 +135,8 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
         # add formated hierarchy path into instance data
         instance.data["hierarchy"] = hierarchy
         instance.data["parents"] = parents
-        context.data["assetsSharedHierarchy"].update(
-            assetsSharedHierarchy)
+        context.data["assetsShared"].update(
+            assetsShared)
 
 
 class CollectHierarchyContext(pyblish.api.ContextPlugin):
@@ -170,8 +170,23 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
 
             name = instance.data["asset"]
 
-            # inject assetsSharedHierarchy to other plates types
-            assets_shared = context.data.get("assetsSharedHierarchy")
+            # get handles
+            handles = int(instance.data["handles"])
+            handle_start = int(instance.data["handleStart"] + handles)
+            handle_end = int(instance.data["handleEnd"] + handles)
+
+            # get source frames
+            source_first = int(instance.data["sourceFirst"])
+            source_in = int(instance.data["sourceIn"])
+            source_out = int(instance.data["sourceOut"])
+
+            instance.data['startFrame'] = int(
+                source_first + source_in - handle_start)
+            instance.data['endFrame'] = int(
+                (source_first + source_out + handle_end))
+
+            # inject assetsShared to other plates types
+            assets_shared = context.data.get("assetsShared")
 
             if assets_shared:
                 s_asset_data = assets_shared.get(name)
@@ -180,6 +195,8 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
                 instance.data["parents"] = s_asset_data["parents"]
                 instance.data["hierarchy"] = s_asset_data["hierarchy"]
 
+            if "main" not in instance.data["name"]:
+                continue
+
             in_info = {}
             # suppose that all instances are Shots
@@ -187,11 +204,24 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
 
             # get custom attributes of the shot
             in_info['custom_attributes'] = {
-                'fend': instance.data['endFrame'],
-                'fstart': instance.data['startFrame'],
+                'handles': int(instance.data.get('handles')),
+                'fend': int(
+                    (source_first + source_out)),
+                'fstart': int(
+                    source_first + source_in),
                 'fps': context.data["framerate"]
             }
 
+            # handle_start = instance.data.get('handleStart')
+            # handle_end = instance.data.get('handleEnd')
+            # self.log.debug("__ handle_start: {}".format(handle_start))
+            # self.log.debug("__ handle_end: {}".format(handle_end))
+            # if handle_start and handle_end:
+            #     in_info['custom_attributes'].update({
+            #         "handle_start": handle_start,
+            #         "handle_end": handle_end
+            #     })
+
             in_info['tasks'] = instance.data['tasks']
 
             parents = instance.data.get('parents', [])
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index 28012afece..5971a99ff1 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -8,12 +8,10 @@ class CollectPlates(api.InstancePlugin):
     order = api.CollectorOrder + 0.49
     label = "Extract Plates"
     hosts = ["nukestudio"]
-    families = ["plates"]
+    families = ["plate"]
 
     def process(self, instance):
         import os
-        import hiero.core
-        # from hiero.ui.nuke_bridge import FnNsFrameServer
 
         # add to representations
         if not instance.data.get("representations"):
@@ -34,10 +32,9 @@ class CollectPlates(api.InstancePlugin):
         source_file = os.path.basename(source_path)
 
         # staging dir creation
-        staging_dir = instance.data['stagingDir'] = os.path.dirname(
+        staging_dir = os.path.dirname(
             source_path)
 
-
         item = instance.data["item"]
 
         # get handles
@@ -49,6 +46,10 @@ class CollectPlates(api.InstancePlugin):
         source_in = int(instance.data["sourceIn"])
         source_out = int(instance.data["sourceOut"])
 
+        # get source frames
+        frame_start = int(instance.data["startFrame"])
+        frame_end = int(instance.data["endFrame"])
+
         # get source frames
         source_in_h = int(instance.data["sourceInH"])
         source_out_h = int(instance.data["sourceOutH"])
@@ -61,7 +62,6 @@ class CollectPlates(api.InstancePlugin):
         timeline_frame_start = int(instance.data["timelineInHandles"])
         timeline_frame_end = int(instance.data["timelineOutHandles"])
 
-
         # get colorspace
         colorspace = item.sourceMediaColourTransform()
 
@@ -72,12 +72,18 @@ class CollectPlates(api.InstancePlugin):
         self.log.debug("__ handles: {}".format(handles))
         self.log.debug("__ handle_start: {}".format(handle_start))
         self.log.debug("__ handle_end: {}".format(handle_end))
+        self.log.debug("__ frame_start: {}".format(frame_start))
+        self.log.debug("__ frame_end: {}".format(frame_end))
+        self.log.debug("__ f duration: {}".format(frame_end - frame_start + 1))
         self.log.debug("__ source_in: {}".format(source_in))
         self.log.debug("__ source_out: {}".format(source_out))
+        self.log.debug("__ s duration: {}".format(source_out - source_in + 1))
         self.log.debug("__ source_in_h: {}".format(source_in_h))
         self.log.debug("__ source_out_h: {}".format(source_out_h))
+        self.log.debug("__ sh duration: {}".format(source_out_h - source_in_h + 1))
         self.log.debug("__ timeline_in: {}".format(timeline_in))
         self.log.debug("__ timeline_out: {}".format(timeline_out))
+        self.log.debug("__ t duration: {}".format(timeline_out - timeline_in + 1))
         self.log.debug("__ timeline_frame_start: {}".format(
             timeline_frame_start))
         self.log.debug("__ timeline_frame_end: {}".format(timeline_frame_end))
@@ -87,6 +93,12 @@ class CollectPlates(api.InstancePlugin):
         self.log.debug("__ source_file: {}".format(source_file))
         self.log.debug("__ staging_dir: {}".format(staging_dir))
 
+        self.log.debug("__ before family: {}".format(family))
+        self.log.debug("__ before families: {}".format(families))
+        #
+        # this is just workaround because 'clip' family is filtered
+        instance.data["family"] = families[-1]
+        instance.data["families"].append(family)
 
         # add to data of representation
         version_data.update({
@@ -95,14 +107,15 @@ class CollectPlates(api.InstancePlugin):
             "handleEnd": handle_end,
             "sourceIn": source_in,
             "sourceOut": source_out,
+            "startFrame": frame_start,
+            "endFrame": frame_end,
             "timelineIn": timeline_in,
             "timelineOut": timeline_out,
             "timelineInHandles": timeline_frame_start,
             "timelineOutHandles": timeline_frame_end,
             "fps": fps,
             "colorspace": colorspace,
-            "family": family,
-            "families": families,
+            "families": [f for f in families if 'ftrack' not in f],
             "asset": asset,
             "subset": name,
             "track": track,
@@ -119,6 +132,8 @@ class CollectPlates(api.InstancePlugin):
             padding=padding,
             ext=ext
         )
+        start_frame = source_first_frame
+        end_frame = source_first_frame + source_out
         files = [file % i for i in range(
             (source_first_frame + source_in_h),
             ((source_first_frame + source_out_h) + 1), 1)]
@@ -127,7 +142,45 @@ class CollectPlates(api.InstancePlugin):
             head, ext = source_file.split('.')
             files = source_file
 
+        if isinstance(files, list):
+            mov_file = head + ".mov"
+            mov_path = os.path.normpath(os.path.join(staging_dir, mov_file))
+            if os.path.exists(mov_path):
+                # adding mov into the representations
+                self.log.debug("__ mov_path: {}".format(mov_path))
+                plates_mov_representation = {
+                    'files': mov_file,
+                    'stagingDir': staging_dir,
+                    'startFrame': 0,
+                    'endFrame': source_out - source_in + 1,
+                    'step': 1,
+                    'frameRate': fps,
+                    'preview': True,
+                    'thumbnail': False,
+                    'name': "preview",
+                    'ext': "mov",
+                }
+                instance.data["representations"].append(
+                    plates_mov_representation)
+            thumb_file = head + ".png"
+            thumb_path = os.path.join(staging_dir, thumb_file)
+            self.log.debug("__ thumb_path: {}".format(thumb_path))
+            thumbnail = item.thumbnail(source_in).save(
+                thumb_path,
+                format='png'
+            )
+            self.log.debug("__ thumbnail: {}".format(thumbnail))
+
+            thumb_representation = {
+                'files': thumb_file,
+                'stagingDir': staging_dir,
+                'name': "thumbnail",
+                'thumbnail': True,
+                'ext': "png"
+            }
+            instance.data["representations"].append(
+                thumb_representation)
 
         # adding representation for plates
         plates_representation = {
@@ -138,11 +191,6 @@ class CollectPlates(api.InstancePlugin):
         }
         instance.data["representations"].append(plates_representation)
 
-
-        # this is just workaround because 'clip' family is filtered
-        instance.data["family"] = families[-1]
-        instance.data["families"].append(family)
-
         # testing families
         family = instance.data["family"]
         families = instance.data["families"]
diff --git a/pype/plugins/nukestudio/publish/collect_subsets.py b/pype/plugins/nukestudio/publish/collect_subsets.py
index 61a4a8322a..00f721ace2 100644
--- a/pype/plugins/nukestudio/publish/collect_subsets.py
+++ b/pype/plugins/nukestudio/publish/collect_subsets.py
@@ -40,6 +40,7 @@ class CollectClipSubsets(api.InstancePlugin):
         handle_end = int(instance.data["handleEnd"] + handles)
 
         # get source frames
+        source_first = int(instance.data["sourceFirst"])
         source_in = int(instance.data["sourceIn"])
         source_out = int(instance.data["sourceOut"])
 
@@ -56,9 +57,8 @@ class CollectClipSubsets(api.InstancePlugin):
         timeline_frame_end = timeline_out + handle_end
 
         # creating comp frame range
-        frame_start = instance.data["frameStart"] - handle_start
-        frame_end = frame_start + \
-            (timeline_frame_end - timeline_frame_start)
+        frame_start = instance.data["frameStart"]
+        frame_end = frame_start + (source_out - source_in)
 
         # get sequence from context, and fps
         sequence = context.data["activeSequence"]
@@ -74,6 +74,7 @@ class CollectClipSubsets(api.InstancePlugin):
             sourcePath=instance.data.get("sourcePath"),
             family=family,
             families=families,
+            sourceFirst=source_first,
             sourceIn=source_in,
             sourceOut=source_out,
             sourceInH=source_in_h,
@@ -121,6 +122,7 @@ class CollectClipSubsets(api.InstancePlugin):
         subsets_collect = dict()
         # iterate tags and collect subset properities from presets
         for task in tag_tasks:
+            self.log.info("__ task: {}".format(task))
             try:
                 # get host for task
                 host = None
@@ -201,6 +203,7 @@ class CollectClipSubsets(api.InstancePlugin):
                         subs_data[sub]["nodes"][k].pop("presets")
 
             # add all into dictionary
+            self.log.info("__ subs_data[sub]: {}".format(subs_data[sub]))
             subs_data[sub]["task"] = task.lower()
             subsets_collect.update(subs_data)
diff --git a/pype/plugins/nukestudio/publish/collect_tag_handles.py b/pype/plugins/nukestudio/publish/collect_tag_handles.py
new file mode 100644
index 0000000000..1d8ce05205
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_tag_handles.py
@@ -0,0 +1,47 @@
+import json
+from pyblish import api
+
+
+class CollectClipTagHandles(api.InstancePlugin):
+    """Collect Handles from selected track items."""
+
+    order = api.CollectorOrder + 0.006
+    label = "Collect Tag Handles"
+    hosts = ["nukestudio"]
+    families = ['clip']
+
+    def process(self, instance):
+        # gets tags
+        tags = instance.data["tags"]
+
+        for t in tags:
+            t_metadata = dict(t["metadata"])
+            t_family = t_metadata.get("tag.family", "")
+
+            # gets only task family tags and collect labels
+            if "handles" in t_family:
+                # gets value of handles
+                t_value = int(t_metadata.get("tag.value", ""))
+
+                # gets arguments if there are any
+                t_args = t_metadata.get("tag.args", "")
+
+                # distribute handles
+                if not t_args:
+                    # add handles to both sides
+                    instance.data['handles'] = t_value
+                    self.log.info("Collected Handles: `{}`".format(
+                        instance.data['handles']))
+                else:
+                    t_args = json.loads(t_args.replace("'", "\""))
+                    # add in start
+                    if 'start' in t_args['where']:
+                        instance.data["handleStart"] += t_value
+                        self.log.info("Collected Handle Start: `{}`".format(
+                            instance.data["handleStart"]))
+
+                    # add in end
+                    if 'end' in t_args['where']:
+                        instance.data["handleEnd"] += t_value
+                        self.log.info("Collected Handle End: `{}`".format(
+                            instance.data["handleEnd"]))
diff --git a/pype/plugins/nukestudio/publish/collect_tag_tasks.py b/pype/plugins/nukestudio/publish/collect_tag_tasks.py
index d48939db88..592559fc50 100644
--- a/pype/plugins/nukestudio/publish/collect_tag_tasks.py
+++ b/pype/plugins/nukestudio/publish/collect_tag_tasks.py
@@ -4,7 +4,7 @@ from pyblish import api
 class CollectClipTagTasks(api.InstancePlugin):
     """Collect Tags from selected track items."""
 
-    order = api.CollectorOrder + 0.006
+    order = api.CollectorOrder + 0.012
     label = "Collect Tag Tasks"
     hosts = ["nukestudio"]
     families = ['clip']
diff --git a/pype/plugins/nukestudio/publish/collect_tag_types.py b/pype/plugins/nukestudio/publish/collect_tag_types.py
index 7393df9cad..6889ddd81a 100644
--- a/pype/plugins/nukestudio/publish/collect_tag_types.py
+++ b/pype/plugins/nukestudio/publish/collect_tag_types.py
@@ -4,7 +4,7 @@ from pyblish import api
 class CollectClipTagTypes(api.InstancePlugin):
     """Collect Types from Tags of selected track items."""
 
-    order = api.CollectorOrder + 0.007
+    order = api.CollectorOrder + 0.012
     label = "Collect Plate Type from Tag"
     hosts = ["nukestudio"]
     families = ['clip']
@@ -26,6 +26,9 @@ class CollectClipTagTypes(api.InstancePlugin):
                     t_type.capitalize(), t_order)
                 subset_names.append(subset_type)
 
+                if "main" in t_type:
+                    instance.data["main"] = True
+
         if subset_names:
             instance.data["subsetType"] = subset_names[0]
 
diff --git a/pype/plugins/nukestudio/publish/collect_tags.py b/pype/plugins/nukestudio/publish/collect_tags.py
index 8c1e12f2be..49005f4b22 100644
--- a/pype/plugins/nukestudio/publish/collect_tags.py
+++ b/pype/plugins/nukestudio/publish/collect_tags.py
@@ -4,7 +4,7 @@ from pyblish import api
 class CollectClipTags(api.InstancePlugin):
     """Collect Tags from selected track items."""
 
-    order = api.CollectorOrder + 0.005
+    order = api.CollectorOrder + 0.011
     label = "Collect Tags"
     hosts = ["nukestudio"]
     families = ['clip']
diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 034484a442..4f89366190 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -9,6 +9,40 @@ log = pype.Logger().get_logger("BurninWrapper", "burninwrap")
 
 
 class ModifiedBurnins(ffmpeg_burnins.Burnins):
+    '''
+    This is a modification of the OTIO FFmpeg Burnin adapter.
+    - requires FFmpeg in PATH
+
+    Offers 6 positions for burnin text. Each can be set with:
+    - static text
+    - frames
+    - timecode
+
+    Options - dictionary which sets the final look.
+    - Datatypes explanation:
+    <color> string format must be supported by FFmpeg.
+        Examples: "#000000", "0x000000", "black"
+    <font> must be accessible by ffmpeg = name of registered Font in system or path to font file.
+        Examples: "Arial", "C:/Windows/Fonts/arial.ttf"
+
+    - Possible keys:
+    "opacity" - Opacity of text - <float, Range 0-1>
+    "bg_opacity" - Opacity of background (box around text) - <float, Range 0-1>
+    "bg_color" - Background color - <color>
+    "bg_padding" - Background padding in pixels - <int>
+    "x_offset" - offsets burnin vertically by entered pixels from border - <int>
+    "y_offset" - offsets burnin horizontally by entered pixels from border - <int>
+    - x_offset & y_offset should be set at least to same value as bg_padding!!
+    "font" - Font Family for text - <font>
+    "font_size" - Font size in pixels - <int>
+    "font_color" - Color of text - <color>
+    "frame_offset" - Default start frame - <int>
+        - required IF start frame is not set when using frames or timecode burnins
+
+    On initialization, general options can be set through the "options_init" arg.
+    They can be overridden when adding each burnin.
+
+    '''
     TOP_CENTERED = ffmpeg_burnins.TOP_CENTERED
     BOTTOM_CENTERED = ffmpeg_burnins.BOTTOM_CENTERED
     TOP_LEFT = ffmpeg_burnins.TOP_LEFT
@@ -162,13 +196,79 @@ def example(input_path, output_path):
     burnin.render(output_path, overwrite=True)
 
 
-def example_with_presets(input_path, output_path, data):
+def burnins_from_data(input_path, output_path, data, overwrite=True):
+    '''
+    This method adds burnins to video/image file based on presets setting.
+    Extension of output MUST be same as input. (mov -> mov, avi -> avi,...)
+
+    :param input_path: full path to input file where burnins should be added
+    :type input_path: str
+    :param output_path: full path to output file where output will be rendered
+    :type output_path: str
+    :param data: data required for burnin settings (more info below)
+    :type data: dict
+    :param overwrite: output will be overridden if already exists, defaults to True
+    :type overwrite: bool
+
+    Presets must be set separately. Should be dict with 2 keys:
+    - "options" - sets look of burnins - colors, opacity,...(more info: ModifiedBurnins doc)
+        - *OPTIONAL* default values are used when not included
+    - "burnins" - contains dictionary with burnins settings
+        - *OPTIONAL* burnins won't be added (easier is not to use this)
+        - each key of "burnins" represents Alignment, there are 6 possibilities:
+            TOP_LEFT        TOP_CENTERED        TOP_RIGHT
+            BOTTOM_LEFT     BOTTOM_CENTERED     BOTTOM_RIGHT
+        - value for each key is dict which should contain "function" which says
+            what kind of burnin is that:
+            "text", "timecode" or "frame_numbers"
+        - "text" key with content is also required when "text" function is used
+
+    Requirement of *data* keys is based on presets.
+    - "start_frame" - is required when "timecode" or "frame_numbers" function is used
+    - "start_frame_tc" - when "timecode" should start with different frame
+    - *keys for static text*
+
+    EXAMPLE:
+    preset = {
+        "options": {*OPTIONS FOR LOOK*},
+        "burnins": {
+            "TOP_LEFT": {
+                "function": "text",
+                "text": "static_text"
+            },
+            "TOP_RIGHT": {
+                "function": "text",
+                "text": "{shot}"
+            },
+            "BOTTOM_LEFT": {
+                "function": "timecode"
+            },
+            "BOTTOM_RIGHT": {
+                "function": "frame_numbers"
+            }
+        }
+    }
+
+    For this preset we'll need at least this data:
+    data = {
+        "start_frame": 1001,
+        "shot": "sh0010"
+    }
+
+    When Timecode should start from 1 then data needs:
+    data = {
+        "start_frame": 1001,
+        "start_frame_tc": 1,
+        "shot": "sh0010"
+    }
+    '''
     presets = config.get_presets().get('tools', {}).get('burnins', {})
     options_init = presets.get('options')
 
     burnin = ModifiedBurnins(input_path, options_init=options_init)
 
     start_frame = data.get("start_frame")
+    start_frame_tc = data.get('start_frame_tc', start_frame)
     for align_text, preset in presets.get('burnins', {}).items():
         align = None
         if align_text == 'TOP_LEFT':
@@ -205,8 +305,8 @@
         if bi_func == 'frame_numbers':
             burnin.add_frame_numbers(align, start_frame=start_frame)
         elif bi_func == 'timecode':
-            burnin.add_timecode(align, start_frame=start_frame)
-        elif: bi_func == 'text':
+            burnin.add_timecode(align, start_frame=start_frame_tc)
+        elif bi_func == 'text':
             if not preset.get('text'):
                 log.error('Text is not set for text function burnin!')
                 return
@@ -218,7 +318,7 @@
             )
             return
 
-    burnin.render(output_path, overwrite=True)
+    burnin.render(output_path, overwrite=overwrite)
 
 
 '''
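
Usage sketch for the new burnins_from_data() entry point (illustrative only: the file paths and the shot name below are hypothetical, the import path is assumed, and the `tools/burnins` presets described in the docstring are expected to be configured):

    # Hypothetical call of the renamed burnins_from_data() function.
    # Paths and the shot name are placeholders; the burnin look itself
    # comes from config.get_presets()["tools"]["burnins"], not from here.
    from pype.scripts.otio_burnin import burnins_from_data

    data = {
        "start_frame": 1001,   # used by "frame_numbers" and as the timecode default
        "start_frame_tc": 1,   # optional: timecode burnin starts at frame 1 instead
        "shot": "sh0010"       # fills a "{shot}" text burnin defined in the preset
    }

    # Output extension must match the input extension (mov -> mov).
    burnins_from_data("/path/to/input.mov", "/path/to/output.mov", data, overwrite=True)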