diff --git a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py
index ce0dfeb244..7adc36f4b5 100644
--- a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py
+++ b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py
@@ -290,6 +290,11 @@ class StoreThumbnailsToAvalon(BaseAction):
             {"$set": {"data.thumbnail_id": thumbnail_id}}
         )
 
+        self.db_con.update_one(
+            {"_id": avalon_asset["_id"]},
+            {"$set": {"data.thumbnail_id": thumbnail_id}}
+        )
+
         action_job["status"] = "done"
         session.commit()
 
diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index f08dc73c19..f5b4c4b8c3 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -236,6 +236,7 @@ class SyncEntitiesFactory:
         " from TypedContext where project_id is \"{}\""
     )
     ignore_custom_attr_key = "avalon_ignore_sync"
+    ignore_entity_types = ["milestone"]
 
     report_splitter = {"type": "label", "value": "---"}
 
@@ -366,7 +367,10 @@ class SyncEntitiesFactory:
             parent_id = entity["parent_id"]
             entity_type = entity.entity_type
             entity_type_low = entity_type.lower()
-            if entity_type_low == "task":
+            if entity_type_low in self.ignore_entity_types:
+                continue
+
+            elif entity_type_low == "task":
                 entities_dict[parent_id]["tasks"].append(entity["name"])
                 continue
 
diff --git a/pype/plugins/global/publish/collect_anatomy.py b/pype/plugins/global/publish/collect_anatomy.py
index 0831c16d32..ae83e39513 100644
--- a/pype/plugins/global/publish/collect_anatomy.py
+++ b/pype/plugins/global/publish/collect_anatomy.py
@@ -18,7 +18,7 @@ Provides:
 import os
 import json
 
-from avalon import io, api, lib
+from avalon import api, lib
 from pypeapp import Anatomy
 
 import pyblish.api
diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py
index 9c6a8b08f2..825c48dcf4 100644
--- a/pype/plugins/global/publish/collect_instance_anatomy_data.py
+++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py
@@ -33,7 +33,6 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin):
 
     order = pyblish.api.CollectorOrder + 0.49
     label = "Collect instance anatomy data"
-    hosts = ["maya", "nuke", "standalonepublisher"]
 
     def process(self, instance):
         # get all the stuff from the database
diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py
index 61e9e034f9..abd20bb9ea 100644
--- a/pype/plugins/global/publish/extract_jpeg.py
+++ b/pype/plugins/global/publish/extract_jpeg.py
@@ -3,7 +3,6 @@ import os
 import pyblish.api
 import clique
 import pype.api
-import pype.lib
 
 
 class ExtractJpegEXR(pyblish.api.InstancePlugin):
@@ -70,8 +69,6 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
         proj_name = os.environ.get('AVALON_PROJECT', '__default__')
         profile = config_data.get(proj_name, config_data['__default__'])
 
-        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
-
         jpeg_items = []
         jpeg_items.append(
             os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index cdec90cb3d..4d63e2c641 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -43,320 +43,328 @@ class ExtractReview(pyblish.api.InstancePlugin):
         # filter out mov and img sequences
         representations_new = representations[:]
         for repre in representations:
-            if repre['ext'] in self.ext_filter:
-                tags = repre.get("tags", [])
-
-                if "thumbnail" in tags:
"thumbnail" in tags: - continue - - self.log.info("Try repre: {}".format(repre)) - - if "review" in tags: - staging_dir = repre["stagingDir"] - - # iterating preset output profiles - for name, profile in output_profiles.items(): - repre_new = repre.copy() - ext = profile.get("ext", None) - p_tags = profile.get('tags', []) - self.log.info("p_tags: `{}`".format(p_tags)) - - # adding control for presets to be sequence - # or single file - is_sequence = ("sequence" in p_tags) and (ext in ( - "png", "jpg", "jpeg")) - - self.log.debug("Profile name: {}".format(name)) - - if not ext: - ext = "mov" - self.log.warning( - str("`ext` attribute not in output " - "profile. Setting to default ext: `mov`")) - - self.log.debug( - "instance.families: {}".format( - instance.data['families'])) - self.log.debug( - "profile.families: {}".format(profile['families'])) - - if any(item in instance.data['families'] for item in profile['families']): - if isinstance(repre["files"], list): - collections, remainder = clique.assemble( - repre["files"]) - - full_input_path = os.path.join( - staging_dir, collections[0].format( - '{head}{padding}{tail}') - ) - - filename = collections[0].format('{head}') - if filename.endswith('.'): - filename = filename[:-1] - else: - full_input_path = os.path.join( - staging_dir, repre["files"]) - filename = repre["files"].split(".")[0] - - repr_file = filename + "_{0}.{1}".format(name, ext) - full_output_path = os.path.join( - staging_dir, repr_file) - - if is_sequence: - filename_base = filename + "_{0}".format(name) - repr_file = filename_base + ".%08d.{0}".format( - ext) - repre_new["sequence_file"] = repr_file - full_output_path = os.path.join( - staging_dir, filename_base, repr_file) - - self.log.info("input {}".format(full_input_path)) - self.log.info("output {}".format(full_output_path)) - - new_tags = [x for x in tags if x != "delete"] - - # add families - [instance.data["families"].append(t) - for t in p_tags - if t not in instance.data["families"]] - - # add to - [new_tags.append(t) for t in p_tags - if t not in new_tags] - - self.log.info("new_tags: `{}`".format(new_tags)) - - input_args = [] - - # overrides output file - input_args.append("-y") - - # preset's input data - input_args.extend(profile.get('input', [])) - - # necessary input data - # adds start arg only if image sequence - if isinstance(repre["files"], list): - input_args.append( - "-start_number {0} -framerate {1}".format( - start_frame, fps)) - - input_args.append("-i {}".format(full_input_path)) - - for audio in instance.data.get("audio", []): - offset_frames = ( - instance.data.get("startFrameReview") - - audio["offset"] - ) - offset_seconds = offset_frames / fps - - if offset_seconds > 0: - input_args.append("-ss") - else: - input_args.append("-itsoffset") - - input_args.append(str(abs(offset_seconds))) - - input_args.extend( - ["-i", audio["filename"]] - ) - - # Need to merge audio if there are more - # than 1 input. 
- if len(instance.data["audio"]) > 1: - input_args.extend( - [ - "-filter_complex", - "amerge", - "-ac", - "2" - ] - ) - - output_args = [] - codec_args = profile.get('codec', []) - output_args.extend(codec_args) - # preset's output data - output_args.extend(profile.get('output', [])) - - # defining image ratios - resolution_ratio = float(resolution_width / ( - resolution_height * pixel_aspect)) - delivery_ratio = float(to_width) / float(to_height) - self.log.debug(resolution_ratio) - self.log.debug(delivery_ratio) - - # get scale factor - scale_factor = to_height / ( - resolution_height * pixel_aspect) - self.log.debug(scale_factor) - - # letter_box - lb = profile.get('letter_box', 0) - if lb != 0: - ffmpet_width = to_width - ffmpet_height = to_height - if "reformat" not in p_tags: - lb /= pixel_aspect - if resolution_ratio != delivery_ratio: - ffmpet_width = resolution_width - ffmpet_height = int( - resolution_height * pixel_aspect) - else: - if resolution_ratio != delivery_ratio: - lb /= scale_factor - else: - lb /= pixel_aspect - - output_args.append(str( - "-filter:v scale={0}x{1}:flags=lanczos," - "setsar=1,drawbox=0:0:iw:" - "round((ih-(iw*(1/{2})))/2):t=fill:" - "c=black,drawbox=0:ih-round((ih-(iw*(" - "1/{2})))/2):iw:round((ih-(iw*(1/{2})))" - "/2):t=fill:c=black").format( - ffmpet_width, ffmpet_height, lb)) - - # In case audio is longer than video. - output_args.append("-shortest") - - # output filename - output_args.append(full_output_path) - - self.log.debug( - "__ pixel_aspect: `{}`".format(pixel_aspect)) - self.log.debug( - "__ resolution_width: `{}`".format( - resolution_width)) - self.log.debug( - "__ resolution_height: `{}`".format( - resolution_height)) - - # scaling none square pixels and 1920 width - if "reformat" in p_tags: - if resolution_ratio < delivery_ratio: - self.log.debug("lower then delivery") - width_scale = int(to_width * scale_factor) - width_half_pad = int(( - to_width - width_scale)/2) - height_scale = to_height - height_half_pad = 0 - else: - self.log.debug("heigher then delivery") - width_scale = to_width - width_half_pad = 0 - scale_factor = float(to_width) / float( - resolution_width) - self.log.debug(scale_factor) - height_scale = int( - resolution_height * scale_factor) - height_half_pad = int( - (to_height - height_scale)/2) - - self.log.debug( - "__ width_scale: `{}`".format(width_scale)) - self.log.debug( - "__ width_half_pad: `{}`".format( - width_half_pad)) - self.log.debug( - "__ height_scale: `{}`".format( - height_scale)) - self.log.debug( - "__ height_half_pad: `{}`".format( - height_half_pad)) - - scaling_arg = str( - "scale={0}x{1}:flags=lanczos," - "pad={2}:{3}:{4}:{5}:black,setsar=1" - ).format(width_scale, height_scale, - to_width, to_height, - width_half_pad, - height_half_pad - ) - - vf_back = self.add_video_filter_args( - output_args, scaling_arg) - # add it to output_args - output_args.insert(0, vf_back) - - # baking lut file application - lut_path = instance.data.get("lutPath") - if lut_path and ("bake-lut" in p_tags): - # removing Gama info as it is all baked in lut - gamma = next((g for g in input_args - if "-gamma" in g), None) - if gamma: - input_args.remove(gamma) - - # create lut argument - lut_arg = "lut3d=file='{}'".format( - lut_path.replace( - "\\", "/").replace(":/", "\\:/") - ) - lut_arg += ",colormatrix=bt601:bt709" - - vf_back = self.add_video_filter_args( - output_args, lut_arg) - # add it to output_args - output_args.insert(0, vf_back) - self.log.info("Added Lut to ffmpeg command") - self.log.debug( - "_ 
-
-                            if is_sequence:
-                                stg_dir = os.path.dirname(full_output_path)
-
-                                if not os.path.exists(stg_dir):
-                                    self.log.debug(
-                                        "creating dir: {}".format(stg_dir))
-                                    os.mkdir(stg_dir)
-
-                            ffmpeg_path = (
-                                pype.lib.get_ffmpeg_tool_path("ffmpeg")
-                            )
-                            mov_args = [
-                                ffmpeg_path,
-                                " ".join(input_args),
-                                " ".join(output_args)
-                            ]
-                            subprcs_cmd = " ".join(mov_args)
-
-                            # run subprocess
-                            self.log.debug("Executing: {}".format(subprcs_cmd))
-                            output = pype.api.subprocess(subprcs_cmd)
-                            self.log.debug("Output: {}".format(output))
-
-                            # create representation data
-                            repre_new.update({
-                                'name': name,
-                                'ext': ext,
-                                'files': repr_file,
-                                "tags": new_tags,
-                                "outputName": name,
-                                "codec": codec_args,
-                                "_profile": profile,
-                                "resolutionHeight": resolution_height,
-                                "resolutionWidth": resolution_width,
-                            })
-                            if is_sequence:
-                                repre_new.update({
-                                    "stagingDir": stg_dir,
-                                    "files": os.listdir(stg_dir)
-                                })
-
-                            if repre_new.get('preview'):
-                                repre_new.pop("preview")
-                            if repre_new.get('thumbnail'):
-                                repre_new.pop("thumbnail")
-
-                            # adding representation
-                            self.log.debug("Adding: {}".format(repre_new))
-                            representations_new.append(repre_new)
-                        else:
-                            continue
-            else:
+            if repre['ext'] not in self.ext_filter:
                 continue
+            tags = repre.get("tags", [])
+
+            if "thumbnail" in tags:
+                continue
+
+            self.log.info("Try repre: {}".format(repre))
+
+            if "review" not in tags:
+                continue
+
+            staging_dir = repre["stagingDir"]
+
+            # iterating preset output profiles
+            for name, profile in output_profiles.items():
+                repre_new = repre.copy()
+                ext = profile.get("ext", None)
+                p_tags = profile.get('tags', [])
+                self.log.info("p_tags: `{}`".format(p_tags))
+
+                # adding control for presets to be sequence
+                # or single file
+                is_sequence = ("sequence" in p_tags) and (ext in (
+                    "png", "jpg", "jpeg"))
+
+                self.log.debug("Profile name: {}".format(name))
+
+                if not ext:
+                    ext = "mov"
+                    self.log.warning(
+                        str("`ext` attribute not in output "
+                            "profile. Setting to default ext: `mov`"))
+
+                self.log.debug(
+                    "instance.families: {}".format(
+                        instance.data['families']))
+                self.log.debug(
+                    "profile.families: {}".format(profile['families']))
+
+                profile_family_check = False
+                for _family in profile['families']:
+                    if _family in instance.data['families']:
+                        profile_family_check = True
+                        break
+
+                if not profile_family_check:
+                    continue
+
+                if isinstance(repre["files"], list):
+                    collections, remainder = clique.assemble(
+                        repre["files"])
+
+                    full_input_path = os.path.join(
+                        staging_dir, collections[0].format(
+                            '{head}{padding}{tail}')
+                    )
+
+                    filename = collections[0].format('{head}')
+                    if filename.endswith('.'):
+                        filename = filename[:-1]
+                else:
+                    full_input_path = os.path.join(
+                        staging_dir, repre["files"])
+                    filename = repre["files"].split(".")[0]
+
+                repr_file = filename + "_{0}.{1}".format(name, ext)
+                full_output_path = os.path.join(
+                    staging_dir, repr_file)
+
+                if is_sequence:
+                    filename_base = filename + "_{0}".format(name)
+                    repr_file = filename_base + ".%08d.{0}".format(
+                        ext)
+                    repre_new["sequence_file"] = repr_file
+                    full_output_path = os.path.join(
+                        staging_dir, filename_base, repr_file)
+
+                self.log.info("input {}".format(full_input_path))
+                self.log.info("output {}".format(full_output_path))
+
+                new_tags = [x for x in tags if x != "delete"]
+
+                # add families
+                [instance.data["families"].append(t)
+                 for t in p_tags
+                 if t not in instance.data["families"]]
+
+                # add to
+                [new_tags.append(t) for t in p_tags
+                 if t not in new_tags]
+
+                self.log.info("new_tags: `{}`".format(new_tags))
+
+                input_args = []
+
+                # overrides output file
+                input_args.append("-y")
+
+                # preset's input data
+                input_args.extend(profile.get('input', []))
+
+                # necessary input data
+                # adds start arg only if image sequence
+                if isinstance(repre["files"], list):
+                    input_args.append(
+                        "-start_number {0} -framerate {1}".format(
+                            start_frame, fps))
+
+                input_args.append("-i {}".format(full_input_path))
+
+                for audio in instance.data.get("audio", []):
+                    offset_frames = (
+                        instance.data.get("startFrameReview") -
+                        audio["offset"]
+                    )
+                    offset_seconds = offset_frames / fps
+
+                    if offset_seconds > 0:
+                        input_args.append("-ss")
+                    else:
+                        input_args.append("-itsoffset")
+
+                    input_args.append(str(abs(offset_seconds)))
+
+                    input_args.extend(
+                        ["-i", audio["filename"]]
+                    )
+
+                    # Need to merge audio if there are more
+                    # than 1 input.
+                    if len(instance.data["audio"]) > 1:
+                        input_args.extend(
+                            [
+                                "-filter_complex",
+                                "amerge",
+                                "-ac",
+                                "2"
+                            ]
+                        )
+
+                output_args = []
+                codec_args = profile.get('codec', [])
+                output_args.extend(codec_args)
+                # preset's output data
+                output_args.extend(profile.get('output', []))
+
+                # defining image ratios
+                resolution_ratio = float(resolution_width / (
+                    resolution_height * pixel_aspect))
+                delivery_ratio = float(to_width) / float(to_height)
+                self.log.debug(resolution_ratio)
+                self.log.debug(delivery_ratio)
+
+                # get scale factor
+                scale_factor = to_height / (
+                    resolution_height * pixel_aspect)
+                self.log.debug(scale_factor)
+
+                # letter_box
+                lb = profile.get('letter_box', 0)
+                if lb != 0:
+                    ffmpet_width = to_width
+                    ffmpet_height = to_height
+                    if "reformat" not in p_tags:
+                        lb /= pixel_aspect
+                        if resolution_ratio != delivery_ratio:
+                            ffmpet_width = resolution_width
+                            ffmpet_height = int(
+                                resolution_height * pixel_aspect)
+                    else:
+                        if resolution_ratio != delivery_ratio:
+                            lb /= scale_factor
+                        else:
+                            lb /= pixel_aspect
+
+                    output_args.append(str(
+                        "-filter:v scale={0}x{1}:flags=lanczos,"
+                        "setsar=1,drawbox=0:0:iw:"
+                        "round((ih-(iw*(1/{2})))/2):t=fill:"
+                        "c=black,drawbox=0:ih-round((ih-(iw*("
+                        "1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
+                        "/2):t=fill:c=black").format(
+                            ffmpet_width, ffmpet_height, lb))
+
+                # In case audio is longer than video.
+                output_args.append("-shortest")
+
+                # output filename
+                output_args.append(full_output_path)
+
+                self.log.debug(
+                    "__ pixel_aspect: `{}`".format(pixel_aspect))
+                self.log.debug(
+                    "__ resolution_width: `{}`".format(
+                        resolution_width))
+                self.log.debug(
+                    "__ resolution_height: `{}`".format(
+                        resolution_height))
+
+                # scaling none square pixels and 1920 width
+                if "reformat" in p_tags:
+                    if resolution_ratio < delivery_ratio:
+                        self.log.debug("lower then delivery")
+                        width_scale = int(to_width * scale_factor)
+                        width_half_pad = int((
+                            to_width - width_scale)/2)
+                        height_scale = to_height
+                        height_half_pad = 0
+                    else:
+                        self.log.debug("heigher then delivery")
+                        width_scale = to_width
+                        width_half_pad = 0
+                        scale_factor = float(to_width) / float(
+                            resolution_width)
+                        self.log.debug(scale_factor)
+                        height_scale = int(
+                            resolution_height * scale_factor)
+                        height_half_pad = int(
+                            (to_height - height_scale)/2)
+
+                    self.log.debug(
+                        "__ width_scale: `{}`".format(width_scale))
+                    self.log.debug(
+                        "__ width_half_pad: `{}`".format(
+                            width_half_pad))
+                    self.log.debug(
+                        "__ height_scale: `{}`".format(
+                            height_scale))
+                    self.log.debug(
+                        "__ height_half_pad: `{}`".format(
+                            height_half_pad))
+
+                    scaling_arg = str(
+                        "scale={0}x{1}:flags=lanczos,"
+                        "pad={2}:{3}:{4}:{5}:black,setsar=1"
+                    ).format(width_scale, height_scale,
+                             to_width, to_height,
+                             width_half_pad,
+                             height_half_pad
+                             )
+
+                    vf_back = self.add_video_filter_args(
+                        output_args, scaling_arg)
+                    # add it to output_args
+                    output_args.insert(0, vf_back)
+
+                # baking lut file application
+                lut_path = instance.data.get("lutPath")
+                if lut_path and ("bake-lut" in p_tags):
+                    # removing Gama info as it is all baked in lut
+                    gamma = next((g for g in input_args
+                                  if "-gamma" in g), None)
+                    if gamma:
+                        input_args.remove(gamma)
+
+                    # create lut argument
+                    lut_arg = "lut3d=file='{}'".format(
+                        lut_path.replace(
+                            "\\", "/").replace(":/", "\\:/")
+                    )
+                    lut_arg += ",colormatrix=bt601:bt709"
+
+                    vf_back = self.add_video_filter_args(
+                        output_args, lut_arg)
+                    # add it to output_args
+                    output_args.insert(0, vf_back)
+                    self.log.info("Added Lut to ffmpeg command")
+                    self.log.debug(
+                        "_ output_args: `{}`".format(output_args))
+
+                if is_sequence:
+                    stg_dir = os.path.dirname(full_output_path)
+
+                    if not os.path.exists(stg_dir):
+                        self.log.debug(
+                            "creating dir: {}".format(stg_dir))
+                        os.mkdir(stg_dir)
+
+                mov_args = [
+                    os.path.join(
+                        os.environ.get(
+                            "FFMPEG_PATH",
+                            ""), "ffmpeg"),
+                    " ".join(input_args),
+                    " ".join(output_args)
+                ]
+                subprcs_cmd = " ".join(mov_args)
+
+                # run subprocess
+                self.log.debug("Executing: {}".format(subprcs_cmd))
+                output = pype.api.subprocess(subprcs_cmd)
+                self.log.debug("Output: {}".format(output))
+
+                # create representation data
+                repre_new.update({
+                    'name': name,
+                    'ext': ext,
+                    'files': repr_file,
+                    "tags": new_tags,
+                    "outputName": name,
+                    "codec": codec_args,
+                    "_profile": profile,
+                    "resolutionHeight": resolution_height,
+                    "resolutionWidth": resolution_width,
+                })
+                if is_sequence:
+                    repre_new.update({
+                        "stagingDir": stg_dir,
+                        "files": os.listdir(stg_dir)
+                    })
+
+                if repre_new.get('preview'):
+                    repre_new.pop("preview")
+                if repre_new.get('thumbnail'):
+                    repre_new.pop("thumbnail")
+
+                # adding representation
+                self.log.debug("Adding: {}".format(repre_new))
+                representations_new.append(repre_new)
+
         for repre in representations_new:
             if "delete" in repre.get("tags", []):
                 representations_new.remove(repre)
diff --git a/pype/plugins/nukestudio/publish/collect_audio.py b/pype/plugins/nukestudio/publish/collect_audio.py
index 61419b1ad9..727d7da795 100644
--- a/pype/plugins/nukestudio/publish/collect_audio.py
+++ b/pype/plugins/nukestudio/publish/collect_audio.py
@@ -1,5 +1,5 @@
 from pyblish import api
-
+import os
 
 class CollectAudio(api.InstancePlugin):
     """Collect audio from tags.
@@ -12,7 +12,7 @@ class CollectAudio(api.InstancePlugin):
     """
 
     # Run just before CollectSubsets
-    order = api.CollectorOrder + 0.1025
+    order = api.CollectorOrder + 0.1021
     label = "Collect Audio"
     hosts = ["nukestudio"]
    families = ["clip"]
@@ -21,8 +21,10 @@ def process(self, instance):
         # Exclude non-tagged instances.
         tagged = False
         for tag in instance.data["tags"]:
-            family = dict(tag["metadata"]).get("tag.family", "")
+            tag_data = dict(tag["metadata"])
+            family = tag_data.get("tag.family", "")
             if family.lower() == "audio":
+                subset = tag_data.get("tag.subset", "Main")
                 tagged = True
 
         if not tagged:
@@ -40,14 +42,14 @@ def process(self, instance):
         data["family"] = "audio"
         data["families"] = ["ftrack"]
 
-        subset = ""
-        for tag in instance.data["tags"]:
-            tag_data = dict(tag["metadata"])
-            if "tag.subset" in tag_data:
-                subset = tag_data["tag.subset"]
         data["subset"] = "audio" + subset.title()
 
         data["source"] = data["sourcePath"]
 
+        data["label"] = "{} - {} - ({})".format(
+            data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
+                1]
+        )
+
         self.log.debug("Creating instance with data: {}".format(data))
 
         instance.context.create_instance(**data)
diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index e1a5645227..b8654b0784 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ b/pype/plugins/nukestudio/publish/collect_clips.py
@@ -1,7 +1,7 @@
 import os
 
 from pyblish import api
-
+import hiero
 import nuke
 
 class CollectClips(api.ContextPlugin):
@@ -48,7 +48,9 @@ class CollectClips(api.ContextPlugin):
                 track = item.parent()
                 source = item.source().mediaSource()
                 source_path = source.firstpath()
-                effects = [f for f in item.linkedItems() if f.isEnabled()]
+                effects = [f for f in item.linkedItems()
+                           if f.isEnabled()
+                           if isinstance(f, hiero.core.EffectTrackItem)]
 
                 # If source is *.nk its a comp effect and we need to fetch the
                 # write node output. This should be improved by parsing the script
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index b98eccce7f..acdc5193ae 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -14,7 +14,7 @@ class CollectPlates(api.InstancePlugin):
     """
 
     # Run just before CollectSubsets
-    order = api.CollectorOrder + 0.1025
+    order = api.CollectorOrder + 0.1021
     label = "Collect Plates"
     hosts = ["nukestudio"]
     families = ["clip"]
@@ -23,8 +23,10 @@ def process(self, instance):
         # Exclude non-tagged instances.
         tagged = False
         for tag in instance.data["tags"]:
-            family = dict(tag["metadata"]).get("tag.family", "")
+            tag_data = dict(tag["metadata"])
+            family = tag_data.get("tag.family", "")
             if family.lower() == "plate":
+                subset = tag_data.get("tag.subset", "Main")
                 tagged = True
                 break
 
@@ -34,29 +36,27 @@ def process(self, instance):
                 "\"plate\"".format(instance)
             )
             return
+        self.log.debug("__ subset: `{}`".format(instance.data["subset"]))
+        # if "audio" in instance.data["subset"]:
+        #     return
 
         # Collect data.
         data = {}
         for key, value in instance.data.iteritems():
             data[key] = value
 
+        self.log.debug("__ family: `{}`".format(family))
+        self.log.debug("__ subset: `{}`".format(subset))
+
         data["family"] = family.lower()
         data["families"] = ["ftrack"] + instance.data["families"][1:]
 
         data["source"] = data["sourcePath"]
-
-        subset = ""
-        for tag in instance.data["tags"]:
-            tag_data = dict(tag["metadata"])
-            if "tag.subset" in tag_data:
-                subset = tag_data["tag.subset"]
-        data["subset"] = data["family"] + subset.title()
-
+        data["subset"] = family + subset.title()
         data["name"] = data["subset"] + "_" + data["asset"]
 
         data["label"] = "{} - {} - ({})".format(
-            data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
-                1]
-        )
+            data['asset'], data["subset"], os.path.splitext(
+                data["sourcePath"])[1])
 
         if "review" in instance.data["families"]:
             data["label"] += " - review"
@@ -146,6 +146,7 @@ class CollectPlatesData(api.InstancePlugin):
         head, padding = os.path.splitext(basename)
         ext = ext[1:]
         padding = padding[1:]
+        self.log.debug("_ padding: `{}`".format(padding))
         # head, padding, ext = source_file.split('.')
         source_first_frame = int(padding)
         padding = len(padding)
diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py
index f223e5ca65..af8fd4a0e7 100644
--- a/pype/plugins/nukestudio/publish/collect_reviews.py
+++ b/pype/plugins/nukestudio/publish/collect_reviews.py
@@ -16,7 +16,7 @@ class CollectReviews(api.InstancePlugin):
     order = api.CollectorOrder + 0.1022
     label = "Collect Reviews"
     hosts = ["nukestudio"]
-    families = ["clip"]
+    families = ["plate"]
 
     def process(self, instance):
         # Exclude non-tagged instances.
diff --git a/pype/plugins/nukestudio/publish/extract_audio.py b/pype/plugins/nukestudio/publish/extract_audio.py
index 315ba6784d..2c4afc8412 100644
--- a/pype/plugins/nukestudio/publish/extract_audio.py
+++ b/pype/plugins/nukestudio/publish/extract_audio.py
@@ -10,8 +10,6 @@ class ExtractAudioFile(pype.api.Extractor):
     hosts = ["nukestudio"]
     families = ["clip", "audio"]
     match = api.Intersection
-    optional = True
-    active = False
 
     def process(self, instance):
         import os