diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index 775b8c0332..4eaf28eae4 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -204,7 +204,7 @@ def import_to_avalon(
     except Exception:
         mongo_id = ''

-    if mongo_id is not '':
+    if mongo_id != '':
        avalon_asset = database[project_name].find_one(
            {'_id': ObjectId(mongo_id)}
        )
@@ -319,7 +319,7 @@ def import_to_avalon(
            {'$set': {
                'name': name,
                'silo': silo,
-                'data': data,
+                'data': enter_data,
                'parent': ObjectId(projectId)
            }})
@@ -340,26 +340,26 @@ def get_avalon_attr(session):


 def changeability_check_childs(entity):
-    if (entity.entity_type.lower() != 'task' and 'children' not in entity):
-        return True
-    childs = entity['children']
-    for child in childs:
-        if child.entity_type.lower() == 'task':
-            config = get_config_data()
-            if 'sync_to_avalon' in config:
-                config = config['sync_to_avalon']
-            if 'statuses_name_change' in config:
-                available_statuses = config['statuses_name_change']
-            else:
-                available_statuses = []
-            ent_status = child['status']['name'].lower()
-            if ent_status not in available_statuses:
-                return False
-        # If not task go deeper
-        elif changeability_check_childs(child) is False:
-            return False
-    # If everything is allright
+    if (entity.entity_type.lower() != 'task' and 'children' not in entity):
         return True
+    childs = entity['children']
+    for child in childs:
+        if child.entity_type.lower() == 'task':
+            config = get_config_data()
+            if 'sync_to_avalon' in config:
+                config = config['sync_to_avalon']
+            if 'statuses_name_change' in config:
+                available_statuses = config['statuses_name_change']
+            else:
+                available_statuses = []
+            ent_status = child['status']['name'].lower()
+            if ent_status not in available_statuses:
+                return False
+        # If not task go deeper
+        elif changeability_check_childs(child) is False:
+            return False
+    # If everything is allright
+    return True


 def get_data(entity, session, custom_attributes):
@@ -489,11 +489,11 @@ def get_project_config(entity):

     return config

+
 def get_tasks(project):
-    return [
-        {'name': task_type['name']} for task_type in project[
-            'project_schema']['_task_type_schema']['types']
-    ]
+    task_types = project['project_schema']['_task_type_schema']['types']
+    return [{'name': task_type['name']} for task_type in task_types]
+

 def get_project_apps(entity):
     """ Get apps from project
@@ -568,6 +568,7 @@ def get_config_data():

     return data

+
 def show_errors(obj, event, errors):
     title = 'Hey You! You raised few Errors! (*look below*)'
     items = []
diff --git a/pype/maya/lib.py b/pype/maya/lib.py
index 74e2359d38..fce1772b8e 100644
--- a/pype/maya/lib.py
+++ b/pype/maya/lib.py
@@ -16,6 +16,7 @@ import maya.api.OpenMaya as om
 from avalon import api, maya, io, pipeline
 from avalon.vendor.six import string_types
 import avalon.maya.lib
+import avalon.maya.interactive

 from pype import lib

@@ -1873,6 +1874,9 @@ def set_context_settings():

     set_scene_resolution(width, height)

+    # Set frame range.
+    avalon.maya.interactive.reset_frame_range()
+

 # Valid FPS
 def validate_fps():
diff --git a/pype/nukestudio/__init__.py b/pype/nukestudio/__init__.py
index 139937a365..ef9f639d49 100644
--- a/pype/nukestudio/__init__.py
+++ b/pype/nukestudio/__init__.py
@@ -1,16 +1,15 @@
 import os
-import sys

 from avalon.tools import workfiles
 from avalon import api as avalon
 from pyblish import api as pyblish

 from .. import api
-
 from .menu import (
     install as menu_install,
     _update_menu_task_label
 )
+from .tags import add_tags_from_presets

 from pypeapp import Logger
@@ -67,6 +66,15 @@ def install(config):
         "kAfterNewProjectCreated", launch_workfiles_app
     )

+    # Add tags on project load.
+    hiero.core.events.registerInterest(
+        "kAfterProjectLoad", add_tags
+    )
+
+
+def add_tags(event):
+    add_tags_from_presets()
+

 def launch_workfiles_app(event):
     workfiles.show(os.environ["AVALON_WORKDIR"])
diff --git a/pype/nukestudio/tags.py b/pype/nukestudio/tags.py
index e6c29a4f4e..227210d6bf 100644
--- a/pype/nukestudio/tags.py
+++ b/pype/nukestudio/tags.py
@@ -1,14 +1,15 @@
-import hiero
 import re
+
 from pypeapp import (
     config,
     Logger
 )
+from avalon import io
+
+import hiero

 log = Logger().get_logger(__name__, "nukestudio")

-_hierarchy_orig = 'hierarchy_orig'
-

 def create_tag(key, value):
     """
@@ -36,10 +37,10 @@ def update_tag(tag, value):
         value (dict): parameters of tag
     """

-    tag.setNote(value['note'])
-    tag.setIcon(str(value['icon']['path']))
+    tag.setNote(value["note"])
+    tag.setIcon(str(value["icon"]["path"]))
     mtd = tag.metadata()
-    pres_mtd = value.get('metadata', None)
+    pres_mtd = value.get("metadata", None)
     if pres_mtd:
         [mtd.setValue("tag.{}".format(str(k)), str(v))
          for k, v in pres_mtd.items()]
@@ -56,9 +57,40 @@ def add_tags_from_presets():
     presets = config.get_presets()

     # get nukestudio tag.json from presets
-    nks_pres = presets['nukestudio']
+    nks_pres = presets["nukestudio"]
     nks_pres_tags = nks_pres.get("tags", None)

+    # Get project task types.
+    tasks = io.find_one({"type": "project"})["config"]["tasks"]
+    nks_pres_tags["[Tasks]"] = {}
+    for task in tasks:
+        nks_pres_tags["[Tasks]"][task["name"]] = {
+            "editable": "1",
+            "note": "",
+            "icon": {
+                "path": "icons:TagGood.png"
+            },
+            "metadata": {
+                "family": "task"
+            }
+        }
+
+    # Get project assets. Currently Ftrack specific to differentiate between
+    # asset builds and shots.
+ nks_pres_tags["[AssetBuilds]"] = {} + for asset in io.find({"type": "asset"}): + if asset["data"]["entityType"] == "AssetBuild": + nks_pres_tags["[AssetBuilds]"][asset["name"]] = { + "editable": "1", + "note": "", + "icon": { + "path": "icons:TagActor.png" + }, + "metadata": { + "family": "assetbuild" + } + } + # get project and root bin object project = hiero.core.projects()[-1] root_bin = project.tagsBin() @@ -108,24 +140,10 @@ def add_tags_from_presets(): else: # check if Hierarchy in name # update Tag if already exists - tag_names = [tg.name().lower() for tg in tags] for _t in tags: - if 'hierarchy' not in _t.name().lower(): - # update only non hierarchy tags - # because hierarchy could be edited - update_tag(_t, _val) - elif _hierarchy_orig in _t.name().lower(): - # if hierarchy_orig already exists just - # sync with preset - update_tag(_t, _val) - else: - # if history tag already exist then create - # backup synchronisable original Tag - if (_hierarchy_orig not in tag_names): - # create Tag obj - tag = create_tag( - _hierarchy_orig.capitalize(), _val - ) + if "hierarchy" in _t.name().lower(): + continue - # adding Tag to Bin - root_bin.addItem(tag) + # update only non hierarchy tags + # because hierarchy could be edited + update_tag(_t, _val) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py index cbb4e89998..a79e1f8ce5 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py @@ -1,5 +1,4 @@ import pyblish.api -import os import json @@ -26,7 +25,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): 'nukescript': 'comp', 'write': 'render', 'review': 'mov', - 'plate': 'img' + 'plate': 'img', + 'audio': 'audio' } def process(self, instance): @@ -39,7 +39,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): family = instance.data['family'].lower() asset_type = '' - asset_type = self.family_mapping[family] + asset_type = instance.data.get( + "ftrackFamily", self.family_mapping[family] + ) componentList = [] ft_session = instance.context.data["ftrackSession"] diff --git a/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py b/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py index 230cc0e4f1..5f0516c593 100644 --- a/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py +++ b/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py @@ -1,4 +1,5 @@ import pyblish.api +from avalon import io class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): @@ -27,7 +28,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): order = pyblish.api.IntegratorOrder - 0.04 label = 'Integrate Hierarchy To Ftrack' - families = ["clip"] + families = ["clip", "shot"] optional = False def process(self, context): @@ -35,6 +36,9 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): if "hierarchyContext" not in context.data: return + if not io.Session: + io.install() + self.ft_project = None self.session = context.data["ftrackSession"] @@ -46,6 +50,8 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): for entity_name in input_data: entity_data = input_data[entity_name] entity_type = entity_data['entity_type'] + self.log.debug(entity_data) + self.log.debug(entity_type) if entity_type.lower() == 'project': query = 'Project where full_name is "{}"'.format(entity_name) @@ -79,10 +85,13 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): # CUSTOM ATTRIBUTES custom_attributes = 
             instances = [
-                i for i in self.context[:] if i.data['asset'] in entity['name']]
+                i for i in self.context[:] if i.data['asset'] in entity['name']
+            ]
             for key in custom_attributes:
                 assert (key in entity['custom_attributes']), (
-                    'Missing custom attribute key: `{0}` in attrs: `{1}`'.format(key, entity['custom_attributes'].keys()))
+                    'Missing custom attribute key: `{0}` in attrs: '
+                    '`{1}`'.format(key, entity['custom_attributes'].keys())
+                )

                 entity['custom_attributes'][key] = custom_attributes[key]
@@ -114,10 +123,33 @@
                 )
                 self.session.commit()

+            # Incoming links.
+            self.create_links(entity_data, entity)
+            self.session.commit()
+
             if 'childs' in entity_data:
                 self.import_to_ftrack(
                     entity_data['childs'], entity)

+    def create_links(self, entity_data, entity):
+        # Clear existing links.
+        for link in entity.get("incoming_links", []):
+            self.session.delete(link)
+        self.session.commit()
+
+        # Create new links.
+        for input in entity_data.get("inputs", []):
+            input_id = io.find_one({"_id": input})["data"]["ftrackId"]
+            assetbuild = self.session.get("AssetBuild", input_id)
+            self.log.debug(
+                "Creating link from {0} to {1}".format(
+                    assetbuild["name"], entity["name"]
+                )
+            )
+            self.session.create(
+                "TypedContextLink", {"from": assetbuild, "to": entity}
+            )
+
     def get_all_task_types(self, project):
         tasks = {}
         proj_template = project['project_schema']
diff --git a/pype/plugins/global/publish/integrate_assumed_destination.py b/pype/plugins/global/publish/integrate_assumed_destination.py
index 758eca5a9f..6999ce6ab8 100644
--- a/pype/plugins/global/publish/integrate_assumed_destination.py
+++ b/pype/plugins/global/publish/integrate_assumed_destination.py
@@ -9,7 +9,7 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):

     label = "Integrate Assumed Destination"
     order = pyblish.api.IntegratorOrder - 0.05
-    families = ["clip", "projectfile"]
+    families = ["clip", "projectfile", "plate"]

     def process(self, instance):
diff --git a/pype/plugins/global/publish/integrate_hierarchy_avalon.py b/pype/plugins/global/publish/integrate_hierarchy_avalon.py
index d75fd10792..c01cb2d26a 100644
--- a/pype/plugins/global/publish/integrate_hierarchy_avalon.py
+++ b/pype/plugins/global/publish/integrate_hierarchy_avalon.py
@@ -3,139 +3,99 @@ from avalon import io


 class IntegrateHierarchyToAvalon(pyblish.api.ContextPlugin):
-    """
-    Create entities in ftrack based on collected data from premiere
-
-    """
+    """Create entities in Avalon based on collected data."""

     order = pyblish.api.IntegratorOrder - 0.1
-    label = 'Integrate Hierarchy To Avalon'
-    families = ['clip']
+    label = "Integrate Hierarchy To Avalon"
+    families = ["clip", "shot"]

     def process(self, context):
         if "hierarchyContext" not in context.data:
-            self.log.info('skipping IntegrateHierarchyToAvalon')
+            self.log.info("skipping IntegrateHierarchyToAvalon")
             return

-        self.db = io
-        if not self.db.Session:
-            self.db.install()
+        if not io.Session:
+            io.install()

         input_data = context.data["hierarchyContext"]
+        self.project = None

         self.import_to_avalon(input_data)

     def import_to_avalon(self, input_data, parent=None):
-
         for name in input_data:
-            self.log.info('input_data[name]: {}'.format(input_data[name]))
+            self.log.info("input_data[name]: {}".format(input_data[name]))
             entity_data = input_data[name]
-            entity_type = entity_data['entity_type']
+            entity_type = entity_data["entity_type"]

             data = {}
+
+            data["inputs"] = entity_data.get("inputs", [])
+            data["entityType"] = entity_type
+
+            # Custom attributes.
+            for k, val in entity_data.get("custom_attributes", {}).items():
+                data[k] = val
+
+            # Tasks.
+            tasks = entity_data.get("tasks", [])
+            if tasks is not None or len(tasks) > 0:
+                data["tasks"] = tasks
+            parents = []
+            visualParent = None
+            # do not store project"s id as visualParent (silo asset)
+            if self.project is not None:
+                if self.project["_id"] != parent["_id"]:
+                    visualParent = parent["_id"]
+                parents.extend(parent.get("data", {}).get("parents", []))
+                parents.append(parent["name"])
+            data["visualParent"] = visualParent
+            data["parents"] = parents
+
             # Process project
-            if entity_type.lower() == 'project':
-                entity = self.db.find_one({'type': 'project'})
+            if entity_type.lower() == "project":
+                entity = io.find_one({"type": "project"})
                 # TODO: should be in validator?
-                assert (entity is not None), "Didn't find project in DB"
+                assert (entity is not None), "Did not find project in DB"

                 # get data from already existing project
-                for key, value in entity.get('data', {}).items():
+                for key, value in entity.get("data", {}).items():
                     data[key] = value

-                self.av_project = entity
+                self.project = entity

             # Raise error if project or parent are not set
-            elif self.av_project is None or parent is None:
+            elif self.project is None or parent is None:
                 raise AssertionError(
                     "Collected items are not in right order!"
                 )

             # Else process assset
             else:
-                entity = self.db.find_one({'type': 'asset', 'name': name})
-                # Create entity if doesn't exist
+                entity = io.find_one({"type": "asset", "name": name})
+                # Create entity if doesn"t exist
                 if entity is None:
-                    if self.av_project['_id'] == parent['_id']:
+                    if self.project["_id"] == parent["_id"]:
                         silo = None
-                    elif parent['silo'] is None:
-                        silo = parent['name']
+                    elif parent["silo"] is None:
+                        silo = parent["name"]
                     else:
-                        silo = parent['silo']
-                    entity = self.create_avalon_asset(name, silo)
-                    self.log.info('entity: {}'.format(entity))
-                    self.log.info('data: {}'.format(entity.get('data', {})))
-                    self.log.info('____1____')
-                    data['entityType'] = entity_type
-                    # TASKS
-                    tasks = entity_data.get('tasks', [])
-                    if tasks is not None or len(tasks) > 0:
-                        data['tasks'] = tasks
-                    parents = []
-                    visualParent = None
-                    data = input_data[name]
-                    if self.av_project['_id'] != parent['_id']:
-                        visualParent = parent['_id']
-                        parents.extend(parent.get('data', {}).get('parents', []))
-                        parents.append(parent['name'])
-                    data['visualParent'] = visualParent
-                    data['parents'] = parents
-
-                    self.db.update_many(
-                        {'_id': entity['_id']},
-                        {'$set': {
-                            'data': data,
-                        }})
-
-                    entity = self.db.find_one({'type': 'asset', 'name': name})
-                    self.log.info('entity: {}'.format(entity))
-                    self.log.info('data: {}'.format(entity.get('data', {})))
-                    self.log.info('____2____')
-
-                # Else get data from already existing
-                else:
-                    self.log.info('entity: {}'.format(entity))
-                    self.log.info('data: {}'.format(entity.get('data', {})))
-                    self.log.info('________')
-                    for key, value in entity.get('data', {}).items():
-                        data[key] = value
-
-                data['entityType'] = entity_type
-                # TASKS
-                tasks = entity_data.get('tasks', [])
-                if tasks is not None or len(tasks) > 0:
-                    data['tasks'] = tasks
-                parents = []
-                visualParent = None
-                # do not store project's id as visualParent (silo asset)
-
-                if self.av_project['_id'] != parent['_id']:
-                    visualParent = parent['_id']
-                    parents.extend(parent.get('data', {}).get('parents', []))
-                    parents.append(parent['name'])
-                data['visualParent'] = visualParent
-                data['parents'] = parents
-
-                # CUSTOM ATTRIBUTES
-                for k, val in entity_data.get('custom_attributes', {}).items():
-                    data[k] = val
+                        silo = parent["silo"]
+                    entity = self.create_avalon_asset(name, silo, data)

             # Update entity data with input data
-            self.db.update_many(
-                {'_id': entity['_id']},
-                {'$set': {
-                    'data': data,
-                }})
+            io.update_many({"_id": entity["_id"]}, {"$set": {"data": data}})

-            if 'childs' in entity_data:
-                self.import_to_avalon(entity_data['childs'], entity)
+            if "childs" in entity_data:
+                self.import_to_avalon(entity_data["childs"], entity)

-    def create_avalon_asset(self, name, silo):
+    def create_avalon_asset(self, name, silo, data):
         item = {
-            'schema': 'avalon-core:asset-2.0',
-            'name': name,
-            'silo': silo,
-            'parent': self.av_project['_id'],
-            'type': 'asset',
-            'data': {}
+            "schema": "avalon-core:asset-2.0",
+            "name": name,
+            "silo": silo,
+            "parent": self.project["_id"],
+            "type": "asset",
+            "data": data
         }
-        entity_id = self.db.insert_one(item).inserted_id
+        self.log.debug("Creating asset: {}".format(item))
+        entity_id = io.insert_one(item).inserted_id

-        return self.db.find_one({'_id': entity_id})
+        return io.find_one({"_id": entity_id})
diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index f96fb240c9..75ad687d0f 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -2,6 +2,8 @@ import os
 import logging
 import shutil
 import clique
+import traceback
+import sys
 import errno

 import pyblish.api
@@ -62,7 +64,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         "rendersetup",
         "rig",
         "plate",
-        "look"
+        "look",
+        "audio"
     ]
     exclude_families = ["clip"]

@@ -98,6 +101,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        # \ /
        # o __/
        #
+        for result in context.data["results"]:
+            if not result["success"]:
+                self.log.debug(result)
+                exc_type, exc_value, exc_traceback = result["error_info"]
+                extracted_traceback = traceback.extract_tb(exc_traceback)[-1]
+                self.log.debug(
+                    "Error at line {}: \"{}\"".format(
+                        extracted_traceback[1], result["error"]
+                    )
+                )
         assert all(result["success"] for result in context.data["results"]), (
             "Atomicity not held, aborting.")
diff --git a/pype/plugins/maya/load/load_audio.py b/pype/plugins/maya/load/load_audio.py
new file mode 100644
index 0000000000..e1860d0ca6
--- /dev/null
+++ b/pype/plugins/maya/load/load_audio.py
@@ -0,0 +1,27 @@
+from maya import cmds, mel
+
+from avalon import api
+
+
+class AudioLoader(api.Loader):
+    """Specific loader of audio."""
+
+    families = ["audio"]
+    label = "Import audio."
+    representations = ["wav"]
+    icon = "volume-up"
+    color = "orange"
+
+    def load(self, context, name, namespace, data):
+        start_frame = cmds.playbackOptions(query=True, min=True)
+        sound_node = cmds.sound(
+            file=context["representation"]["data"]["path"], offset=start_frame
+        )
+        cmds.timeControl(
+            mel.eval("$tmpVar=$gPlayBackSlider"),
+            edit=True,
+            sound=sound_node,
+            displaySound=True
+        )
+
+        return [sound_node]
diff --git a/pype/plugins/maya/load/load_image_plane.py b/pype/plugins/maya/load/load_image_plane.py
new file mode 100644
index 0000000000..5534cce0ee
--- /dev/null
+++ b/pype/plugins/maya/load/load_image_plane.py
@@ -0,0 +1,79 @@
+import pymel.core as pc
+
+from avalon import api
+from Qt import QtWidgets
+
+
+class ImagePlaneLoader(api.Loader):
+    """Specific loader of plate for image planes on selected camera."""
+
+    families = ["plate"]
+    label = "Create imagePlane on selected camera."
+ representations = ["mov"] + icon = "image" + color = "orange" + + def load(self, context, name, namespace, data): + new_nodes = [] + image_plane_depth = 1000 + + # Getting camera from selection. + selection = pc.ls(selection=True) + + if len(selection) > 1: + QtWidgets.QMessageBox.critical( + None, + "Error!", + "Multiple nodes selected. Please select only one.", + QtWidgets.QMessageBox.Ok + ) + return + + if len(selection) < 1: + QtWidgets.QMessageBox.critical( + None, + "Error!", + "No camera selected.", + QtWidgets.QMessageBox.Ok + ) + return + + relatives = pc.listRelatives(selection[0], shapes=True) + if not pc.ls(relatives, type="camera"): + QtWidgets.QMessageBox.critical( + None, + "Error!", + "Selected node is not a camera.", + QtWidgets.QMessageBox.Ok + ) + return + + camera = selection[0] + + camera.displayResolution.set(1) + camera.farClipPlane.set(image_plane_depth * 10) + + # Create image plane + image_plane_transform, image_plane_shape = pc.imagePlane( + camera=camera, showInAllViews=False + ) + image_plane_shape.depth.set(image_plane_depth) + # Need to get "type" by string, because its a method as well. + pc.Attribute(image_plane_shape + ".type").set(2) + image_plane_shape.imageName.set( + context["representation"]["data"]["path"] + ) + image_plane_shape.useFrameExtension.set(1) + + start_frame = pc.playbackOptions(q=True, min=True) + end_frame = pc.playbackOptions(q=True, max=True) + + image_plane_shape.frameOffset.set(1 - start_frame) + image_plane_shape.frameIn.set(start_frame) + image_plane_shape.frameOut.set(end_frame) + + new_nodes.extend( + [image_plane_transform.name(), image_plane_shape.name()] + ) + + return new_nodes diff --git a/pype/plugins/nukestudio/publish/collect_assetbuilds.py b/pype/plugins/nukestudio/publish/collect_assetbuilds.py new file mode 100644 index 0000000000..76326c320b --- /dev/null +++ b/pype/plugins/nukestudio/publish/collect_assetbuilds.py @@ -0,0 +1,56 @@ +from pyblish import api +from avalon import io + + +class CollectAssetBuilds(api.ContextPlugin): + """Collect asset from tags. + + Tag is expected to have name of the asset and metadata: + { + "family": "assetbuild" + } + """ + + # Run just after CollectClip + order = api.CollectorOrder + 0.02 + label = "Collect AssetBuilds" + hosts = ["nukestudio"] + + def process(self, context): + asset_builds = {} + for asset in io.find({"type": "asset"}): + if asset["data"]["entityType"] == "AssetBuild": + self.log.debug("Found \"{}\" in database.".format(asset)) + asset_builds[asset["name"]] = asset + + for instance in context: + if instance.data["family"] != "clip": + continue + + # Exclude non-tagged instances. + tagged = False + asset_names = [] + for tag in instance.data["tags"]: + family = dict(tag["metadata"]).get("tag.family", "") + if family.lower() == "assetbuild": + asset_names.append(tag["name"]) + tagged = True + + if not tagged: + self.log.debug( + "Skipping \"{}\" because its not tagged with " + "\"assetbuild\"".format(instance) + ) + continue + + # Collect asset builds. 
+ data = {"assetbuilds": []} + for name in asset_names: + data["assetbuilds"].append( + asset_builds[name] + ) + self.log.debug( + "Found asset builds: {}".format(data["assetbuilds"]) + ) + + instance.data.update(data) diff --git a/pype/plugins/nukestudio/publish/collect_audio.py b/pype/plugins/nukestudio/publish/collect_audio.py new file mode 100644 index 0000000000..61419b1ad9 --- /dev/null +++ b/pype/plugins/nukestudio/publish/collect_audio.py @@ -0,0 +1,53 @@ +from pyblish import api + + +class CollectAudio(api.InstancePlugin): + """Collect audio from tags. + + Tag is expected to have metadata: + { + "family": "audio", + "subset": "main" + } + """ + + # Run just before CollectSubsets + order = api.CollectorOrder + 0.1025 + label = "Collect Audio" + hosts = ["nukestudio"] + families = ["clip"] + + def process(self, instance): + # Exclude non-tagged instances. + tagged = False + for tag in instance.data["tags"]: + family = dict(tag["metadata"]).get("tag.family", "") + if family.lower() == "audio": + tagged = True + + if not tagged: + self.log.debug( + "Skipping \"{}\" because its not tagged with " + "\"audio\"".format(instance) + ) + return + + # Collect data. + data = {} + for key, value in instance.data.iteritems(): + data[key] = value + + data["family"] = "audio" + data["families"] = ["ftrack"] + + subset = "" + for tag in instance.data["tags"]: + tag_data = dict(tag["metadata"]) + if "tag.subset" in tag_data: + subset = tag_data["tag.subset"] + data["subset"] = "audio" + subset.title() + + data["source"] = data["sourcePath"] + + self.log.debug("Creating instance with data: {}".format(data)) + instance.context.create_instance(**data) diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index af542af7a5..f678ad9f50 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -1,6 +1,9 @@ import os + from pyblish import api +import nuke + class CollectClips(api.ContextPlugin): """Collect all Track items selection.""" @@ -12,7 +15,7 @@ class CollectClips(api.ContextPlugin): def process(self, context): projectdata = context.data["projectData"] version = context.data.get("version", "001") - data = {} + instances_data = [] for item in context.data.get("selection", []): # Skip audio track items # Try/Except is to handle items types, like EffectTrackItem @@ -26,44 +29,68 @@ class CollectClips(api.ContextPlugin): track = item.parent() source = item.source().mediaSource() source_path = source.firstpath() - instance_name = "{0}_{1}".format(track.name(), item.name()) + + # If source is *.nk its a comp effect and we need to fetch the + # write node output. This should be improved by parsing the script + # rather than opening it. + if source_path.endswith(".nk"): + nuke.scriptOpen(source_path) + # There should noly be one. + write_node = nuke.allNodes(filter="Write")[0] + path = nuke.filename(write_node) + + if "%" in path: + # Get start frame from Nuke script and use the item source + # in/out, because you can have multiple shots covered with + # one nuke script. 
+                    start_frame = int(nuke.root()["first_frame"].getValue())
+                    if write_node["use_limit"].getValue():
+                        start_frame = int(write_node["first"].getValue())
+
+                    path = path % (start_frame + item.sourceIn())
+
+                source_path = path
+                self.log.debug(
+                    "Fetched source path \"{}\" from \"{}\" in "
+                    "\"{}\".".format(
+                        source_path, write_node.name(), source.firstpath()
+                    )
+                )

             try:
-                head, padding, ext = os.path.basename(source_path).split('.')
+                head, padding, ext = os.path.basename(source_path).split(".")
                 source_first_frame = int(padding)
             except:
                 source_first_frame = 0

-            data[instance_name] = {
-                "item": item,
-                "source": source,
-                "sourcePath": source_path,
-                "track": track.name(),
-                "sourceFirst": source_first_frame,
-                "sourceIn": int(item.sourceIn()),
-                "sourceOut": int(item.sourceOut()),
-                "startFrame": int(item.timelineIn()),
-                "endFrame": int(item.timelineOut())
-            }
-
-        for key, value in data.items():
-            family = "clip"
-            context.create_instance(
-                name=key,
-                asset=value["item"].name(),
-                item=value["item"],
-                source=value["source"],
-                sourcePath=value["sourcePath"],
-                family=family,
-                families=[],
-                sourceFirst=value["sourceFirst"],
-                sourceIn=value["sourceIn"],
-                sourceOut=value["sourceOut"],
-                startFrame=value["startFrame"],
-                endFrame=value["endFrame"],
-                handles=projectdata['handles'],
-                handleStart=0,
-                handleEnd=0,
-                version=version,
-                track=value["track"]
+            instances_data.append(
+                {
+                    "name": "{0}_{1}".format(track.name(), item.name()),
+                    "item": item,
+                    "source": source,
+                    "sourcePath": source_path,
+                    "track": track.name(),
+                    "sourceFirst": source_first_frame,
+                    "sourceIn": int(item.sourceIn()),
+                    "sourceOut": int(item.sourceOut()),
+                    "startFrame": int(item.timelineIn()),
+                    "endFrame": int(item.timelineOut())
+                }
+            )
+
+        for data in instances_data:
+            data.update(
+                {
+                    "asset": data["item"].name(),
+                    "family": "clip",
+                    "families": [],
+                    "handles": projectdata.get("handles", 0),
+                    "handleStart": 0,
+                    "handleEnd": 0,
+                    "version": version
+                }
+            )
+            instance = context.create_instance(**data)
+            self.log.debug(
+                "Created instance with data: {}".format(instance.data)
             )
diff --git a/pype/plugins/nukestudio/publish/collect_handles.py b/pype/plugins/nukestudio/publish/collect_handles.py
index 9ac0ed76f5..0aa339d039 100644
--- a/pype/plugins/nukestudio/publish/collect_handles.py
+++ b/pype/plugins/nukestudio/publish/collect_handles.py
@@ -8,7 +8,6 @@ class CollectClipHandles(api.ContextPlugin):
     order = api.CollectorOrder + 0.1025
     label = "Collect Handles"
     hosts = ["nukestudio"]
-    families = ['clip']

     def process(self, context):
         assets_shared = context.data.get("assetsShared")
@@ -16,7 +15,15 @@

         # find all main types instances and add its handles to asset shared
         instances = context[:]
+        filtered_instances = []
         for instance in instances:
+            families = instance.data.get("families", [])
+            families += [instance.data["family"]]
+            if "clip" in families:
+                filtered_instances.append(instance)
+            else:
+                continue
+
             # get handles
             handles = int(instance.data["handles"])
             handle_start = int(instance.data["handleStart"])
@@ -33,12 +40,14 @@
                     "handleEnd": handle_end
                 })

-        for instance in instances:
+        for instance in filtered_instances:
             if not instance.data.get("main"):
                 self.log.debug("Synchronize handles on: `{}`".format(
                     instance.data["name"]))
                 name = instance.data["asset"]
                 s_asset_data = assets_shared.get(name)
-                instance.data["handles"] = s_asset_data["handles"]
-                instance.data["handleStart"] = s_asset_data["handleStart"]
s_asset_data["handleStart"] - instance.data["handleEnd"] = s_asset_data["handleEnd"] + instance.data["handles"] = s_asset_data.get("handles", 0) + instance.data["handleStart"] = s_asset_data.get( + "handleStart", 0 + ) + instance.data["handleEnd"] = s_asset_data.get("handleEnd", 0) diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index 7082ecd210..a20a515077 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -43,7 +43,9 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin): # build data for inner nukestudio project property data = { - "sequence": context.data['activeSequence'].name().replace(' ', '_'), + "sequence": ( + context.data['activeSequence'].name().replace(' ', '_') + ), "track": clip.parent().name().replace(' ', '_'), "clip": asset } @@ -110,7 +112,10 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin): # create new shot asset name instance.data["asset"] = instance.data["asset"].format( **d_metadata) - self.log.debug("__ instance.data[asset]: {}".format(instance.data["asset"])) + self.log.debug( + "__ instance.data[asset]: " + "{}".format(instance.data["asset"]) + ) # lastly fill those individual properties itno # format the string with collected data @@ -126,8 +131,10 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin): # check if hierarchy attribute is already created # it should not be so return warning if it is hd = instance.data.get("hierarchy") - assert not hd, "Only one Hierarchy Tag is \ - allowed. Clip: `{}`".format(asset) + assert not hd, ( + "Only one Hierarchy Tag is allowed. " + "Clip: `{}`".format(asset) + ) assetsShared = { asset: { @@ -179,15 +186,12 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): handle_start = int(instance.data["handleStart"] + handles) handle_end = int(instance.data["handleEnd"] + handles) - # get source frames - source_first = int(instance.data["sourceFirst"]) - source_in = int(instance.data["sourceIn"]) - source_out = int(instance.data["sourceOut"]) - - instance.data['startFrame'] = int( - source_first + source_in - handle_start) - instance.data['endFrame'] = int( - (source_first + source_out + handle_end)) + instance.data['startFrame'] = ( + instance.data["item"].timelineIn() - handle_start + ) + instance.data['endFrame'] = ( + instance.data["item"].timelineOut() + handle_end + ) # inject assetsShared to other plates types assets_shared = context.data.get("assetsShared") @@ -200,31 +204,44 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): instance.data["parents"] = s_asset_data["parents"] instance.data["hierarchy"] = s_asset_data["hierarchy"] - self.log.debug("__ instance.data[parents]: {}".format(instance.data["parents"])) - self.log.debug("__ instance.data[hierarchy]: {}".format(instance.data["hierarchy"])) - self.log.debug("__ instance.data[name]: {}".format(instance.data["name"])) - if "main" not in instance.data["name"].lower(): - continue + self.log.debug( + "__ instance.data[parents]: {}".format( + instance.data["parents"] + ) + ) + self.log.debug( + "__ instance.data[hierarchy]: {}".format( + instance.data["hierarchy"] + ) + ) + self.log.debug( + "__ instance.data[name]: {}".format(instance.data["name"]) + ) in_info = {} + + in_info["inputs"] = [ + x["_id"] for x in instance.data.get("assetbuilds", []) + ] + # suppose that all instances are Shots in_info['entity_type'] = 'Shot' # get custom attributes of 
             in_info['custom_attributes'] = {
                 'handles': int(instance.data.get('handles')),
-                'fend': int(
-                    (source_first + source_out)),
-                'fstart': int(
-                    source_first + source_in),
-                'fps': context.data["framerate"]
+                'fstart': int(instance.data["startFrame"]),
+                'fend': int(instance.data["endFrame"]),
+                'fps': context.data["framerate"],
+                "edit_in": int(instance.data["startFrame"]),
+                "edit_out": int(instance.data["endFrame"])
             }

             handle_start = instance.data.get('handleStart')
             handle_end = instance.data.get('handleEnd')
             self.log.debug("__ handle_start: {}".format(handle_start))
             self.log.debug("__ handle_end: {}".format(handle_end))
-
+
             if handle_start and handle_end:
                 in_info['custom_attributes'].update({
                     "handle_start": handle_start,
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index 6dbd27dc59..9f6aa5539c 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -1,12 +1,107 @@
+import os
+
 from pyblish import api
-import pype


 class CollectPlates(api.InstancePlugin):
+    """Collect plates from tags.
+
+    Tag is expected to have metadata:
+        {
+            "family": "plate"
+            "subset": "main"
+        }
+    """
+
+    # Run just before CollectSubsets
+    order = api.CollectorOrder + 0.1025
+    label = "Collect Plates"
+    hosts = ["nukestudio"]
+    families = ["clip"]
+
+    def process(self, instance):
+        # Exclude non-tagged instances.
+        tagged = False
+        for tag in instance.data["tags"]:
+            family = dict(tag["metadata"]).get("tag.family", "")
+            if family.lower() == "plate":
+                tagged = True
+
+        if not tagged:
+            self.log.debug(
+                "Skipping \"{}\" because its not tagged with "
+                "\"plate\"".format(instance)
+            )
+            return
+
+        # Collect data.
+        data = {}
+        for key, value in instance.data.iteritems():
+            data[key] = value
+
+        data["family"] = "plate"
+        data["families"] = ["ftrack"]
+        data["source"] = data["sourcePath"]
+
+        subset = ""
+        for tag in instance.data["tags"]:
+            tag_data = dict(tag["metadata"])
+            if "tag.subset" in tag_data:
+                subset = tag_data["tag.subset"]
+        data["subset"] = "plate" + subset.title()
+
+        data["label"] += " - {} - ({})".format(
+            subset, os.path.splitext(data["sourcePath"])[1]
+        )
+
+        # Timeline data.
+        handle_start = int(instance.data["handleStart"] + data["handles"])
+        handle_end = int(instance.data["handleEnd"] + data["handles"])
+
+        source_in_h = data["sourceIn"] - handle_start
+        source_out_h = data["sourceOut"] + handle_end
+
+        timeline_in = int(data["item"].timelineIn())
+        timeline_out = int(data["item"].timelineOut())
+
+        timeline_frame_start = timeline_in - handle_start
+        timeline_frame_end = timeline_out + handle_end
+
+        frame_start = 1
+        frame_end = frame_start + (data["sourceOut"] - data["sourceIn"])
+
+        sequence = instance.context.data["activeSequence"]
+        fps = sequence.framerate()
+
+        data.update(
+            {
+                "sourceFirst": data["sourceFirst"],
+                "sourceIn": data["sourceIn"],
+                "sourceOut": data["sourceOut"],
+                "sourceInH": source_in_h,
+                "sourceOutH": source_out_h,
+                "frameStart": frame_start,
+                "startFrame": frame_start,
+                "endFrame": frame_end,
+                "timelineIn": timeline_in,
+                "timelineOut": timeline_out,
+                "timelineInHandles": timeline_frame_start,
+                "timelineOutHandles": timeline_frame_end,
+                "fps": fps,
+                "handleStart": handle_start,
+                "handleEnd": handle_end
+            }
+        )
+
+        self.log.debug("Creating instance with data: {}".format(data))
+        instance.context.create_instance(**data)
+
+
+class CollectPlatesData(api.InstancePlugin):
     """Collect plates"""

-    order = api.CollectorOrder + 0.49
-    label = "Collect Plates"
+    order = api.CollectorOrder + 0.495
+    label = "Collect Plates Data"
     hosts = ["nukestudio"]
     families = ["plate"]

@@ -25,12 +120,17 @@
         name = instance.data["subset"]
         asset = instance.data["asset"]
         track = instance.data["track"]
-        family = instance.data["family"]
-        families = instance.data["families"]
         version = instance.data["version"]
         source_path = instance.data["sourcePath"]
         source_file = os.path.basename(source_path)

+        # Filter out "clip" family.
+        families = instance.data["families"] + [instance.data["family"]]
+        families = list(set(families))
+        if "clip" in families:
+            families.remove("clip")
+        family = families[-1]
+
         # staging dir creation
         staging_dir = os.path.dirname(
             source_path)
@@ -80,10 +180,14 @@
         self.log.debug("__ s duration: {}".format(source_out - source_in + 1))
         self.log.debug("__ source_in_h: {}".format(source_in_h))
         self.log.debug("__ source_out_h: {}".format(source_out_h))
-        self.log.debug("__ sh duration: {}".format(source_out_h - source_in_h + 1))
+        self.log.debug("__ sh duration: {}".format(
+            source_out_h - source_in_h + 1)
+        )
         self.log.debug("__ timeline_in: {}".format(timeline_in))
         self.log.debug("__ timeline_out: {}".format(timeline_out))
-        self.log.debug("__ t duration: {}".format(timeline_out - timeline_in + 1))
+        self.log.debug("__ t duration: {}".format(
+            timeline_out - timeline_in + 1)
+        )
         self.log.debug("__ timeline_frame_start: {}".format(
             timeline_frame_start))
         self.log.debug("__ timeline_frame_end: {}".format(timeline_frame_end))
@@ -95,10 +199,6 @@
         self.log.debug("__ before family: {}".format(family))
         self.log.debug("__ before families: {}".format(families))

-        #
-        # this is just workaround because 'clip' family is filtered
-        instance.data["family"] = families[-1]
-        instance.data["families"].append(family)

         # add to data of representation
         version_data.update({
@@ -137,10 +237,9 @@
                 ext=ext
             )
             start_frame = source_first_frame
-            end_frame = source_first_frame + source_out
-            files = [file % i for i in range(
-                (source_first_frame + source_in_h),
-                ((source_first_frame + source_out_h) + 1), 1)]
+            duration = source_out_h - source_in_h
+            end_frame = source_first_frame + duration
+            files = [file % i for i in range(start_frame, (end_frame + 1), 1)]
         except Exception as e:
             self.log.debug("Exception in file: {}".format(e))
             head, ext = os.path.splitext(source_file)
@@ -149,27 +248,6 @@
             start_frame = source_in_h
             end_frame = source_out_h

-
-        mov_file = head + ".mov"
-        mov_path = os.path.normpath(os.path.join(staging_dir, mov_file))
-        if os.path.exists(mov_path):
-            # adding mov into the representations
-            self.log.debug("__ mov_path: {}".format(mov_path))
-            plates_mov_representation = {
-                'files': mov_file,
-                'stagingDir': staging_dir,
-                'startFrame': 0,
-                'endFrame': source_out - source_in + 1,
-                'step': 1,
-                'frameRate': fps,
-                'preview': True,
-                'thumbnail': False,
-                'name': "preview",
-                'ext': "mov",
-            }
-            instance.data["representations"].append(
-                plates_mov_representation)
-
         thumb_file = head + ".png"
         thumb_path = os.path.join(staging_dir, thumb_file)
         self.log.debug("__ thumb_path: {}".format(thumb_path))
@@ -210,6 +288,3 @@
                 plates_representation))
         self.log.debug("__ after family: {}".format(family))
         self.log.debug("__ after families: {}".format(families))
-
-        # # this will do FnNsFrameServer
-        # FnNsFrameServer.renderFrames(*_args)
diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py
new file mode 100644
index 0000000000..253d2db8ca
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_reviews.py
@@ -0,0 +1,54 @@
+from pyblish import api
+
+
+class CollectReviews(api.InstancePlugin):
+    """Collect review from tags.
+
+    Tag is expected to have metadata:
+        {
+            "family": "review"
+            "subset": "main"
+        }
+    """
+
+    # Run just before CollectSubsets
+    order = api.CollectorOrder + 0.1025
+    label = "Collect Reviews"
+    hosts = ["nukestudio"]
+    families = ["clip"]
+
+    def process(self, instance):
+        # Exclude non-tagged instances.
+        tagged = False
+        for tag in instance.data["tags"]:
+            family = dict(tag["metadata"]).get("tag.family", "")
+            if family.lower() == "review":
+                tagged = True
+
+        if not tagged:
+            self.log.debug(
+                "Skipping \"{}\" because its not tagged with "
+                "\"review\"".format(instance)
+            )
+            return
+
+        # Collect data.
+        data = {}
+        for key, value in instance.data.iteritems():
+            data[key] = value
+
+        data["family"] = "review"
+        data["ftrackFamily"] = "img"
+        data["families"] = ["ftrack"]
+
+        subset = ""
+        for tag in instance.data["tags"]:
+            tag_data = dict(tag["metadata"])
+            if "tag.subset" in tag_data:
+                subset = tag_data["tag.subset"]
+        data["subset"] = "review" + subset.title()
+
+        data["source"] = data["sourcePath"]
+
+        self.log.debug("Creating instance with data: {}".format(data))
+        instance.context.create_instance(**data)
diff --git a/pype/plugins/nukestudio/publish/collect_selection.py b/pype/plugins/nukestudio/publish/collect_selection.py
index e22ea79a05..e87f9d03ec 100644
--- a/pype/plugins/nukestudio/publish/collect_selection.py
+++ b/pype/plugins/nukestudio/publish/collect_selection.py
@@ -2,6 +2,7 @@ import pyblish.api

 import hiero

+
 class CollectSelection(pyblish.api.ContextPlugin):
     """Inject the selection in the context."""

@@ -9,7 +10,16 @@
     label = "Selection"

     def process(self, context):
-        selection = getattr(hiero, "selection")
+        selection = list(hiero.selection)

         self.log.debug("selection: {}".format(selection))

-        context.data["selection"] = hiero.selection
+
+        if not selection:
+            self.log.debug(
+                "Nothing is selected. Collecting all items from sequence "
+                "\"{}\"".format(hiero.ui.activeSequence())
+            )
+            for track in hiero.ui.activeSequence().items():
+                selection.extend(track.items())
+
+        context.data["selection"] = selection
diff --git a/pype/plugins/nukestudio/publish/collect_shots.py b/pype/plugins/nukestudio/publish/collect_shots.py
new file mode 100644
index 0000000000..8cf02ff764
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_shots.py
@@ -0,0 +1,70 @@
+from pyblish import api
+
+
+class CollectShots(api.InstancePlugin):
+    """Collect Shot from Clip."""
+
+    # Run just before CollectClipSubsets
+    order = api.CollectorOrder + 0.1025
+    label = "Collect Shots"
+    hosts = ["nukestudio"]
+    families = ["clip"]
+
+    def process(self, instance):
+        # Exclude non-tagged instances.
+        tagged = False
+        for tag in instance.data["tags"]:
+            if tag["name"].lower() == "hierarchy":
+                tagged = True
+
+        if not tagged:
+            self.log.debug(
+                "Skipping \"{}\" because its not tagged with "
+                "\"Hierarchy\"".format(instance)
+            )
+            return
+
+        # Collect data.
+        data = {}
+        for key, value in instance.data.iteritems():
+            data[key] = value
+
+        data["family"] = "shot"
+        data["families"] = []
+        data["frameStart"] = 1
+
+        data["label"] += " - tasks: {} - assetbuilds: {}".format(
+            data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
+        )
+
+        # Get handles.
+        data["handleStart"] = instance.data["handleStart"]
+        data["handleStart"] += data["handles"]
+        data["handleEnd"] = instance.data["handleEnd"] + data["handles"]
+
+        # Frame-ranges with handles.
+ data["sourceInH"] = data["sourceIn"] - data["handleStart"] + data["sourceOutH"] = data["sourceOut"] + data["handleEnd"] + + # Get timeline frames. + data["timelineIn"] = int(data["item"].timelineIn()) + data["timelineOut"] = int(data["item"].timelineOut()) + + # Frame-ranges with handles. + data["timelineInHandles"] = data["timelineIn"] + data["timelineInHandles"] -= data["handleStart"] + data["timelineOutHandles"] = data["timelineOut"] + data["timelineOutHandles"] += data["handleEnd"] + + # Creating comp frame range. + data["endFrame"] = ( + data["frameStart"] + (data["sourceOut"] - data["sourceIn"]) + ) + + # Get fps. + sequence = instance.context.data["activeSequence"] + data["fps"] = sequence.framerate() + + # Create instance. + self.log.debug("Creating instance with: {}".format(data)) + instance.context.create_instance(**data) diff --git a/pype/plugins/nukestudio/publish/collect_subsets.py b/pype/plugins/nukestudio/publish/collect_subsets.py index 44099e50da..95476b4db7 100644 --- a/pype/plugins/nukestudio/publish/collect_subsets.py +++ b/pype/plugins/nukestudio/publish/collect_subsets.py @@ -116,8 +116,6 @@ class CollectClipSubsets(api.InstancePlugin): # get specific presets pr_host_tasks = deepcopy( nks_presets["rules_tasks"]).get("hostTasks", None) - pr_host_subsets = deepcopy( - nks_presets["rules_tasks"]).get("hostSubsets", None) subsets_collect = dict() # iterate tags and collect subset properities from presets @@ -134,7 +132,7 @@ class CollectClipSubsets(api.InstancePlugin): try: # get subsets for task subsets = None - subsets = pr_host_subsets[host] + #subsets = pr_host_subsets[host] except KeyError: pass diff --git a/pype/plugins/nukestudio/publish/collect_tag_tasks.py b/pype/plugins/nukestudio/publish/collect_tag_tasks.py index 592559fc50..ed2f3009d3 100644 --- a/pype/plugins/nukestudio/publish/collect_tag_tasks.py +++ b/pype/plugins/nukestudio/publish/collect_tag_tasks.py @@ -13,13 +13,6 @@ class CollectClipTagTasks(api.InstancePlugin): # gets tags tags = instance.data["tags"] - # gets presets for nukestudio - presets = instance.context.data['presets'][ - instance.context.data['host']] - - # find preset for default task - default_tasks = presets['rules_tasks']['defaultTasks'] - tasks = list() for t in tags: t_metadata = dict(t["metadata"]) @@ -30,11 +23,7 @@ class CollectClipTagTasks(api.InstancePlugin): t_task = t_metadata.get("tag.label", "") tasks.append(t_task) - if tasks: - instance.data["tasks"] = tasks - else: - # add tasks from presets if no task tag - instance.data["tasks"] = default_tasks + instance.data["tasks"] = tasks self.log.info("Collected Tasks from Tags: `{}`".format( instance.data["tasks"])) diff --git a/pype/plugins/nukestudio/publish/collect_tag_types.py b/pype/plugins/nukestudio/publish/collect_tag_types.py index 6889ddd81a..a33c71254f 100644 --- a/pype/plugins/nukestudio/publish/collect_tag_types.py +++ b/pype/plugins/nukestudio/publish/collect_tag_types.py @@ -31,6 +31,8 @@ class CollectClipTagTypes(api.InstancePlugin): if subset_names: instance.data["subsetType"] = subset_names[0] + else: + instance.data["subsetType"] = "main" self.log.info("Collected Plate Types from Tags: `{}`".format( instance.data["subsetType"])) diff --git a/pype/plugins/nukestudio/publish/extract_audio.py b/pype/plugins/nukestudio/publish/extract_audio.py index 17ef882690..c16f123353 100644 --- a/pype/plugins/nukestudio/publish/extract_audio.py +++ b/pype/plugins/nukestudio/publish/extract_audio.py @@ -1,6 +1,7 @@ from pyblish import api import pype + class 
     """Extracts audio subset file"""
@@ -53,10 +54,10 @@
         instance.data["representations"] = list()

         representation = {
-            'files': [audio_file],
+            'files': os.path.basename(audio_file),
             'stagingDir': staging_dir,
             'name': "wav",
-            'ext': ".wav"
+            'ext': "wav"
         }

         instance.data["representations"].append(representation)
diff --git a/pype/plugins/nukestudio/publish/extract_plate.py b/pype/plugins/nukestudio/publish/extract_plate.py
new file mode 100644
index 0000000000..fbbdae612d
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/extract_plate.py
@@ -0,0 +1,140 @@
+import os
+import subprocess
+
+from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
+
+import pype.api
+from pype.vendor import ffmpeg
+
+
+class ExtractPlate(pype.api.Extractor):
+    """Extract plate cut to the timeline.
+
+    Only supporting mov plates for now. Image sequences already get cut down to
+    timeline range.
+
+    """
+
+    label = "Plate"
+    hosts = ["nukestudio"]
+    families = ["plate"]
+    optional = True
+
+    def process(self, instance):
+        if not instance.data["sourcePath"].endswith(".mov"):
+            self.log.debug(
+                "Skipping {} because its not a \"*.mov\" "
+                "format.".format(instance)
+            )
+            return
+
+        staging_dir = self.staging_dir(instance)
+        filename = "{0}_without_sound".format(instance.name) + ".mov"
+        output_path = os.path.join(staging_dir, filename)
+        input_path = instance.data["sourcePath"]
+
+        self.log.info("Outputting movie to %s" % output_path)
+
+        # Cut plate to timeline.
+        item = instance.data["item"]
+        start_frame = item.mapTimelineToSource(
+            item.timelineIn() - (
+                instance.data["handleStart"] + instance.data["handles"]
+            )
+        )
+        end_frame = item.mapTimelineToSource(
+            item.timelineOut() + (
+                instance.data["handleEnd"] + instance.data["handles"]
+            )
+        )
+        framerate = item.sequence().framerate().toFloat()
+        output_options = {
+            "vcodec": "copy",
+            "ss": start_frame / framerate,
+            "frames": int(end_frame - start_frame) + 1
+        }
+
+        try:
+            (
+                ffmpeg
+                .input(input_path)
+                .output(output_path, **output_options)
+                .run(overwrite_output=True,
+                     capture_stdout=True,
+                     capture_stderr=True)
+            )
+        except ffmpeg.Error as e:
+            ffmpeg_error = "ffmpeg error: {}".format(e.stderr)
+            self.log.error(ffmpeg_error)
+            raise RuntimeError(ffmpeg_error)
+
+        # Extract audio.
+        filename = "{0}".format(instance.name) + ".wav"
+        audio_path = os.path.join(staging_dir, filename)
+        writeSequenceAudioWithHandles(
+            audio_path,
+            item.sequence(),
+            item.timelineIn(),
+            item.timelineOut(),
+            0,
+            0
+        )
+
+        input_path = output_path
+        filename = "{0}_with_sound".format(instance.name) + ".mov"
+        output_path = os.path.join(staging_dir, filename)
+
+        args = [
+            "ffmpeg",
+            "-i", input_path,
+            "-i", audio_path,
+            "-vcodec", "copy",
+            output_path
+        ]
+
+        self.log.debug(subprocess.list2cmdline(args))
+        p = subprocess.Popen(
+            args,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            stdin=subprocess.PIPE,
+            cwd=os.path.dirname(args[-1])
+        )
+
+        output = p.communicate()[0]
+
+        if p.returncode != 0:
+            raise ValueError(output)
+
+        self.log.debug(output)
+
+        # Adding representation.
+        ext = os.path.splitext(output_path)[1][1:]
+        representation = {
+            "files": os.path.basename(output_path),
+            "staging_dir": staging_dir,
+            "startFrame": 0,
+            "endFrame": end_frame - start_frame,
+            "step": 1,
+            "frameRate": framerate,
+            "thumbnail": False,
+            "name": ext,
+            "ext": ext
+        }
+        instance.data["representations"] = [representation]
+        self.log.debug("Adding representation: {}".format(representation))
+
+        # Adding thumbnail representation.
+        path = instance.data["sourcePath"].replace(".mov", ".png")
+        if not os.path.exists(path):
+            item.thumbnail(start_frame).save(path, format="png")
+
+        representation = {
+            "files": os.path.basename(path),
+            "stagingDir": os.path.dirname(path),
+            "name": "thumbnail",
+            "thumbnail": True,
+            "ext": "png"
+        }
+        instance.data["representations"].append(representation)
+        self.log.debug("Adding representation: {}".format(representation))
diff --git a/pype/plugins/nukestudio/publish/extract_review.py b/pype/plugins/nukestudio/publish/extract_review.py
new file mode 100644
index 0000000000..45a47b99aa
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/extract_review.py
@@ -0,0 +1,138 @@
+import os
+import subprocess
+
+from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
+
+import pype.api
+
+
+class ExtractReview(pype.api.Extractor):
+    """Extract Quicktime with optimized codec for reviewing."""
+
+    label = "Review"
+    hosts = ["nukestudio"]
+    families = ["review"]
+    optional = True
+
+    def process(self, instance):
+        staging_dir = self.staging_dir(instance)
+        filename = "{0}_without_sound".format(instance.name) + ".mov"
+        output_path = os.path.join(staging_dir, filename)
+        input_path = instance.data["sourcePath"]
+        item = instance.data["item"]
+
+        # Has to be yuv420p for compatibility with older players and smooth
+        # playback. This does come with a sacrifice of more visible banding
+        # issues.
+        start_frame = item.mapTimelineToSource(item.timelineIn())
+        end_frame = item.mapTimelineToSource(item.timelineOut())
+        args = [
+            "ffmpeg",
+            "-ss", str(start_frame / item.sequence().framerate().toFloat()),
+            "-i", input_path,
+            "-pix_fmt", "yuv420p",
+            "-crf", "18",
+            "-timecode", "00:00:00:01",
+            "-vf", "scale=trunc(iw/2)*2:trunc(ih/2)*2",
+            "-frames", str(int(end_frame - start_frame) + 1),
+            output_path
+        ]
+
+        self.log.debug(subprocess.list2cmdline(args))
+        p = subprocess.Popen(
+            args,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            stdin=subprocess.PIPE,
+            cwd=os.path.dirname(args[-1])
+        )
+
+        output = p.communicate()[0]
+
+        if p.returncode != 0:
+            raise ValueError(output)
+
+        self.log.debug(output)
+
+        # Extract audio.
+        filename = "{0}".format(instance.name) + ".wav"
+        audio_path = os.path.join(staging_dir, filename)
+        writeSequenceAudioWithHandles(
+            audio_path,
+            item.sequence(),
+            item.timelineIn(),
+            item.timelineOut(),
+            0,
+            0
+        )
+
+        input_path = output_path
+        filename = "{0}_with_sound".format(instance.name) + ".mov"
+        output_path = os.path.join(staging_dir, filename)
+
+        args = [
+            "ffmpeg",
+            "-i", input_path,
+            "-i", audio_path,
+            "-vcodec", "copy",
+            output_path
+        ]
+
+        self.log.debug(subprocess.list2cmdline(args))
+        p = subprocess.Popen(
+            args,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            stdin=subprocess.PIPE,
+            cwd=os.path.dirname(args[-1])
+        )
+
+        output = p.communicate()[0]
+
+        if p.returncode != 0:
+            raise ValueError(output)
+
+        self.log.debug(output)
+
+        # Adding movie representation.
+        start_frame = int(
+            instance.data["sourceIn"] - (
+                instance.data["handleStart"] + instance.data["handles"]
+            )
+        )
+        end_frame = int(
+            instance.data["sourceOut"] + (
+                instance.data["handleEnd"] + instance.data["handles"]
+            )
+        )
+        representation = {
+            "files": os.path.basename(output_path),
+            "staging_dir": staging_dir,
+            "startFrame": 0,
+            "endFrame": end_frame - start_frame,
+            "step": 1,
+            "frameRate": (
+                instance.context.data["activeSequence"].framerate().toFloat()
+            ),
+            "preview": True,
+            "thumbnail": False,
+            "name": "preview",
+            "ext": "mov",
+        }
+        instance.data["representations"] = [representation]
+        self.log.debug("Adding representation: {}".format(representation))
+
+        # Adding thumbnail representation.
+        path = instance.data["sourcePath"].replace(".mov", ".png")
+        if not os.path.exists(path):
+            item.thumbnail(start_frame).save(path, format="png")
+
+        representation = {
+            "files": os.path.basename(path),
+            "stagingDir": os.path.dirname(path),
+            "name": "thumbnail",
+            "thumbnail": True,
+            "ext": "png"
+        }
+        instance.data["representations"].append(representation)
+        self.log.debug("Adding representation: {}".format(representation))
diff --git a/pype/plugins/nukestudio/publish/validate_hierarchy.py b/pype/plugins/nukestudio/publish/validate_hierarchy.py
index 2ddec1bcfc..8013a98efd 100644
--- a/pype/plugins/nukestudio/publish/validate_hierarchy.py
+++ b/pype/plugins/nukestudio/publish/validate_hierarchy.py
@@ -7,7 +7,7 @@ class ValidateHierarchy(api.InstancePlugin):
     """

     order = api.ValidatorOrder
-    families = ["clip"]
+    families = ["clip", "shot"]
     label = "Validate Hierarchy"
     hosts = ["nukestudio"]
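Editor's note: the new NukeStudio collectors in this patch (collect_plates.py, collect_audio.py and collect_reviews.py) repeat the same tag-metadata filtering before cloning the clip instance into a new family. The snippet below is a minimal, self-contained sketch of that shared pattern for reference only; the helper name subset_from_tags and the plain-dict tag shape are illustrative assumptions, not part of the patch.

def subset_from_tags(tags, family):
    # Mirror of the collectors' filtering: an instance is only processed when
    # one of its tags carries the requested "tag.family" value.
    tagged = any(
        dict(tag["metadata"]).get("tag.family", "").lower() == family
        for tag in tags
    )
    if not tagged:
        return None

    # The subset suffix comes from any tag carrying "tag.subset", as in the
    # plugins above; the family is prepended and the suffix is title-cased.
    subset = ""
    for tag in tags:
        metadata = dict(tag["metadata"])
        if "tag.subset" in metadata:
            subset = metadata["tag.subset"]
    return family + subset.title()


# A "plate"/"main" tag becomes subset "plateMain", matching CollectPlates.
tags = [{"metadata": {"tag.family": "plate", "tag.subset": "main"}}]
print(subset_from_tags(tags, "plate"))  # prints: plateMain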