diff --git a/openpype/hosts/maya/plugins/load/load_look.py b/openpype/hosts/maya/plugins/load/load_look.py index 4392d1f78d..c39bbc497e 100644 --- a/openpype/hosts/maya/plugins/load/load_look.py +++ b/openpype/hosts/maya/plugins/load/load_look.py @@ -105,7 +105,23 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): # Load relationships shader_relation = api.get_representation_path(json_representation) with open(shader_relation, "r") as f: - relationships = json.load(f) + json_data = json.load(f) + + for rel, data in json_data["relationships"].items(): + # process only non-shading nodes + current_node = "{}:{}".format(container["namespace"], rel) + if current_node in shader_nodes: + continue + print("processing {}".format(rel)) + current_members = set(cmds.ls( + cmds.sets(current_node, query=True) or [], long=True)) + new_members = {"{}".format( + m["name"]) for m in data["members"] or []} + dif = new_members.difference(current_members) + + # add to set + cmds.sets( + dif, forceElement="{}:{}".format(container["namespace"], rel)) # update of reference could result in failed edits - material is not # present because of renaming etc. 
@@ -120,7 +136,7 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): cmds.file(cr=reference_node) # cleanReference # reapply shading groups from json representation on orig nodes - openpype.hosts.maya.api.lib.apply_shaders(relationships, + openpype.hosts.maya.api.lib.apply_shaders(json_data, shader_nodes, orig_nodes) @@ -128,12 +144,13 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): "All successful edits were kept intact.\n", "Failed and removed edits:"] msg.extend(failed_edits) + msg = ScrollMessageBox(QtWidgets.QMessageBox.Warning, "Some reference edit failed", msg) msg.exec_() - attributes = relationships.get("attributes", []) + attributes = json_data.get("attributes", []) # region compute lookup nodes_by_id = defaultdict(list) diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py index acc6d8f128..238213c000 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -1,8 +1,10 @@ +# -*- coding: utf-8 -*- +"""Maya look collector.""" import re import os import glob -from maya import cmds +from maya import cmds # noqa import pyblish.api from openpype.hosts.maya.api import lib @@ -16,6 +18,11 @@ SHAPE_ATTRS = ["castsShadows", "doubleSided", "opposite"] +RENDERER_NODE_TYPES = [ + # redshift + "RedshiftMeshParameters" +] + SHAPE_ATTRS = set(SHAPE_ATTRS) @@ -29,7 +36,6 @@ def get_look_attrs(node): list: Attribute names to extract """ - # When referenced get only attributes that are "changed since file open" # which includes any reference edits, otherwise take *all* user defined # attributes @@ -219,9 +225,13 @@ class CollectLook(pyblish.api.InstancePlugin): with lib.renderlayer(instance.data["renderlayer"]): self.collect(instance) - def collect(self, instance): + """Collect looks. + Args: + instance: Instance to collect. 
+ + """ self.log.info("Looking for look associations " "for %s" % instance.data['name']) @@ -235,48 +245,91 @@ class CollectLook(pyblish.api.InstancePlugin): self.log.info("Gathering set relations..") # Ensure iteration happen in a list so we can remove keys from the # dict within the loop - for objset in list(sets): - self.log.debug("From %s.." % objset) + + # skipped types of attribute on render specific nodes + disabled_types = ["message", "TdataCompound"] + + for obj_set in list(sets): + self.log.debug("From {}".format(obj_set)) + + # if node is specified as renderer node type, it will be + # serialized with its attributes. + if cmds.nodeType(obj_set) in RENDERER_NODE_TYPES: + self.log.info("- {} is {}".format( + obj_set, cmds.nodeType(obj_set))) + + node_attrs = [] + + # serialize its attributes so they can be recreated on look + # load. + for attr in cmds.listAttr(obj_set): + # skip publishedNodeInfo attributes as they break + # getAttr() and we don't need them anyway + if attr.startswith("publishedNodeInfo"): + continue + + # skip attributes types defined in 'disabled_type' list + if cmds.getAttr("{}.{}".format(obj_set, attr), type=True) in disabled_types: # noqa + continue + + node_attrs.append(( + attr, + cmds.getAttr("{}.{}".format(obj_set, attr)), + cmds.getAttr( + "{}.{}".format(obj_set, attr), type=True) + )) + + for member in cmds.ls( + cmds.sets(obj_set, query=True), long=True): + member_data = self.collect_member_data(member, + instance_lookup) + if not member_data: + continue + + # Add information of the node to the members list + sets[obj_set]["members"].append(member_data) # Get all nodes of the current objectSet (shadingEngine) - for member in cmds.ls(cmds.sets(objset, query=True), long=True): + for member in cmds.ls(cmds.sets(obj_set, query=True), long=True): member_data = self.collect_member_data(member, instance_lookup) if not member_data: continue # Add information of the node to the members list - sets[objset]["members"].append(member_data) 
+ sets[obj_set]["members"].append(member_data) # Remove sets that didn't have any members assigned in the end # Thus the data will be limited to only what we need. - self.log.info("objset {}".format(sets[objset])) - if not sets[objset]["members"] or (not objset.endswith("SG")): - self.log.info("Removing redundant set information: " - "%s" % objset) - sets.pop(objset, None) + self.log.info("obj_set {}".format(sets[obj_set])) + if not sets[obj_set]["members"]: + self.log.info( + "Removing redundant set information: {}".format(obj_set)) + sets.pop(obj_set, None) self.log.info("Gathering attribute changes to instance members..") attributes = self.collect_attributes_changed(instance) # Store data on the instance - instance.data["lookData"] = {"attributes": attributes, - "relationships": sets} + instance.data["lookData"] = { + "attributes": attributes, + "relationships": sets + } # Collect file nodes used by shading engines (if we have any) - files = list() - looksets = sets.keys() - shaderAttrs = [ - "surfaceShader", - "volumeShader", - "displacementShader", - "aiSurfaceShader", - "aiVolumeShader"] - materials = list() + files = [] + look_sets = sets.keys() + shader_attrs = [ + "surfaceShader", + "volumeShader", + "displacementShader", + "aiSurfaceShader", + "aiVolumeShader"] + if look_sets: + materials = [] - if looksets: - for look in looksets: - for at in shaderAttrs: + for look in look_sets: + for at in shader_attrs: try: con = cmds.listConnections("{}.{}".format(look, at)) except ValueError: @@ -289,10 +342,10 @@ class CollectLook(pyblish.api.InstancePlugin): self.log.info("Found materials:\n{}".format(materials)) - self.log.info("Found the following sets:\n{}".format(looksets)) + self.log.info("Found the following sets:\n{}".format(look_sets)) # Get the entire node chain of the look sets - # history = cmds.listHistory(looksets) - history = list() + # history = cmds.listHistory(look_sets) + history = [] for material in materials: 
history.extend(cmds.listHistory(material)) files = cmds.ls(history, type="file", long=True) @@ -313,7 +366,7 @@ class CollectLook(pyblish.api.InstancePlugin): # Ensure unique shader sets # Add shader sets to the instance for unify ID validation - instance.extend(shader for shader in looksets if shader + instance.extend(shader for shader in look_sets if shader not in instance_lookup) self.log.info("Collected look for %s" % instance) @@ -331,7 +384,7 @@ class CollectLook(pyblish.api.InstancePlugin): dict """ - sets = dict() + sets = {} for node in instance: related_sets = lib.get_related_sets(node) if not related_sets: @@ -427,6 +480,11 @@ class CollectLook(pyblish.api.InstancePlugin): """ self.log.debug("processing: {}".format(node)) + if cmds.nodeType(node) not in ["file", "aiImage"]: + self.log.error( + "Unsupported file node: {}".format(cmds.nodeType(node))) + raise AssertionError("Unsupported file node") + if cmds.nodeType(node) == 'file': self.log.debug(" - file node") attribute = "{}.fileTextureName".format(node) @@ -435,6 +493,7 @@ class CollectLook(pyblish.api.InstancePlugin): self.log.debug("aiImage node") attribute = "{}.filename".format(node) computed_attribute = attribute + source = cmds.getAttr(attribute) self.log.info(" - file source: {}".format(source)) color_space_attr = "{}.colorSpace".format(node) diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index 79488a372c..bdd061578e 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -1,13 +1,14 @@ +# -*- coding: utf-8 -*- +"""Maya look extractor.""" import os import sys import json -import copy import tempfile import contextlib import subprocess from collections import OrderedDict -from maya import cmds +from maya import cmds # noqa import pyblish.api import avalon.maya @@ -22,23 +23,38 @@ HARDLINK = 2 def find_paths_by_hash(texture_hash): - # Find the texture hash 
key in the dictionary and all paths that - # originate from it. + """Find the texture hash key in the dictionary. + + All paths that originate from it. + + Args: + texture_hash (str): Hash of the texture. + + Return: + str: path to texture if found. + + """ key = "data.sourceHashes.{0}".format(texture_hash) return io.distinct(key, {"type": "version"}) def maketx(source, destination, *args): - """Make .tx using maketx with some default settings. + """Make `.tx` using `maketx` with some default settings. + The settings are based on default as used in Arnold's txManager in the scene. This function requires the `maketx` executable to be on the `PATH`. + Args: source (str): Path to source file. destination (str): Writing destination path. - """ + *args: Additional arguments for `maketx`. + Returns: + str: Output of `maketx` command. + + """ cmd = [ "maketx", "-v", # verbose @@ -56,7 +72,7 @@ def maketx(source, destination, *args): cmd = " ".join(cmd) - CREATE_NO_WINDOW = 0x08000000 + CREATE_NO_WINDOW = 0x08000000 # noqa kwargs = dict(args=cmd, stderr=subprocess.STDOUT) if sys.platform == "win32": @@ -118,12 +134,58 @@ class ExtractLook(openpype.api.Extractor): hosts = ["maya"] families = ["look"] order = pyblish.api.ExtractorOrder + 0.2 + scene_type = "ma" + + @staticmethod + def get_renderer_name(): + """Get renderer name from Maya. + + Returns: + str: Renderer name. + + """ + renderer = cmds.getAttr( + "defaultRenderGlobals.currentRenderer" + ).lower() + # handle various renderman names + if renderer.startswith("renderman"): + renderer = "renderman" + return renderer + + def get_maya_scene_type(self, instance): + """Get Maya scene type from settings. + + Args: + instance (pyblish.api.Instance): Instance with collected + project settings. 
+ + """ + ext_mapping = ( + instance.context.data["project_settings"]["maya"]["ext_mapping"] + ) + if ext_mapping: + self.log.info("Looking in settings for scene type ...") + # use extension mapping for first family found + for family in self.families: + try: + self.scene_type = ext_mapping[family] + self.log.info( + "Using {} as scene type".format(self.scene_type)) + break + except KeyError: + # no preset found + pass def process(self, instance): + """Plugin entry point. + Args: + instance: Instance to process. + + """ # Define extract output file path dir_path = self.staging_dir(instance) - maya_fname = "{0}.ma".format(instance.name) + maya_fname = "{0}.{1}".format(instance.name, self.scene_type) json_fname = "{0}.json".format(instance.name) # Make texture dump folder @@ -148,7 +210,7 @@ class ExtractLook(openpype.api.Extractor): # Collect all unique files used in the resources files = set() - files_metadata = dict() + files_metadata = {} for resource in resources: # Preserve color space values (force value after filepath change) # This will also trigger in the same order at end of context to @@ -162,35 +224,33 @@ class ExtractLook(openpype.api.Extractor): # files.update(os.path.normpath(f)) # Process the resource files - transfers = list() - hardlinks = list() - hashes = dict() - forceCopy = instance.data.get("forceCopy", False) + transfers = [] + hardlinks = [] + hashes = {} + force_copy = instance.data.get("forceCopy", False) self.log.info(files) for filepath in files_metadata: - cspace = files_metadata[filepath]["color_space"] - linearise = False - if do_maketx: - if cspace == "sRGB": - linearise = True - # set its file node to 'raw' as tx will be linearized - files_metadata[filepath]["color_space"] = "raw" + linearize = False + if do_maketx and files_metadata[filepath]["color_space"] == "sRGB": # noqa: E501 + linearize = True + # set its file node to 'raw' as tx will be linearized + files_metadata[filepath]["color_space"] = "raw" - source, mode, hash = 
self._process_texture( + source, mode, texture_hash = self._process_texture( filepath, do_maketx, staging=dir_path, - linearise=linearise, - force=forceCopy + linearize=linearize, + force=force_copy ) destination = self.resource_destination(instance, source, do_maketx) # Force copy is specified. - if forceCopy: + if force_copy: mode = COPY if mode == COPY: @@ -202,10 +262,10 @@ class ExtractLook(openpype.api.Extractor): # Store the hashes from hash to destination to include in the # database - hashes[hash] = destination + hashes[texture_hash] = destination # Remap the resources to the destination path (change node attributes) - destinations = dict() + destinations = {} remap = OrderedDict() # needs to be ordered, see color space values for resource in resources: source = os.path.normpath(resource["source"]) @@ -222,7 +282,7 @@ class ExtractLook(openpype.api.Extractor): color_space_attr = resource["node"] + ".colorSpace" color_space = cmds.getAttr(color_space_attr) if files_metadata[source]["color_space"] == "raw": - # set colorpsace to raw if we linearized it + # set color space to raw if we linearized it color_space = "Raw" # Remap file node filename to destination attr = resource["attribute"] @@ -267,11 +327,11 @@ class ExtractLook(openpype.api.Extractor): json.dump(data, f) if "files" not in instance.data: - instance.data["files"] = list() + instance.data["files"] = [] if "hardlinks" not in instance.data: - instance.data["hardlinks"] = list() + instance.data["hardlinks"] = [] if "transfers" not in instance.data: - instance.data["transfers"] = list() + instance.data["transfers"] = [] instance.data["files"].append(maya_fname) instance.data["files"].append(json_fname) @@ -311,14 +371,26 @@ class ExtractLook(openpype.api.Extractor): maya_path)) def resource_destination(self, instance, filepath, do_maketx): - anatomy = instance.context.data["anatomy"] + """Get resource destination path. 
+ This is utility function to change path if resource file name is + changed by some external tool like `maketx`. + + Args: + instance: Current Instance. + filepath (str): Resource path + do_maketx (bool): Flag if resource is processed by `maketx`. + + Returns: + str: Path to resource file + + """ resources_dir = instance.data["resourcesDir"] # Compute destination location basename, ext = os.path.splitext(os.path.basename(filepath)) - # If maketx then the texture will always end with .tx + # If `maketx` then the texture will always end with .tx if do_maketx: ext = ".tx" @@ -326,7 +398,7 @@ class ExtractLook(openpype.api.Extractor): resources_dir, basename + ext ) - def _process_texture(self, filepath, do_maketx, staging, linearise, force): + def _process_texture(self, filepath, do_maketx, staging, linearize, force): """Process a single texture file on disk for publishing. This will: 1. Check whether it's already published, if so it will do hardlink @@ -363,7 +435,7 @@ class ExtractLook(openpype.api.Extractor): # Produce .tx file in staging if source file is not .tx converted = os.path.join(staging, "resources", fname + ".tx") - if linearise: + if linearize: self.log.info("tx: converting sRGB -> linear") colorconvert = "--colorconvert sRGB linear" else: diff --git a/openpype/hosts/maya/plugins/publish/validate_look_sets.py b/openpype/hosts/maya/plugins/publish/validate_look_sets.py index 48431d0906..5e737ca876 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_sets.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_sets.py @@ -73,8 +73,10 @@ class ValidateLookSets(pyblish.api.InstancePlugin): # check if any objectSets are not present ion the relationships missing_sets = [s for s in sets if s not in relationships] if missing_sets: - for set in missing_sets: - if '_SET' not in set: + for missing_set in missing_sets: + cls.log.debug(missing_set) + + if '_SET' not in missing_set: # A set of this node is not coming along, this is wrong! 
cls.log.error("Missing sets '{}' for node " "'{}'".format(missing_sets, node)) @@ -82,8 +84,8 @@ class ValidateLookSets(pyblish.api.InstancePlugin): continue # Ensure the node is in the sets that are collected - for shaderset, data in relationships.items(): - if shaderset not in sets: + for shader_set, data in relationships.items(): + if shader_set not in sets: # no need to check for a set if the node # isn't in it anyway continue @@ -94,7 +96,7 @@ class ValidateLookSets(pyblish.api.InstancePlugin): # The node is not found in the collected set # relationships cls.log.error("Missing '{}' in collected set node " - "'{}'".format(node, shaderset)) + "'{}'".format(node, shader_set)) invalid.append(node) continue diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py index 0808dc06b1..cc236734e5 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py @@ -78,6 +78,14 @@ class CollectInstances(pyblish.api.ContextPlugin): if instance is None: continue + any_visible = False + for layer in instance.data["layers"]: + if layer["visible"]: + any_visible = True + break + + instance.data["publish"] = any_visible + instance.data["frameStart"] = context.data["sceneMarkIn"] + 1 instance.data["frameEnd"] = context.data["sceneMarkOut"] + 1 @@ -108,7 +116,7 @@ class CollectInstances(pyblish.api.ContextPlugin): group_id = instance_data["group_id"] group_layers = [] for layer in layers_data: - if layer["group_id"] == group_id and layer["visible"]: + if layer["group_id"] == group_id: group_layers.append(layer) if not group_layers: diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py index 4409413ff6..13c6c9eb78 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py +++ 
b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py @@ -57,7 +57,10 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect context from workfile metadata self.log.info("Collecting workfile context") + workfile_context = pipeline.get_current_workfile_context() + # Store workfile context to pyblish context + context.data["workfile_context"] = workfile_context if workfile_context: # Change current context with context from workfile key_map = ( @@ -67,16 +70,27 @@ for env_key, key in key_map: avalon.api.Session[env_key] = workfile_context[key] os.environ[env_key] = workfile_context[key] + self.log.info("Context changed to: {}".format(workfile_context)) + + asset_name = workfile_context["asset"] + task_name = workfile_context["task"] + else: + asset_name = current_context["asset"] + task_name = current_context["task"] # Handle older workfiles or workfiles without metadata - self.log.warning( + self.log.warning(( "Workfile does not contain information about context." " Using current Session context." - ) - workfile_context = current_context.copy() + )) - context.data["workfile_context"] = workfile_context - self.log.info("Context changed to: {}".format(workfile_context)) + # Store context asset name + context.data["asset"] = asset_name + self.log.info( + "Context is set to Asset: \"{}\" and Task: \"{}\"".format( + asset_name, task_name + ) + ) # Collect instances self.log.info("Collecting instance data from workfile") diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py new file mode 100644 index 0000000000..4ce8d5347d --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py @@ -0,0 +1,55 @@ +import pyblish.api +from avalon.tvpaint import pipeline + + +class FixAssetNames(pyblish.api.Action): + """Repair the asset names. + + Change instance metadata in the workfile. 
+ """ + + label = "Repair" + icon = "wrench" + on = "failed" + + def process(self, context, plugin): + context_asset_name = context.data["asset"] + old_instance_items = pipeline.list_instances() + new_instance_items = [] + for instance_item in old_instance_items: + instance_asset_name = instance_item.get("asset") + if ( + instance_asset_name + and instance_asset_name != context_asset_name + ): + instance_item["asset"] = context_asset_name + new_instance_items.append(instance_item) + pipeline._write_instances(new_instance_items) + + +class ValidateAssetName(pyblish.api.ContextPlugin): + """Validate asset name present on instance. + + Asset name on instance should be the same as context's. + """ + + label = "Validate Asset Names" + order = pyblish.api.ValidatorOrder + hosts = ["tvpaint"] + actions = [FixAssetNames] + + def process(self, context): + context_asset_name = context.data["asset"] + for instance in context: + asset_name = instance.data.get("asset") + if asset_name and asset_name == context_asset_name: + continue + + instance_label = ( + instance.data.get("label") or instance.data["name"] + ) + raise AssertionError(( + "Different asset name on instance than context's." 
+ " Instance \"{}\" has asset name: \"{}\"" + " Context asset name is: \"{}\"" ).format(instance_label, asset_name, context_asset_name)) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py index 7c1032fcad..cc664d8030 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py @@ -13,7 +13,15 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin): order = pyblish.api.ValidatorOrder def process(self, context): - workfile_context = context.data["workfile_context"] + workfile_context = context.data.get("workfile_context") + # If workfile context is missing, the project is guaranteed + # to match the `AVALON_PROJECT` value + if not workfile_context: + self.log.info( + "Workfile context (\"workfile_context\") is not filled." + ) + return + workfile_project_name = workfile_context["project"] env_project_name = os.environ["AVALON_PROJECT"] if workfile_project_name == env_project_name: diff --git a/openpype/settings/defaults/project_settings/tvpaint.json b/openpype/settings/defaults/project_settings/tvpaint.json index a6c10b3809..4a424b1c03 100644 --- a/openpype/settings/defaults/project_settings/tvpaint.json +++ b/openpype/settings/defaults/project_settings/tvpaint.json @@ -9,6 +9,11 @@ "enabled": true, "optional": true, "active": true + }, + "ValidateAssetName": { + "enabled": true, + "optional": true, + "active": true + } }, "filters": {} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json index 0a9e7139dd..ab404f03ff 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json @@ -33,6 +33,17 @@ "docstring": "Validate 
MarkIn/Out match Frame start/end on shot data" } ] + }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateAssetName", + "label": "ValidateAssetName", + "docstring": "Validate if the shot in instance metadata is the same as the workfile's shot" + } + ] + } ] },