Merge pull request #3629 from BigRoy/collect_input_links

Milan Kolar 2022-09-14 11:30:29 +02:00 committed by GitHub
commit e5cf456a95
6 changed files with 451 additions and 3 deletions

View file

@@ -0,0 +1,114 @@
from bson.objectid import ObjectId
import pyblish.api
from openpype.pipeline import registered_host


def collect_input_containers(tools):
    """Collect containers that contain any of the tools in `tools`.

    This will return any loaded Avalon container that contains at least one
    of the tools. As such, the Avalon container is an input for the publish;
    in short, some of the tools are members of that container.

    Returns:
        list: Input Avalon containers.

    """
    # Build a lookup by tool names; names are unique within a Fusion comp
    lookup = frozenset(tool.Name for tool in tools)
containers = []
host = registered_host()
for container in host.ls():
name = container["_tool"].Name
        # We currently assume containers are not "groups" but single tools,
        # like a single "Loader" operator. As such we only check whether the
        # container's tool is among the tools being processed.
        if name in lookup:
containers.append(container)
    return containers


def iter_upstream(tool):
    """Yield all upstream input tools of the given tool.

    Yields:
        Tool: The upstream input tools.

    """
def get_connected_input_tools(tool):
"""Helper function that returns connected input tools for a tool."""
inputs = []
        # Filter to the input types that can have sensible upstream
        # connections. For example, we ignore "Number" inputs because there
        # can be many of them to iterate, slowing things down quite a bit,
        # and in practice they don't have upstream connections.
        VALID_INPUT_TYPES = ['Image', 'Particles', 'Mask', 'DataType3D']
for type_ in VALID_INPUT_TYPES:
for input_ in tool.GetInputList(type_).values():
output = input_.GetConnectedOutput()
if output:
input_tool = output.GetTool()
inputs.append(input_tool)
return inputs
    # Initialize the processing queue with the tool's own direct inputs
    queue = get_connected_input_tools(tool)

    # Keep track of which tool names we have processed so far so that we
    # don't traverse the same hierarchy again. We don't push the tools
    # themselves into the set because identical tools don't compare equal;
    # instead we rely on tool names, which are unique within a Fusion comp.
    collected = set(tool.Name for tool in queue)
# Traverse upstream references for all nodes and yield them as we
# process the queue.
while queue:
upstream_tool = queue.pop()
yield upstream_tool
# Find upstream tools that are not collected yet.
upstream_inputs = get_connected_input_tools(upstream_tool)
upstream_inputs = [t for t in upstream_inputs if
t.Name not in collected]
queue.extend(upstream_inputs)
collected.update(tool.Name for tool in upstream_inputs)


class CollectUpstreamInputs(pyblish.api.InstancePlugin):
    """Collect source input containers used for this publish.

    This collects which loaded publishes were used to generate this publish
    and stores them as `inputs` data, leaving an upstream trace of what was
    used as input.

    """
label = "Collect Inputs"
order = pyblish.api.CollectorOrder + 0.2
hosts = ["fusion"]
def process(self, instance):
        # Get all upstream tools and include the instance's tool itself
tool = instance[0]
nodes = list(iter_upstream(tool))
nodes.append(tool)
# Collect containers for the given set of nodes
containers = collect_input_containers(nodes)
inputs = [ObjectId(c["representation"]) for c in containers]
instance.data["inputRepresentations"] = inputs
self.log.info("Collected inputs: %s" % inputs)
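
A minimal usage sketch of the two helpers above, not part of the diff: it assumes it runs inside Fusion's script console with a comp open (so the scripting global `comp` exists), and the tool name "Saver1" is hypothetical.

    # Hypothetical: run inside Fusion with a comp open
    saver = comp.FindTool("Saver1")

    # Walk the full upstream graph feeding the saver, then include it too
    tools = list(iter_upstream(saver))
    tools.append(saver)

    # Any loaded container whose tool is among these counts as an input
    for container in collect_input_containers(tools):
        print(container["representation"])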

View file

@@ -1,3 +1,5 @@
from bson.objectid import ObjectId
import pyblish.api
from openpype.pipeline import registered_host
@@ -115,7 +117,7 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
# Collect containers for the given set of nodes
containers = collect_input_containers(nodes)
-        inputs = [c["representation"] for c in containers]
-        instance.data["inputs"] = inputs
+        inputs = [ObjectId(c["representation"]) for c in containers]
+        instance.data["inputRepresentations"] = inputs
self.log.info("Collected inputs: %s" % inputs)
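
For context on the hunk above: the container's "representation" value is stored as a string id, and wrapping it in bson's ObjectId matches the id type the database layer queries with. A tiny sketch (the hex id is made up):

    from bson.objectid import ObjectId

    repre_id = ObjectId("5f3e9b2a1c4d5e6f7a8b9c0d")  # hypothetical 24-char hex id
    assert ObjectId(str(repre_id)) == repre_id  # str round-trips back to ObjectId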

View file

@@ -348,3 +348,71 @@ def get_attr_overrides(node_attr, layer,
break
    return reversed(plug_overrides)


def get_shader_in_layer(node, layer):
    """Return the assigned shaders in a renderlayer without switching layers.

    This has been developed and tested for Legacy Renderlayers and *not* for
    Render Setup.

    Note: This will also return the shader for any face assignments, however
        it will *not* return the components they are assigned to. This could
        be implemented, but since Maya's renderlayers are famous for breaking
        with face assignments there has been no need for this function to
        support that.

    Returns:
        list: The assigned shaders in the given layer.

    """
def _get_connected_shader(plug):
"""Return current shader"""
return cmds.listConnections(plug,
source=False,
destination=True,
plugs=False,
connections=False,
type="shadingEngine") or []
# We check the instObjGroups (shader connection) for layer overrides.
plug = node + ".instObjGroups"
# Ignore complex query if we're in the layer anyway (optimization)
current_layer = cmds.editRenderLayerGlobals(query=True,
currentRenderLayer=True)
if layer == current_layer:
return _get_connected_shader(plug)
connections = cmds.listConnections(plug,
plugs=True,
source=False,
destination=True,
type="renderLayer") or []
    # Use a list (not a lazy `filter` object) so the emptiness check below
    # works on Python 3 and the result can be iterated more than once
    connections = [x for x in connections if x.endswith(".outPlug")]
if not connections:
# If no overrides anywhere on the shader, just get the current shader
return _get_connected_shader(plug)
def _get_override(connections, layer):
"""Return the overridden connection for that layer in connections"""
# If there's an override on that layer, return that.
for connection in connections:
if (connection.startswith(layer + ".outAdjustments") and
connection.endswith(".outPlug")):
# This is a shader override on that layer so get the shader
# connected to .outValue of the .outAdjustment[i]
out_adjustment = connection.rsplit(".", 1)[0]
connection_attr = out_adjustment + ".outValue"
override = cmds.listConnections(connection_attr) or []
return override
override_shader = _get_override(connections, layer)
if override_shader is not None:
return override_shader
else:
# Get the override for "defaultRenderLayer" (=masterLayer)
return _get_override(connections, layer="defaultRenderLayer")
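
A minimal usage sketch for get_shader_in_layer, with hypothetical scene names (a legacy renderlayer "rl_beauty" and a shape "pSphereShape1"):

    from maya import cmds

    # Hypothetical scene: a legacy renderlayer with a material override
    shaders = get_shader_in_layer("pSphereShape1", layer="rl_beauty")
    print(shaders)  # e.g. ["blinn1SG"] if the layer overrides the assignment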

View file

@@ -0,0 +1,215 @@
import copy
from bson.objectid import ObjectId
from maya import cmds
import maya.api.OpenMaya as om
import pyblish.api
from openpype.pipeline import registered_host
from openpype.hosts.maya.api.lib import get_container_members
from openpype.hosts.maya.api.lib_rendersetup import get_shader_in_layer


def iter_history(nodes,
                 filter=om.MFn.kInvalid,
                 direction=om.MItDependencyGraph.kUpstream):
    """Iterate the unique upstream history for a list of nodes.

    This acts as a replacement for maya.cmds.listHistory and is roughly
    2x-3x faster. It returns less than maya.cmds.listHistory because it
    excludes the input nodes themselves from the output (unless an input
    node is part of the history of another input node). It also excludes
    duplicates.

    Args:
        nodes (list): Maya node names to start the search from.
        filter (om.MFn): Filter to only specific node types,
            e.g. om.MFn.kDagNode for only dag nodes.
        direction (om.MItDependencyGraph.Direction): Direction to
            traverse in. Defaults to upstream.

    Yields:
        str: Node names in upstream history.

    """
if not nodes:
return
sel = om.MSelectionList()
for node in nodes:
sel.add(node)
    # Create the iterator with any node; it is reset per start node below
    it = om.MItDependencyGraph(sel.getDependNode(0))
handle = om.MObjectHandle
traversed = set()
fn_dep = om.MFnDependencyNode()
fn_dag = om.MFnDagNode()
for i in range(sel.length()):
start_node = sel.getDependNode(i)
start_node_hash = handle(start_node).hashCode()
if start_node_hash in traversed:
continue
it.resetTo(start_node,
filter=filter,
direction=direction)
while not it.isDone():
node = it.currentNode()
node_hash = handle(node).hashCode()
if node_hash in traversed:
it.prune()
it.next() # noqa: B305
continue
traversed.add(node_hash)
if node.hasFn(om.MFn.kDagNode):
fn_dag.setObject(node)
yield fn_dag.fullPathName()
else:
fn_dep.setObject(node)
yield fn_dep.name()
it.next() # noqa: B305


def collect_input_containers(containers, nodes):
    """Collect containers that contain any of the nodes in `nodes`.

    This will return any loaded Avalon container that contains at least one
    of the nodes. As such, the Avalon container is an input for the publish;
    in short, some of the nodes are members of that container.

    Returns:
        list: Input Avalon containers.

    """
# Assume the containers have collected their cached '_members' data
# in the collector.
return [container for container in containers
if any(node in container["_members"] for node in nodes)]


class CollectUpstreamInputs(pyblish.api.InstancePlugin):
    """Collect source input containers used for this publish.

    This collects which loaded publishes were used to generate this publish
    and stores them as `inputs` data, leaving an upstream trace of what was
    used as input.

    """
label = "Collect Inputs"
order = pyblish.api.CollectorOrder + 0.34
hosts = ["maya"]
def process(self, instance):
# For large scenes the querying of "host.ls()" can be relatively slow
# e.g. up to a second. Many instances calling it easily slows this
# down. As such, we cache it so we trigger it only once.
# todo: Instead of hidden cache make "CollectContainers" plug-in
cache_key = "__cache_containers"
scene_containers = instance.context.data.get(cache_key, None)
if scene_containers is None:
# Query the scenes' containers if there's no cache yet
host = registered_host()
scene_containers = list(host.ls())
for container in scene_containers:
# Embed the members into the container dictionary
container_members = set(get_container_members(container))
container["_members"] = container_members
            instance.context.data[cache_key] = scene_containers
# Collect the relevant input containers for this instance
if "renderlayer" in set(instance.data.get("families", [])):
# Special behavior for renderlayers
            self.log.debug("Collecting renderlayer inputs...")
containers = self._collect_renderlayer_inputs(scene_containers,
instance)
else:
# Basic behavior
nodes = instance[:]
            # Include the upstream history of the nodes as long names. For
            # optimization we only trace upstream from shape nodes, looking
            # for used dag nodes. This way having just a constraint on a
            # transform is ignored, which tended to give irrelevant inputs
            # for the majority of our use cases; we care mostly about
            # geometry inputs.
shapes = cmds.ls(nodes,
type=("mesh", "nurbsSurface", "nurbsCurve"),
noIntermediate=True)
if shapes:
history = list(iter_history(shapes, filter=om.MFn.kShape))
history = cmds.ls(history, long=True)
# Include the transforms in the collected history as shapes
# are excluded from containers
transforms = cmds.listRelatives(cmds.ls(history, shapes=True),
parent=True,
fullPath=True,
type="transform")
if transforms:
history.extend(transforms)
if history:
nodes = list(set(nodes + history))
# Collect containers for the given set of nodes
containers = collect_input_containers(scene_containers,
nodes)
inputs = [ObjectId(c["representation"]) for c in containers]
instance.data["inputRepresentations"] = inputs
self.log.info("Collected inputs: %s" % inputs)
def _collect_renderlayer_inputs(self, scene_containers, instance):
"""Collects inputs from nodes in renderlayer, incl. shaders + camera"""
# Get the renderlayer
renderlayer = instance.data.get("setMembers")
if renderlayer == "defaultRenderLayer":
# Assume all loaded containers in the scene are inputs
# for the masterlayer
return copy.deepcopy(scene_containers)
else:
# Get the members of the layer
members = cmds.editRenderLayerMembers(renderlayer,
query=True,
fullNames=True) or []
# In some cases invalid objects are returned from
# `editRenderLayerMembers` so we filter them out
members = cmds.ls(members, long=True)
# Include all children
children = cmds.listRelatives(members,
allDescendents=True,
fullPath=True) or []
members.extend(children)
# Include assigned shaders in renderlayer
shapes = cmds.ls(members, shapes=True, long=True)
shaders = set()
for shape in shapes:
shape_shaders = get_shader_in_layer(shape, layer=renderlayer)
if not shape_shaders:
continue
shaders.update(shape_shaders)
members.extend(shaders)
# Explicitly include the camera being rendered in renderlayer
            cameras = instance.data.get("cameras") or []  # guard against None
            members.extend(cameras)
containers = collect_input_containers(scene_containers, members)
return containers
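
For reference, a small sketch of using iter_history on its own, assuming a Maya scene with mesh shapes loaded:

    import maya.api.OpenMaya as om
    from maya import cmds

    # Trace upstream from all non-intermediate mesh shapes in the scene,
    # yielding only shape nodes along the way
    shapes = cmds.ls(type="mesh", noIntermediate=True, long=True)
    for node in iter_history(shapes, filter=om.MFn.kShape):
        print(node)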

View file

@@ -778,7 +778,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"resolutionHeight": data.get("resolutionHeight", 1080),
"multipartExr": data.get("multipartExr", False),
"jobBatchName": data.get("jobBatchName", ""),
"useSequenceForReview": data.get("useSequenceForReview", True)
"useSequenceForReview": data.get("useSequenceForReview", True),
# map inputVersions `ObjectId` -> `str` so json supports it
"inputVersions": list(map(str, data.get("inputVersions", [])))
}
# skip locking version if we are creating v01
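
The str() mapping exists because the standard json module cannot encode bson ObjectId values. A tiny sketch of the failure mode and the fix (the id is made up):

    import json
    from bson.objectid import ObjectId

    ids = [ObjectId("5f3e9b2a1c4d5e6f7a8b9c0d")]
    # json.dumps({"inputVersions": ids})  # raises TypeError: not JSON serializable
    print(json.dumps({"inputVersions": list(map(str, ids))}))  # works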

View file

@@ -0,0 +1,47 @@
import pyblish.api
from bson.objectid import ObjectId
from openpype.client import get_representations


class CollectInputRepresentationsToVersions(pyblish.api.ContextPlugin):
    """Convert collected input representations to input versions.

    Any data in `instance.data["inputRepresentations"]` gets converted into
    `instance.data["inputVersions"]` as supported in OpenPype v3.

    """
# This is a ContextPlugin because then we can query the database only once
# for the conversion of representation ids to version ids (optimization)
label = "Input Representations to Versions"
order = pyblish.api.CollectorOrder + 0.499
hosts = ["*"]
def process(self, context):
# Query all version ids for representation ids from the database once
representations = set()
for instance in context:
inst_repre = instance.data.get("inputRepresentations", [])
representations.update(inst_repre)
representations_docs = get_representations(
project_name=context.data["projectEntity"]["name"],
representation_ids=representations,
fields=["_id", "parent"])
representation_id_to_version_id = {
repre["_id"]: repre["parent"] for repre in representations_docs
}
for instance in context:
inst_repre = instance.data.get("inputRepresentations", [])
if not inst_repre:
continue
input_versions = instance.data.get("inputVersions", [])
for repre_id in inst_repre:
repre_id = ObjectId(repre_id)
version_id = representation_id_to_version_id[repre_id]
input_versions.append(version_id)
instance.data["inputVersions"] = input_versions
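
Taken together with the farm submission change above, the id flow across these plug-ins looks roughly like this condensed sketch (ids are made up, not from the diff):

    from bson.objectid import ObjectId

    # Collectors store representation ids per instance:
    input_representations = ["5f3e9b2a1c4d5e6f7a8b9c0d"]

    # This plug-in maps each representation id to its version id ("parent"):
    repre_to_version = {
        ObjectId("5f3e9b2a1c4d5e6f7a8b9c0d"): ObjectId("6a1b2c3d4e5f6a7b8c9d0e1f"),
    }
    input_versions = [repre_to_version[ObjectId(r)] for r in input_representations]

    # The farm submission then stringifies the version ids for JSON:
    payload = {"inputVersions": list(map(str, input_versions))}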