Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 12:54:40 +01:00)

Commit 439967c8c2: Merge remote-tracking branch 'upstream/develop' into substance_integration
33 changed files with 1012 additions and 327 deletions
@@ -3675,3 +3675,43 @@ def len_flattened(components):
         else:
             n += 1
     return n
+
+
+def get_all_children(nodes):
+    """Return all children of `nodes` including each instanced child.
+    Using maya.cmds.listRelatives(allDescendents=True) includes only the first
+    instance. As such, this function acts as an optimal replacement with a
+    focus on a fast query.
+
+    """
+
+    sel = OpenMaya.MSelectionList()
+    traversed = set()
+    iterator = OpenMaya.MItDag(OpenMaya.MItDag.kDepthFirst)
+    for node in nodes:
+
+        if node in traversed:
+            # Ignore if already processed as a child
+            # before
+            continue
+
+        sel.clear()
+        sel.add(node)
+        dag = sel.getDagPath(0)
+
+        iterator.reset(dag)
+        # ignore self
+        iterator.next()  # noqa: B305
+        while not iterator.isDone():
+
+            path = iterator.fullPathName()
+
+            if path in traversed:
+                iterator.prune()
+                iterator.next()  # noqa: B305
+                continue
+
+            traversed.add(path)
+            iterator.next()  # noqa: B305
+
+    return list(traversed)
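For illustration, a minimal sketch (not part of the diff) of the instancing problem this helper solves; node names are illustrative:

    import maya.cmds as cmds
    from openpype.hosts.maya.api.lib import get_all_children

    # Build a small hierarchy with an instanced child:
    # |grp1|sphere and |grp2|sphere share the same shape via instancing.
    sphere = cmds.polySphere(name="sphere")[0]
    grp1 = cmds.group(sphere, name="grp1")
    grp2 = cmds.instance(grp1, name="grp2")[0]

    # listRelatives(allDescendents=True) reports each instanced child only
    # once, while get_all_children() yields the full DAG path per instance.
    print(cmds.listRelatives(grp1, grp2, allDescendents=True, fullPath=True))
    print(get_all_children([grp1, grp2]))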
@@ -857,6 +857,7 @@ class RenderProductsVray(ARenderProducts):
         if default_ext in {"exr (multichannel)", "exr (deep)"}:
             default_ext = "exr"
 
+        colorspace = lib.get_color_management_output_transform()
         products = []
 
         # add beauty as default when not disabled
@@ -868,7 +869,7 @@ class RenderProductsVray(ARenderProducts):
                     productName="",
                     ext=default_ext,
                     camera=camera,
-                    colorspace=lib.get_color_management_output_transform(),
+                    colorspace=colorspace,
                     multipart=self.multipart
                 )
             )
@@ -882,6 +883,7 @@ class RenderProductsVray(ARenderProducts):
                     productName="Alpha",
                     ext=default_ext,
                     camera=camera,
+                    colorspace=colorspace,
                     multipart=self.multipart
                 )
             )
@@ -917,7 +919,8 @@ class RenderProductsVray(ARenderProducts):
                 product = RenderProduct(productName=name,
                                         ext=default_ext,
                                         aov=aov,
-                                        camera=camera)
+                                        camera=camera,
+                                        colorspace=colorspace)
                 products.append(product)
                 # Continue as we've processed this special case AOV
                 continue
@@ -929,7 +932,7 @@ class RenderProductsVray(ARenderProducts):
                 ext=default_ext,
                 aov=aov,
                 camera=camera,
-                colorspace=lib.get_color_management_output_transform()
+                colorspace=colorspace
             )
             products.append(product)
 
@@ -1130,6 +1133,7 @@ class RenderProductsRedshift(ARenderProducts):
         products = []
         light_groups_enabled = False
        has_beauty_aov = False
+        colorspace = lib.get_color_management_output_transform()
         for aov in aovs:
             enabled = self._get_attr(aov, "enabled")
             if not enabled:
@@ -1173,7 +1177,8 @@ class RenderProductsRedshift(ARenderProducts):
                         ext=ext,
                         multipart=False,
                         camera=camera,
-                        driver=aov)
+                        driver=aov,
+                        colorspace=colorspace)
                     products.append(product)
 
         if light_groups:
@@ -1188,7 +1193,8 @@ class RenderProductsRedshift(ARenderProducts):
                     ext=ext,
                     multipart=False,
                     camera=camera,
-                    driver=aov)
+                    driver=aov,
+                    colorspace=colorspace)
                 products.append(product)
 
         # When a Beauty AOV is added manually, it will be rendered as
@@ -1204,7 +1210,8 @@ class RenderProductsRedshift(ARenderProducts):
                 RenderProduct(productName=beauty_name,
                               ext=ext,
                               multipart=self.multipart,
-                              camera=camera))
+                              camera=camera,
+                              colorspace=colorspace))
 
         return products
 
@@ -1236,6 +1243,8 @@ class RenderProductsRenderman(ARenderProducts):
         """
         from rfm2.api.displays import get_displays  # noqa
 
+        colorspace = lib.get_color_management_output_transform()
+
         cameras = [
             self.sanitize_camera_name(c)
             for c in self.get_renderable_cameras()
@@ -1302,7 +1311,8 @@ class RenderProductsRenderman(ARenderProducts):
                 productName=aov_name,
                 ext=extensions,
                 camera=camera,
-                multipart=True
+                multipart=True,
+                colorspace=colorspace
             )
 
             if has_cryptomatte and matte_enabled:
@@ -1311,7 +1321,8 @@ class RenderProductsRenderman(ARenderProducts):
                     aov=cryptomatte_aov,
                     ext=extensions,
                     camera=camera,
-                    multipart=True
+                    multipart=True,
+                    colorspace=colorspace
                 )
             else:
                 # this code should handle the case where no multipart
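Across all three renderer classes the change is the same: query the OCIO output transform once per collection pass and hand the cached value to every RenderProduct, instead of re-querying Maya's color management preferences per product. A minimal sketch of the pattern (names from the diff; `aov_outputs` is a hypothetical iterable):

    colorspace = lib.get_color_management_output_transform()  # one query
    products = [
        RenderProduct(productName=name, ext=ext, camera=camera,
                      colorspace=colorspace)  # reused for each product
        for name, ext in aov_outputs
    ]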
@@ -84,7 +84,7 @@ class ArnoldStandinLoader(load.LoaderPlugin):
         sequence = is_sequence(os.listdir(os.path.dirname(self.fname)))
         cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
 
-        nodes = [root, standin]
+        nodes = [root, standin, standin_shape]
         if operator is not None:
             nodes.append(operator)
         self[:] = nodes
@@ -183,7 +183,7 @@ class ArnoldStandinLoader(load.LoaderPlugin):
         # If no proxy exists, the string operator won't replace anything.
         cmds.setAttr(
             string_replace_operator + ".match",
-            "resources/" + proxy_basename,
+            proxy_basename,
             type="string"
         )
         cmds.setAttr(
@@ -1,5 +1,9 @@
 import os
 
+import maya.cmds as cmds
+
+from openpype.hosts.maya.api.pipeline import containerise
+from openpype.hosts.maya.api.lib import unique_namespace
 from openpype.pipeline import (
     load,
     get_representation_path
@@ -11,19 +15,15 @@ class GpuCacheLoader(load.LoaderPlugin):
     """Load Alembic as gpuCache"""
 
     families = ["model", "animation", "proxyAbc", "pointcache"]
-    representations = ["abc"]
+    representations = ["abc", "gpu_cache"]
 
-    label = "Import Gpu Cache"
+    label = "Load Gpu Cache"
     order = -5
     icon = "code-fork"
     color = "orange"
 
     def load(self, context, name, namespace, data):
 
-        import maya.cmds as cmds
-        from openpype.hosts.maya.api.pipeline import containerise
-        from openpype.hosts.maya.api.lib import unique_namespace
-
         asset = context['asset']['name']
         namespace = namespace or unique_namespace(
             asset + "_",
@@ -42,10 +42,9 @@ class GpuCacheLoader(load.LoaderPlugin):
         c = colors.get('model')
         if c is not None:
             cmds.setAttr(root + ".useOutlinerColor", 1)
-            cmds.setAttr(root + ".outlinerColor",
-                         (float(c[0])/255),
-                         (float(c[1])/255),
-                         (float(c[2])/255)
+            cmds.setAttr(
+                root + ".outlinerColor",
+                (float(c[0]) / 255), (float(c[1]) / 255), (float(c[2]) / 255)
             )
 
         # Create transform with shape
@@ -74,9 +73,6 @@ class GpuCacheLoader(load.LoaderPlugin):
                 loader=self.__class__.__name__)
 
     def update(self, container, representation):
-
-        import maya.cmds as cmds
-
         path = get_representation_path(representation)
 
         # Update the cache
@@ -96,7 +92,6 @@ class GpuCacheLoader(load.LoaderPlugin):
         self.update(container, representation)
 
     def remove(self, container):
-        import maya.cmds as cmds
         members = cmds.sets(container['objectName'], query=True)
         cmds.lockNode(members, lock=False)
         cmds.delete([container['objectName']] + members)
@@ -1,6 +1,7 @@
 from maya import cmds
 
 import pyblish.api
+from openpype.hosts.maya.api.lib import get_all_children
 
 
 class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
@@ -21,18 +22,21 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
                 self.log.warning("Skipped empty instance: \"%s\" " % objset)
                 continue
             if objset.endswith("content_SET"):
-                instance.data["setMembers"] = cmds.ls(members, long=True)
-                self.log.debug("content members: {}".format(members))
+                members = cmds.ls(members, long=True)
+                children = get_all_children(members)
+                instance.data["contentMembers"] = children
+                self.log.debug("content members: {}".format(children))
             elif objset.endswith("proxy_SET"):
-                instance.data["proxy"] = cmds.ls(members, long=True)
-                self.log.debug("proxy members: {}".format(members))
+                set_members = get_all_children(cmds.ls(members, long=True))
+                instance.data["proxy"] = set_members
+                self.log.debug("proxy members: {}".format(set_members))
 
         # Use camera in object set if present else default to render globals
         # camera.
         cameras = cmds.ls(type="camera", long=True)
         renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)]
         camera = renderable[0]
-        for node in instance.data["setMembers"]:
+        for node in instance.data["contentMembers"]:
             camera_shapes = cmds.listRelatives(
                 node, shapes=True, type="camera"
             )
@@ -1,48 +1,8 @@
 from maya import cmds
-import maya.api.OpenMaya as om
 
 import pyblish.api
 import json
 
 
-def get_all_children(nodes):
-    """Return all children of `nodes` including each instanced child.
-    Using maya.cmds.listRelatives(allDescendents=True) includes only the first
-    instance. As such, this function acts as an optimal replacement with a
-    focus on a fast query.
-
-    """
-
-    sel = om.MSelectionList()
-    traversed = set()
-    iterator = om.MItDag(om.MItDag.kDepthFirst)
-    for node in nodes:
-
-        if node in traversed:
-            # Ignore if already processed as a child
-            # before
-            continue
-
-        sel.clear()
-        sel.add(node)
-        dag = sel.getDagPath(0)
-
-        iterator.reset(dag)
-        # ignore self
-        iterator.next()  # noqa: B305
-        while not iterator.isDone():
-
-            path = iterator.fullPathName()
-
-            if path in traversed:
-                iterator.prune()
-                iterator.next()  # noqa: B305
-                continue
-
-            traversed.add(path)
-            iterator.next()  # noqa: B305
-
-    return list(traversed)
+from openpype.hosts.maya.api.lib import get_all_children
 
 
 class CollectInstances(pyblish.api.ContextPlugin):
@@ -556,7 +556,7 @@ class CollectLook(pyblish.api.InstancePlugin):
                 continue
             if cmds.getAttr(attribute, type=True) == "message":
                 continue
-            node_attributes[attr] = cmds.getAttr(attribute)
+            node_attributes[attr] = cmds.getAttr(attribute, asString=True)
         # Only include if there are any properties we care about
         if not node_attributes:
             continue
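The `asString=True` flag matters for enum attributes: without it `cmds.getAttr` returns the enum index, with it the enum label, which is the more stable value to store in look data. A small illustrative sketch (node and attribute are hypothetical):

    import maya.cmds as cmds

    node = cmds.createNode("transform", name="demo")
    cmds.addAttr(node, longName="quality", attributeType="enum",
                 enumName="low:medium:high")
    cmds.setAttr(node + ".quality", 2)

    print(cmds.getAttr(node + ".quality"))                 # 2 (index)
    print(cmds.getAttr(node + ".quality", asString=True))  # "high" (label)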
@@ -4,7 +4,7 @@ import pymel.core as pm
 import pyblish.api
 
 from openpype.client import get_subset_by_name
-from openpype.pipeline import legacy_io
+from openpype.pipeline import legacy_io, KnownPublishError
 from openpype.hosts.maya.api.lib import get_attribute_input
 
 
@@ -16,7 +16,6 @@ class CollectReview(pyblish.api.InstancePlugin):
     order = pyblish.api.CollectorOrder + 0.3
     label = 'Collect Review Data'
     families = ["review"]
-    legacy = True
 
     def process(self, instance):
 
@@ -36,57 +35,68 @@ class CollectReview(pyblish.api.InstancePlugin):
         self.log.debug('members: {}'.format(members))
 
         # validate required settings
-        assert len(cameras) == 1, "Not a single camera found in extraction"
+        if len(cameras) == 0:
+            raise KnownPublishError("No camera found in review "
+                                    "instance: {}".format(instance))
+        elif len(cameras) > 2:
+            raise KnownPublishError(
+                "Only a single camera is allowed for a review instance but "
+                "more than one camera found in review instance: {}. "
+                "Cameras found: {}".format(instance, ", ".join(cameras)))
+
         camera = cameras[0]
         self.log.debug('camera: {}'.format(camera))
 
-        objectset = instance.context.data['objectsets']
+        context = instance.context
+        objectset = context.data['objectsets']
 
-        reviewable_subset = None
-        reviewable_subset = list(set(members) & set(objectset))
-        if reviewable_subset:
-            assert len(reviewable_subset) <= 1, "Multiple subsets for review"
-            self.log.debug('subset for review: {}'.format(reviewable_subset))
+        reviewable_subsets = list(set(members) & set(objectset))
+        if reviewable_subsets:
+            if len(reviewable_subsets) > 1:
+                raise KnownPublishError(
+                    "Multiple attached subsets for review are not supported. "
+                    "Attached: {}".format(", ".join(reviewable_subsets))
+                )
 
-            i = 0
-            for inst in instance.context:
-
-                self.log.debug('filtering {}'.format(inst))
-                data = instance.context[i].data
-
-                if inst.name != reviewable_subset[0]:
-                    self.log.debug('subset name does not match {}'.format(
-                        reviewable_subset[0]))
-                    i += 1
-                    continue
-
-                if data.get('families'):
-                    data['families'].append('review')
-                else:
-                    data['families'] = ['review']
-                self.log.debug('adding review family to {}'.format(
-                    reviewable_subset))
-                data['review_camera'] = camera
-                # data["publish"] = False
-                data['frameStartFtrack'] = instance.data["frameStartHandle"]
-                data['frameEndFtrack'] = instance.data["frameEndHandle"]
-                data['frameStartHandle'] = instance.data["frameStartHandle"]
-                data['frameEndHandle'] = instance.data["frameEndHandle"]
-                data["frameStart"] = instance.data["frameStart"]
-                data["frameEnd"] = instance.data["frameEnd"]
-                data['handles'] = instance.data.get('handles', None)
-                data['step'] = instance.data['step']
-                data['fps'] = instance.data['fps']
-                data['review_width'] = instance.data['review_width']
-                data['review_height'] = instance.data['review_height']
-                data["isolate"] = instance.data["isolate"]
-                data["panZoom"] = instance.data.get("panZoom", False)
-                data["panel"] = instance.data["panel"]
-                cmds.setAttr(str(instance) + '.active', 1)
-                self.log.debug('data {}'.format(instance.context[i].data))
-                instance.context[i].data.update(data)
-                instance.data['remove'] = True
-                self.log.debug('isntance data {}'.format(instance.data))
+            reviewable_subset = reviewable_subsets[0]
+            self.log.debug(
+                "Subset attached to review: {}".format(reviewable_subset)
+            )
+
+            # Find the relevant publishing instance in the current context
+            reviewable_inst = next(inst for inst in context
+                                   if inst.name == reviewable_subset)
+            data = reviewable_inst.data
+
+            self.log.debug(
+                'Adding review family to {}'.format(reviewable_subset)
+            )
+            if data.get('families'):
+                data['families'].append('review')
+            else:
+                data['families'] = ['review']
+
+            data['review_camera'] = camera
+            data['frameStartFtrack'] = instance.data["frameStartHandle"]
+            data['frameEndFtrack'] = instance.data["frameEndHandle"]
+            data['frameStartHandle'] = instance.data["frameStartHandle"]
+            data['frameEndHandle'] = instance.data["frameEndHandle"]
+            data["frameStart"] = instance.data["frameStart"]
+            data["frameEnd"] = instance.data["frameEnd"]
+            data['handles'] = instance.data.get('handles', None)
+            data['step'] = instance.data['step']
+            data['fps'] = instance.data['fps']
+            data['review_width'] = instance.data['review_width']
+            data['review_height'] = instance.data['review_height']
+            data["isolate"] = instance.data["isolate"]
+            data["panZoom"] = instance.data.get("panZoom", False)
+            data["panel"] = instance.data["panel"]
+
+            # The review instance must be active
+            cmds.setAttr(str(instance) + '.active', 1)
+
+            instance.data['remove'] = True
         else:
             legacy_subset_name = task + 'Review'
             asset_doc = instance.context.data['assetEntity']
@@ -108,7 +118,7 @@ class CollectReview(pyblish.api.InstancePlugin):
             instance.data["frameEndHandle"]
 
         # make ftrack publishable
-        instance.data["families"] = ['ftrack']
+        instance.data.setdefault("families", []).append('ftrack')
 
         cmds.setAttr(str(instance) + '.active', 1)
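The shift from assert to KnownPublishError is more than style: asserts disappear when Python runs with optimizations enabled, and they surface to artists as raw AssertionErrors rather than readable publish messages. A standalone illustration of the first point:

    import subprocess
    import sys

    code = "assert False, 'never runs under -O'; print('validated')"
    # Under -O the assert compiles away, so 'validated' is printed:
    subprocess.run([sys.executable, "-O", "-c", code])
    # Without -O the assert fires before the print:
    subprocess.run([sys.executable, "-c", code])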
@@ -1,12 +1,12 @@
 import os
+from collections import defaultdict
+import json
 
 from maya import cmds
 import arnold
 
 from openpype.pipeline import publish
-from openpype.hosts.maya.api.lib import (
-    maintained_selection, attribute_values, delete_after
-)
+from openpype.hosts.maya.api import lib
 
 
 class ExtractArnoldSceneSource(publish.Extractor):
@@ -19,8 +19,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
 
     def process(self, instance):
         staging_dir = self.staging_dir(instance)
-        filename = "{}.ass".format(instance.name)
-        file_path = os.path.join(staging_dir, filename)
+        file_path = os.path.join(staging_dir, "{}.ass".format(instance.name))
 
         # Mask
         mask = arnold.AI_NODE_ALL
@@ -71,8 +70,8 @@ class ExtractArnoldSceneSource(publish.Extractor):
             "mask": mask
         }
 
-        filenames = self._extract(
-            instance.data["setMembers"], attribute_data, kwargs
+        filenames, nodes_by_id = self._extract(
+            instance.data["contentMembers"], attribute_data, kwargs
         )
 
         if "representations" not in instance.data:
@@ -88,6 +87,19 @@ class ExtractArnoldSceneSource(publish.Extractor):
 
         instance.data["representations"].append(representation)
 
+        json_path = os.path.join(staging_dir, "{}.json".format(instance.name))
+        with open(json_path, "w") as f:
+            json.dump(nodes_by_id, f)
+
+        representation = {
+            "name": "json",
+            "ext": "json",
+            "files": os.path.basename(json_path),
+            "stagingDir": staging_dir
+        }
+
+        instance.data["representations"].append(representation)
+
         self.log.info(
             "Extracted instance {} to: {}".format(instance.name, staging_dir)
         )
@@ -97,7 +109,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
             return
 
         kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
-        filenames = self._extract(
+        filenames, _ = self._extract(
             instance.data["proxy"], attribute_data, kwargs
         )
 
@@ -113,34 +125,60 @@ class ExtractArnoldSceneSource(publish.Extractor):
         instance.data["representations"].append(representation)
 
     def _extract(self, nodes, attribute_data, kwargs):
-        self.log.info("Writing: " + kwargs["filename"])
+        self.log.info(
+            "Writing {} with:\n{}".format(kwargs["filename"], kwargs)
+        )
         filenames = []
+        nodes_by_id = defaultdict(list)
         # Duplicating nodes so they are direct children of the world. This
         # makes the hierarchy of any exported ass file the same.
-        with delete_after() as delete_bin:
+        with lib.delete_after() as delete_bin:
             duplicate_nodes = []
             for node in nodes:
+                # Only interested in transforms:
+                if cmds.nodeType(node) != "transform":
+                    continue
+
+                # Only interested in transforms with shapes.
+                shapes = cmds.listRelatives(
+                    node, shapes=True, noIntermediate=True
+                )
+                if not shapes:
+                    continue
+
                 duplicate_transform = cmds.duplicate(node)[0]
 
-                # Discard the children.
-                shapes = cmds.listRelatives(duplicate_transform, shapes=True)
+                if cmds.listRelatives(duplicate_transform, parent=True):
+                    duplicate_transform = cmds.parent(
+                        duplicate_transform, world=True
+                    )[0]
+
+                basename = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
+                duplicate_transform = cmds.rename(
+                    duplicate_transform, basename
+                )
+
+                # Discard children nodes that are not shapes
+                shapes = cmds.listRelatives(
+                    duplicate_transform, shapes=True, fullPath=True
+                )
                 children = cmds.listRelatives(
-                    duplicate_transform, children=True
+                    duplicate_transform, children=True, fullPath=True
                 )
                 cmds.delete(set(children) - set(shapes))
 
-                duplicate_transform = cmds.parent(
-                    duplicate_transform, world=True
-                )[0]
-
-                cmds.rename(duplicate_transform, node.split("|")[-1])
-                duplicate_transform = "|" + node.split("|")[-1]
-
                 duplicate_nodes.append(duplicate_transform)
+                duplicate_nodes.extend(shapes)
                 delete_bin.append(duplicate_transform)
 
-        with attribute_values(attribute_data):
-            with maintained_selection():
+            # Copy cbId to mtoa_constant.
+            for node in duplicate_nodes:
+                # Converting Maya hierarchy separator "|" to Arnold
+                # separator "/".
+                nodes_by_id[lib.get_id(node)].append(node.replace("|", "/"))
+
+        with lib.attribute_values(attribute_data):
+            with lib.maintained_selection():
+                self.log.info(
+                    "Writing: {}".format(duplicate_nodes)
+                )
@@ -157,4 +195,4 @@ class ExtractArnoldSceneSource(publish.Extractor):
 
         self.log.info("Exported: {}".format(filenames))
 
-        return filenames
+        return filenames, nodes_by_id
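The second representation written here is a JSON sidecar mapping each node's cbId to the Arnold-style paths of the exported nodes; the look assigner reads it back via get_nodes_by_id() later in this diff. A sketch of its shape, with hypothetical ids and paths:

    {
        "5f8a1b2c3d4e5f6a7b8c9d0e:a1b2c3d4e5f6": [
            "/chair_GRP/chair_GEOShape"
        ]
    }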
openpype/hosts/maya/plugins/publish/extract_gpu_cache.py (new file, 65 lines)
@@ -0,0 +1,65 @@
+import json
+
+from maya import cmds
+
+from openpype.pipeline import publish
+
+
+class ExtractGPUCache(publish.Extractor):
+    """Extract the content of the instance to a GPU cache file."""
+
+    label = "GPU Cache"
+    hosts = ["maya"]
+    families = ["model", "animation", "pointcache"]
+    step = 1.0
+    stepSave = 1
+    optimize = True
+    optimizationThreshold = 40000
+    optimizeAnimationsForMotionBlur = True
+    writeMaterials = True
+    useBaseTessellation = True
+
+    def process(self, instance):
+        cmds.loadPlugin("gpuCache", quiet=True)
+
+        staging_dir = self.staging_dir(instance)
+        filename = "{}_gpu_cache".format(instance.name)
+
+        # Write out GPU cache file.
+        kwargs = {
+            "directory": staging_dir,
+            "fileName": filename,
+            "saveMultipleFiles": False,
+            "simulationRate": self.step,
+            "sampleMultiplier": self.stepSave,
+            "optimize": self.optimize,
+            "optimizationThreshold": self.optimizationThreshold,
+            "optimizeAnimationsForMotionBlur": (
+                self.optimizeAnimationsForMotionBlur
+            ),
+            "writeMaterials": self.writeMaterials,
+            "useBaseTessellation": self.useBaseTessellation
+        }
+        self.log.debug(
+            "Extract {} with:\n{}".format(
+                instance[:], json.dumps(kwargs, indent=4, sort_keys=True)
+            )
+        )
+        cmds.gpuCache(instance[:], **kwargs)
+
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+
+        representation = {
+            "name": "gpu_cache",
+            "ext": "abc",
+            "files": filename + ".abc",
+            "stagingDir": staging_dir,
+            "outputName": "gpu_cache"
+        }
+
+        instance.data["representations"].append(representation)
+
+        self.log.info(
+            "Extracted instance {} to: {}".format(instance.name, staging_dir)
+        )
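The extractor's class attributes (step, stepSave, optimize, ...) line up with the ExtractGPUCache settings block added near the end of this diff; OpenPype applies matching settings keys onto plugin class attributes, so the defaults above are studio-overridable without code changes. A hypothetical override, in the same JSON shape as the settings diff below:

    "ExtractGPUCache": {
        "enabled": true,
        "families": ["model"],
        "step": 0.5,
        "optimizationThreshold": 80000
    }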
@@ -1,5 +1,3 @@
-import maya.cmds as cmds
-
 import pyblish.api
 from openpype.pipeline.publish import (
     ValidateContentsOrder, PublishValidationError
@@ -22,10 +20,11 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
     families = ["ass"]
     label = "Validate Arnold Scene Source"
 
-    def _get_nodes_data(self, nodes):
+    def _get_nodes_by_name(self, nodes):
         ungrouped_nodes = []
         nodes_by_name = {}
         parents = []
+        same_named_nodes = {}
         for node in nodes:
             node_split = node.split("|")
             if len(node_split) == 2:
@@ -35,21 +34,38 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
             if parent:
                 parents.append(parent)
 
-            nodes_by_name[node_split[-1]] = node
-            for shape in cmds.listRelatives(node, shapes=True):
-                nodes_by_name[shape.split("|")[-1]] = shape
+            node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
+
+            # Check for same named nodes, which can happen in different
+            # hierarchies.
+            if node_name in nodes_by_name:
+                try:
+                    same_named_nodes[node_name].append(node)
+                except KeyError:
+                    same_named_nodes[node_name] = [
+                        nodes_by_name[node_name], node
+                    ]
+
+            nodes_by_name[node_name] = node
+
+        if same_named_nodes:
+            message = "Found nodes with the same name:"
+            for name, nodes in same_named_nodes.items():
+                message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes))
+
+            raise PublishValidationError(message)
+
         return ungrouped_nodes, nodes_by_name, parents
 
     def process(self, instance):
         ungrouped_nodes = []
 
-        nodes, content_nodes_by_name, content_parents = self._get_nodes_data(
-            instance.data["setMembers"]
+        nodes, content_nodes_by_name, content_parents = (
+            self._get_nodes_by_name(instance.data["contentMembers"])
         )
         ungrouped_nodes.extend(nodes)
 
-        nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_data(
+        nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_by_name(
             instance.data.get("proxy", [])
         )
         ungrouped_nodes.extend(nodes)
@@ -66,11 +82,11 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
             return
 
         # Validate for content and proxy nodes amount being the same.
-        if len(instance.data["setMembers"]) != len(instance.data["proxy"]):
+        if len(instance.data["contentMembers"]) != len(instance.data["proxy"]):
             raise PublishValidationError(
                 "Amount of content nodes ({}) and proxy nodes ({}) needs to "
                 "be the same.".format(
-                    len(instance.data["setMembers"]),
+                    len(instance.data["contentMembers"]),
                     len(instance.data["proxy"])
                 )
             )
(new file, 74 lines)
@@ -0,0 +1,74 @@
+import pyblish.api
+from openpype.hosts.maya.api import lib
+from openpype.pipeline.publish import (
+    ValidateContentsOrder, PublishValidationError, RepairAction
+)
+
+
+class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin):
+    """Validate Arnold Scene Source Cbid.
+
+    It is required for the proxy and content nodes to share the same cbid.
+    """
+
+    order = ValidateContentsOrder
+    hosts = ["maya"]
+    families = ["ass"]
+    label = "Validate Arnold Scene Source CBID"
+    actions = [RepairAction]
+
+    @staticmethod
+    def _get_nodes_by_name(nodes):
+        nodes_by_name = {}
+        for node in nodes:
+            node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
+            nodes_by_name[node_name] = node
+
+        return nodes_by_name
+
+    @classmethod
+    def get_invalid_couples(cls, instance):
+        content_nodes_by_name = cls._get_nodes_by_name(
+            instance.data["contentMembers"]
+        )
+        proxy_nodes_by_name = cls._get_nodes_by_name(
+            instance.data.get("proxy", [])
+        )
+
+        invalid_couples = []
+        for content_name, content_node in content_nodes_by_name.items():
+            proxy_node = proxy_nodes_by_name.get(content_name, None)
+
+            if not proxy_node:
+                cls.log.debug(
+                    "Content node '{}' has no matching proxy node.".format(
+                        content_node
+                    )
+                )
+                continue
+
+            content_id = lib.get_id(content_node)
+            proxy_id = lib.get_id(proxy_node)
+            if content_id != proxy_id:
+                invalid_couples.append((content_node, proxy_node))
+
+        return invalid_couples
+
+    def process(self, instance):
+        # Proxy validation.
+        if not instance.data.get("proxy", []):
+            return
+
+        # Validate for proxy nodes sharing the same cbId as content nodes.
+        invalid_couples = self.get_invalid_couples(instance)
+        if invalid_couples:
+            raise PublishValidationError(
+                "Found proxy nodes with mismatching cbid:\n{}".format(
+                    invalid_couples
+                )
+            )
+
+    @classmethod
+    def repair(cls, instance):
+        for content_node, proxy_node in cls.get_invalid_couples(cls, instance):
+            lib.set_id(proxy_node, lib.get_id(content_node), overwrite=False)
@@ -275,15 +275,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
         # go through definitions and test if such node.attribute exists.
         # if so, compare its value from the one required.
         for attribute, data in cls.get_nodes(instance, renderer).items():
-            # Validate the settings has values.
-            if not data["values"]:
-                cls.log.error(
-                    "Settings for {}.{} is missing values.".format(
-                        node, attribute
-                    )
-                )
-                continue
-
             for node in data["nodes"]:
                 try:
                     render_value = cmds.getAttr(
@@ -316,6 +307,15 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
         )
         result = {}
         for attr, values in OrderedDict(validation_settings).items():
+            values = [convert_to_int_or_float(v) for v in values if v]
+
+            # Validate the settings has values.
+            if not values:
+                cls.log.error(
+                    "Settings for {} is missing values.".format(attr)
+                )
+                continue
+
             cls.log.debug("{}: {}".format(attr, values))
             if "." not in attr:
                 cls.log.warning(
@@ -324,8 +324,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
                 )
                 continue
 
-            values = [convert_to_int_or_float(v) for v in values]
-
             node_type, attribute_name = attr.split(".", 1)
 
             # first get node of that type
@@ -24,6 +24,7 @@ from .commands import (
     remove_unused_looks
 )
 from .vray_proxies import vrayproxy_assign_look
+from . import arnold_standin
 
 module = sys.modules[__name__]
 module.window = None
@@ -43,7 +44,7 @@ class MayaLookAssignerWindow(QtWidgets.QWidget):
         filename = get_workfile()
 
         self.setObjectName("lookManager")
-        self.setWindowTitle("Look Manager 1.3.0 - [{}]".format(filename))
+        self.setWindowTitle("Look Manager 1.4.0 - [{}]".format(filename))
         self.setWindowFlags(QtCore.Qt.Window)
         self.setParent(parent)
 
@@ -240,18 +241,37 @@ class MayaLookAssignerWindow(QtWidgets.QWidget):
             ))
             nodes = item["nodes"]
 
+            # Assign Vray Proxy look.
             if cmds.pluginInfo('vrayformaya', query=True, loaded=True):
                 self.echo("Getting vray proxy nodes ...")
                 vray_proxies = set(cmds.ls(type="VRayProxy", long=True))
 
-                if vray_proxies:
-                    for vp in vray_proxies:
-                        if vp in nodes:
-                            vrayproxy_assign_look(vp, subset_name)
+                for vp in vray_proxies:
+                    if vp in nodes:
+                        vrayproxy_assign_look(vp, subset_name)
 
-                    nodes = list(set(item["nodes"]).difference(vray_proxies))
+                nodes = list(set(item["nodes"]).difference(vray_proxies))
+            else:
+                self.echo(
+                    "Could not assign to VRayProxy because vrayformaya plugin "
+                    "is not loaded."
+                )
 
-            # Assign look
+            # Assign Arnold Standin look.
+            if cmds.pluginInfo("mtoa", query=True, loaded=True):
+                arnold_standins = set(cmds.ls(type="aiStandIn", long=True))
+                for standin in arnold_standins:
+                    if standin in nodes:
+                        arnold_standin.assign_look(standin, subset_name)
+            else:
+                self.echo(
+                    "Could not assign to aiStandIn because mtoa plugin is not "
+                    "loaded."
+                )
+
+            nodes = list(set(item["nodes"]).difference(arnold_standins))
+
+            # Assign look
             if nodes:
                 assign_look_by_version(nodes, version_id=version["_id"])
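Both assignment branches now follow the same guard pattern: confirm the renderer plugin is loaded before querying for its node types, and fall back to a message instead of a hard error on machines without the plugin. A standalone sketch:

    import maya.cmds as cmds

    # Without the guard, cmds.ls(type="aiStandIn") errors when MtoA
    # is not installed, because the node type does not exist.
    if cmds.pluginInfo("mtoa", query=True, loaded=True):
        standins = cmds.ls(type="aiStandIn", long=True)
    else:
        standins = []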
openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py (new file, 247 lines)
@@ -0,0 +1,247 @@
+import os
+import json
+from collections import defaultdict
+import logging
+
+from maya import cmds
+
+from openpype.pipeline import legacy_io
+from openpype.client import get_last_version_by_subset_name
+from openpype.hosts.maya import api
+from . import lib
+
+
+log = logging.getLogger(__name__)
+
+
+ATTRIBUTE_MAPPING = {
+    "primaryVisibility": "visibility",  # Camera
+    "castsShadows": "visibility",  # Shadow
+    "receiveShadows": "receive_shadows",
+    "aiSelfShadows": "self_shadows",
+    "aiOpaque": "opaque",
+    "aiMatte": "matte",
+    "aiVisibleInDiffuseTransmission": "visibility",
+    "aiVisibleInSpecularTransmission": "visibility",
+    "aiVisibleInVolume": "visibility",
+    "aiVisibleInDiffuseReflection": "visibility",
+    "aiVisibleInSpecularReflection": "visibility",
+    "aiSubdivUvSmoothing": "subdiv_uv_smoothing",
+    "aiDispHeight": "disp_height",
+    "aiDispPadding": "disp_padding",
+    "aiDispZeroValue": "disp_zero_value",
+    "aiStepSize": "step_size",
+    "aiVolumePadding": "volume_padding",
+    "aiSubdivType": "subdiv_type",
+    "aiSubdivIterations": "subdiv_iterations"
+}
+
+
+def calculate_visibility_mask(attributes):
+    # https://arnoldsupport.com/2018/11/21/backdoor-setting-visibility/
+    mapping = {
+        "primaryVisibility": 1,  # Camera
+        "castsShadows": 2,  # Shadow
+        "aiVisibleInDiffuseTransmission": 4,
+        "aiVisibleInSpecularTransmission": 8,
+        "aiVisibleInVolume": 16,
+        "aiVisibleInDiffuseReflection": 32,
+        "aiVisibleInSpecularReflection": 64
+    }
+    mask = 255
+    for attr, value in mapping.items():
+        if attributes.get(attr, True):
+            continue
+
+        mask -= value
+
+    return mask
+
+
+def get_nodes_by_id(standin):
+    """Get node id from aiStandIn via json sidecar.
+
+    Args:
+        standin (string): aiStandIn node.
+
+    Returns:
+        (dict): Dictionary with node full name/path and id.
+    """
+    path = cmds.getAttr(standin + ".dso")
+    json_path = None
+    for f in os.listdir(os.path.dirname(path)):
+        if f.endswith(".json"):
+            json_path = os.path.join(os.path.dirname(path), f)
+            break
+
+    if not json_path:
+        log.warning("Could not find json file for {}.".format(standin))
+        return {}
+
+    with open(json_path, "r") as f:
+        return json.load(f)
+
+
+def shading_engine_assignments(shading_engine, attribute, nodes, assignments):
+    """Full assignments with shader or disp_map.
+
+    Args:
+        shading_engine (string): Shading engine for material.
+        attribute (string): "surfaceShader" or "displacementShader"
+        nodes: (list): Nodes paths relative to aiStandIn.
+        assignments (dict): Assignments by nodes.
+    """
+    shader_inputs = cmds.listConnections(
+        shading_engine + "." + attribute, source=True
+    )
+    if not shader_inputs:
+        log.info(
+            "Shading engine \"{}\" missing input \"{}\"".format(
+                shading_engine, attribute
+            )
+        )
+        return
+
+    # Strip off component assignments
+    for i, node in enumerate(nodes):
+        if "." in node:
+            log.warning(
+                "Converting face assignment to full object assignment. This "
+                "conversion can be lossy: {}".format(node)
+            )
+            nodes[i] = node.split(".")[0]
+
+    shader_type = "shader" if attribute == "surfaceShader" else "disp_map"
+    assignment = "{}='{}'".format(shader_type, shader_inputs[0])
+    for node in nodes:
+        assignments[node].append(assignment)
+
+
+def assign_look(standin, subset):
+    log.info("Assigning {} to {}.".format(subset, standin))
+
+    nodes_by_id = get_nodes_by_id(standin)
+
+    # Group by asset id so we run over the look per asset
+    node_ids_by_asset_id = defaultdict(set)
+    for node_id in nodes_by_id:
+        asset_id = node_id.split(":", 1)[0]
+        node_ids_by_asset_id[asset_id].add(node_id)
+
+    project_name = legacy_io.active_project()
+    for asset_id, node_ids in node_ids_by_asset_id.items():
+
+        # Get latest look version
+        version = get_last_version_by_subset_name(
+            project_name,
+            subset_name=subset,
+            asset_id=asset_id,
+            fields=["_id"]
+        )
+        if not version:
+            log.info("Didn't find last version for subset name {}".format(
+                subset
+            ))
+            continue
+
+        relationships = lib.get_look_relationships(version["_id"])
+        shader_nodes, container_node = lib.load_look(version["_id"])
+        namespace = shader_nodes[0].split(":")[0]
+
+        # Get only the node ids and paths related to this asset
+        # And get the shader edits the look supplies
+        asset_nodes_by_id = {
+            node_id: nodes_by_id[node_id] for node_id in node_ids
+        }
+        edits = list(
+            api.lib.iter_shader_edits(
+                relationships, shader_nodes, asset_nodes_by_id
+            )
+        )
+
+        # Create assignments
+        node_assignments = {}
+        for edit in edits:
+            for node in edit["nodes"]:
+                if node not in node_assignments:
+                    node_assignments[node] = []
+
+            if edit["action"] == "assign":
+                if not cmds.ls(edit["shader"], type="shadingEngine"):
+                    log.info("Skipping non-shader: %s" % edit["shader"])
+                    continue
+
+                shading_engine_assignments(
+                    shading_engine=edit["shader"],
+                    attribute="surfaceShader",
+                    nodes=edit["nodes"],
+                    assignments=node_assignments
+                )
+                shading_engine_assignments(
+                    shading_engine=edit["shader"],
+                    attribute="displacementShader",
+                    nodes=edit["nodes"],
+                    assignments=node_assignments
+                )
+
+            if edit["action"] == "setattr":
+                visibility = False
+                for attr, value in edit["attributes"].items():
+                    if attr not in ATTRIBUTE_MAPPING:
+                        log.warning(
+                            "Skipping setting attribute {} on {} because it is"
+                            " not recognized.".format(attr, edit["nodes"])
+                        )
+                        continue
+
+                    if isinstance(value, str):
+                        value = "'{}'".format(value)
+
+                    if ATTRIBUTE_MAPPING[attr] == "visibility":
+                        visibility = True
+                        continue
+
+                    assignment = "{}={}".format(ATTRIBUTE_MAPPING[attr], value)
+
+                    for node in edit["nodes"]:
+                        node_assignments[node].append(assignment)
+
+                if visibility:
+                    mask = calculate_visibility_mask(edit["attributes"])
+                    assignment = "visibility={}".format(mask)
+
+                    for node in edit["nodes"]:
+                        node_assignments[node].append(assignment)
+
+        # Assign shader
+        # Clear all current shader assignments
+        plug = standin + ".operators"
+        num = cmds.getAttr(plug, size=True)
+        for i in reversed(range(num)):
+            cmds.removeMultiInstance("{}[{}]".format(plug, i), b=True)
+
+        # Create new assignment overrides
+        index = 0
+        for node, assignments in node_assignments.items():
+            if not assignments:
+                continue
+
+            with api.lib.maintained_selection():
+                operator = cmds.createNode("aiSetParameter")
+                operator = cmds.rename(operator, namespace + ":" + operator)
+
+            cmds.setAttr(operator + ".selection", node, type="string")
+            for i, assignment in enumerate(assignments):
+                cmds.setAttr(
+                    "{}.assignment[{}]".format(operator, i),
+                    assignment,
+                    type="string"
+                )
+
+            cmds.connectAttr(
+                operator + ".out", "{}[{}]".format(plug, index)
+            )
+
+            index += 1
+
+            cmds.sets(operator, edit=True, addElement=container_node)
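The visibility mask packs the seven Arnold ray-visibility toggles into one integer, starting from 255 (all rays on) and subtracting the bit of each attribute that is explicitly off. A worked example using the function from this file:

    attributes = {"primaryVisibility": False, "castsShadows": False}
    # 255 - 1 (camera) - 2 (shadow) = 252
    print(calculate_visibility_mask(attributes))  # 252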
@@ -13,6 +13,7 @@ from openpype.pipeline import (
 from openpype.hosts.maya.api import lib
 
 from .vray_proxies import get_alembic_ids_cache
+from . import arnold_standin
 
 log = logging.getLogger(__name__)
 
@@ -44,33 +45,11 @@ def get_namespace_from_node(node):
     return parts[0] if len(parts) > 1 else u":"
 
 
-def list_descendents(nodes):
-    """Include full descendant hierarchy of given nodes.
-
-    This is a workaround to cmds.listRelatives(allDescendents=True) because
-    this way correctly keeps children instance paths (see Maya documentation)
-
-    This fixes LKD-26: assignments not working as expected on instanced shapes.
-
-    Return:
-        list: List of children descendents of nodes
-
-    """
-    result = []
-    while True:
-        nodes = cmds.listRelatives(nodes,
-                                   fullPath=True)
-        if nodes:
-            result.extend(nodes)
-        else:
-            return result
-
-
 def get_selected_nodes():
     """Get information from current selection"""
 
     selection = cmds.ls(selection=True, long=True)
-    hierarchy = list_descendents(selection)
+    hierarchy = lib.get_all_children(selection)
     return list(set(selection + hierarchy))
 
 
@@ -105,10 +84,12 @@ def create_asset_id_hash(nodes):
             path = cmds.getAttr("{}.fileName".format(node))
             ids = get_alembic_ids_cache(path)
             for k, _ in ids.items():
-                pid = k.split(":")[0]
-                if node not in node_id_hash[pid]:
-                    node_id_hash[pid].append(node)
-
+                id = k.split(":")[0]
+                node_id_hash[id].append(node)
+        elif cmds.nodeType(node) == "aiStandIn":
+            for id, _ in arnold_standin.get_nodes_by_id(node).items():
+                id = id.split(":")[0]
+                node_id_hash[id].append(node)
         else:
             value = lib.get_id(node)
             if value is None:
openpype/hosts/maya/tools/mayalookassigner/lib.py (new file, 87 lines)
@@ -0,0 +1,87 @@
+import json
+import logging
+
+from openpype.pipeline import (
+    legacy_io,
+    get_representation_path,
+    registered_host,
+    discover_loader_plugins,
+    loaders_from_representation,
+    load_container
+)
+from openpype.client import get_representation_by_name
+from openpype.hosts.maya.api import lib
+
+
+log = logging.getLogger(__name__)
+
+
+def get_look_relationships(version_id):
+    # type: (str) -> dict
+    """Get relations for the look.
+
+    Args:
+        version_id (str): Parent version Id.
+
+    Returns:
+        dict: Dictionary of relations.
+    """
+
+    project_name = legacy_io.active_project()
+    json_representation = get_representation_by_name(
+        project_name, representation_name="json", version_id=version_id
+    )
+
+    # Load relationships
+    shader_relation = get_representation_path(json_representation)
+    with open(shader_relation, "r") as f:
+        relationships = json.load(f)
+
+    return relationships
+
+
+def load_look(version_id):
+    # type: (str) -> list
+    """Load look from version.
+
+    Get look from version and invoke Loader for it.
+
+    Args:
+        version_id (str): Version ID
+
+    Returns:
+        list of shader nodes.
+
+    """
+
+    project_name = legacy_io.active_project()
+    # Get representations of shader file and relationships
+    look_representation = get_representation_by_name(
+        project_name, representation_name="ma", version_id=version_id
+    )
+
+    # See if representation is already loaded, if so reuse it.
+    host = registered_host()
+    representation_id = str(look_representation['_id'])
+    for container in host.ls():
+        if (container['loader'] == "LookLoader" and
+                container['representation'] == representation_id):
+            log.info("Reusing loaded look ...")
+            container_node = container['objectName']
+            break
+    else:
+        log.info("Using look for the first time ...")
+
+        # Load file
+        all_loaders = discover_loader_plugins()
+        loaders = loaders_from_representation(all_loaders, representation_id)
+        loader = next(
+            (i for i in loaders if i.__name__ == "LookLoader"), None)
+        if loader is None:
+            raise RuntimeError("Could not find LookLoader, this is a bug")
+
+        # Reference the look file
+        with lib.maintained_selection():
+            container_node = load_container(loader, look_representation)[0]
+
+    return lib.get_container_members(container_node), container_node
@@ -3,26 +3,16 @@
 import os
 from collections import defaultdict
 import logging
-import json
 
 import six
 
 import alembic.Abc
 from maya import cmds
 
-from openpype.client import (
-    get_representation_by_name,
-    get_last_version_by_subset_name,
-)
-from openpype.pipeline import (
-    legacy_io,
-    load_container,
-    loaders_from_representation,
-    discover_loader_plugins,
-    get_representation_path,
-    registered_host,
-)
-from openpype.hosts.maya.api import lib
+from openpype.client import get_last_version_by_subset_name
+from openpype.pipeline import legacy_io
+import openpype.hosts.maya.lib as maya_lib
+from . import lib
 
 log = logging.getLogger(__name__)
@@ -149,79 +139,6 @@ def assign_vrayproxy_shaders(vrayproxy, assignments):
         index += 1
 
 
-def get_look_relationships(version_id):
-    # type: (str) -> dict
-    """Get relations for the look.
-
-    Args:
-        version_id (str): Parent version Id.
-
-    Returns:
-        dict: Dictionary of relations.
-    """
-
-    project_name = legacy_io.active_project()
-    json_representation = get_representation_by_name(
-        project_name, representation_name="json", version_id=version_id
-    )
-
-    # Load relationships
-    shader_relation = get_representation_path(json_representation)
-    with open(shader_relation, "r") as f:
-        relationships = json.load(f)
-
-    return relationships
-
-
-def load_look(version_id):
-    # type: (str) -> list
-    """Load look from version.
-
-    Get look from version and invoke Loader for it.
-
-    Args:
-        version_id (str): Version ID
-
-    Returns:
-        list of shader nodes.
-
-    """
-
-    project_name = legacy_io.active_project()
-    # Get representations of shader file and relationships
-    look_representation = get_representation_by_name(
-        project_name, representation_name="ma", version_id=version_id
-    )
-
-    # See if representation is already loaded, if so reuse it.
-    host = registered_host()
-    representation_id = str(look_representation['_id'])
-    for container in host.ls():
-        if (container['loader'] == "LookLoader" and
-                container['representation'] == representation_id):
-            log.info("Reusing loaded look ...")
-            container_node = container['objectName']
-            break
-    else:
-        log.info("Using look for the first time ...")
-
-        # Load file
-        all_loaders = discover_loader_plugins()
-        loaders = loaders_from_representation(all_loaders, representation_id)
-        loader = next(
-            (i for i in loaders if i.__name__ == "LookLoader"), None)
-        if loader is None:
-            raise RuntimeError("Could not find LookLoader, this is a bug")
-
-        # Reference the look file
-        with lib.maintained_selection():
-            container_node = load_container(loader, look_representation)
-
-        # Get container members
-        shader_nodes = lib.get_container_members(container_node)
-        return shader_nodes
-
-
 def vrayproxy_assign_look(vrayproxy, subset="lookDefault"):
     # type: (str, str) -> None
     """Assign look to vray proxy.
@@ -263,8 +180,8 @@ def vrayproxy_assign_look(vrayproxy, subset="lookDefault"):
             ))
             continue
 
-        relationships = get_look_relationships(version["_id"])
-        shadernodes = load_look(version["_id"])
+        relationships = lib.get_look_relationships(version["_id"])
+        shadernodes, _ = lib.load_look(version["_id"])
 
         # Get only the node ids and paths related to this asset
         # And get the shader edits the look supplies
@@ -272,8 +189,10 @@ def vrayproxy_assign_look(vrayproxy, subset="lookDefault"):
             node_id: nodes_by_id[node_id] for node_id in node_ids
         }
         edits = list(
-            lib.iter_shader_edits(
-                relationships, shadernodes, asset_nodes_by_id))
+            maya_lib.iter_shader_edits(
+                relationships, shadernodes, asset_nodes_by_id
+            )
+        )
 
         # Create assignments
         assignments = {}
@@ -170,11 +170,13 @@ def clean_envs_for_openpype_process(env=None):
     """
     if env is None:
         env = os.environ
-    return {
-        key: value
-        for key, value in env.items()
-        if key not in ("PYTHONPATH",)
-    }
+
+    # Exclude some environment variables from a copy of the environment
+    env = env.copy()
+    for key in ["PYTHONPATH", "PYTHONHOME"]:
+        env.pop(key, None)
+
+    return env
 
 
 def run_openpype_process(*args, **kwargs):
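The rewrite also drops PYTHONHOME, which the old dict comprehension kept; a stray PYTHONHOME inherited from a host DCC can point a spawned interpreter at the wrong standard library. A quick standalone sketch of the behavior:

    env = {"PATH": "/usr/bin", "PYTHONPATH": "/dcc/site-packages",
           "PYTHONHOME": "/dcc/python"}

    cleaned = env.copy()
    for key in ["PYTHONPATH", "PYTHONHOME"]:
        cleaned.pop(key, None)

    print(cleaned)  # {'PATH': '/usr/bin'}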
@@ -329,6 +329,7 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne:
             "code": project_code,
             "fps": float(project["fps"]),
             "zou_id": project["id"],
+            "active": project['project_status_name'] != "Closed",
         }
     )
 
@@ -379,7 +380,7 @@ def sync_all_projects(
     # Iterate projects
     dbcon = AvalonMongoDB()
     dbcon.install()
-    all_projects = gazu.project.all_open_projects()
+    all_projects = gazu.project.all_projects()
     for project in all_projects:
         if ignore_projects and project["name"] in ignore_projects:
             continue
@@ -404,7 +405,21 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict):
     if not project:
         project = gazu.project.get_project_by_name(project["name"])
 
-    log.info("Synchronizing {}...".format(project["name"]))
+    # Get all statuses for projects from Kitsu
+    all_status = gazu.project.all_project_status()
+    for status in all_status:
+        if project['project_status_id'] == status['id']:
+            project['project_status_name'] = status['name']
+            break
+
+    # Do not sync closed kitsu project that is not found in openpype
+    if (
+        project['project_status_name'] == "Closed"
+        and not get_project(project['name'])
+    ):
+        return
+
+    log.info(f"Synchronizing {project['name']}...")
 
     # Get all assets from zou
     all_assets = gazu.asset.all_assets_for_project(project)
@@ -429,6 +444,9 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict):
         log.info("Project created: {}".format(project_name))
         bulk_writes.append(write_project_to_op(project, dbcon))
 
+    if project['project_status_name'] == "Closed":
+        return
+
     # Try to find project document
     if not project_dict:
         project_dict = get_project(project_name)
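Resolving the status name requires a second query because Kitsu projects carry only a project_status_id. When syncing many projects, the same lookup can be phrased once as a dict (a sketch, assuming the gazu client from the diff):

    import gazu

    # Map status id -> status name once, then annotate every project.
    status_names = {
        status["id"]: status["name"]
        for status in gazu.project.all_project_status()
    }

    for project in gazu.project.all_projects():
        project["project_status_name"] = status_names.get(
            project["project_status_id"])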
@@ -218,8 +218,7 @@ def get_data_subprocess(config_path, data_type):
     log.info("Executing: {}".format(" ".join(args)))
 
     process_kwargs = {
-        "logger": log,
-        "env": {}
+        "logger": log
     }
 
     run_openpype_process(*args, **process_kwargs)
@@ -42,16 +42,17 @@ class CollectCustomStagingDir(pyblish.api.InstancePlugin):
         subset_name = instance.data["subset"]
         host_name = instance.context.data["hostName"]
         project_name = instance.context.data["projectName"]
+        project_settings = instance.context.data["project_settings"]
         anatomy = instance.context.data["anatomy"]
-        anatomy_data = copy.deepcopy(instance.data["anatomyData"])
-        task = anatomy_data.get("task", {})
+        task = instance.data["anatomyData"].get("task", {})
 
         transient_tml, is_persistent = get_custom_staging_dir_info(
             project_name, host_name, family, task.get("name"),
-            task.get("type"), subset_name, anatomy=anatomy, log=self.log)
-        result_str = "Not adding"
+            task.get("type"), subset_name, project_settings=project_settings,
+            anatomy=anatomy, log=self.log)
 
         if transient_tml:
+            anatomy_data = copy.deepcopy(instance.data["anatomyData"])
             anatomy_data["root"] = anatomy.roots
             scene_name = instance.context.data.get("currentFile")
             if scene_name:
@@ -61,6 +62,8 @@ class CollectCustomStagingDir(pyblish.api.InstancePlugin):
 
             instance.data["stagingDir_persistent"] = is_persistent
             result_str = "Adding '{}' as".format(transient_dir)
+        else:
+            result_str = "Not adding"
 
         self.log.info("{} custom staging dir for instance with '{}'".format(
             result_str, family
@@ -336,8 +336,7 @@ class ExtractBurnin(publish.Extractor):
 
         # Run burnin script
         process_kwargs = {
-            "logger": self.log,
-            "env": {}
+            "logger": self.log
         }
 
         run_openpype_process(*args, **process_kwargs)
@@ -417,7 +417,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
         self.log.debug("{}".format(op_session.to_data()))
         op_session.commit()
 
-        # Backwards compatibility
+        # Backwards compatibility used in hero integration.
         # todo: can we avoid the need to store this?
         instance.data["published_representations"] = {
             p["representation"]["_id"]: p for p in prepared_representations
@@ -869,7 +869,6 @@
                 "dynamics": false,
                 "fluids": false,
                 "follicles": false,
-                "gpuCacheDisplayFilter": false,
                 "greasePencils": false,
                 "grid": false,
                 "hairSystems": true,
@@ -896,7 +895,10 @@
                 "polymeshes": true,
                 "strokes": false,
                 "subdivSurfaces": false,
-                "textures": false
+                "textures": false,
+                "pluginObjects": {
+                    "gpuCacheDisplayFilter": false
+                }
             },
             "Camera Options": {
                 "displayGateMask": false,
@@ -930,6 +932,21 @@
         },
         "ExtractLook": {
            "maketx_arguments": []
+        },
+        "ExtractGPUCache": {
+            "enabled": false,
+            "families": [
+                "model",
+                "animation",
+                "pointcache"
+            ],
+            "step": 1.0,
+            "stepSave": 1,
+            "optimize": true,
+            "optimizationThreshold": 40000,
+            "optimizeAnimationsForMotionBlur": true,
+            "writeMaterials": true,
+            "useBaseTessellation": true
         }
     },
     "load": {
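Moving gpuCacheDisplayFilter under a pluginObjects dict makes the playblast display filters extensible: the schema change below declares it as dict-modifiable with boolean values, so any plugin display filter can be toggled by key. An illustrative override (the second key is hypothetical):

    "pluginObjects": {
        "gpuCacheDisplayFilter": false,
        "xgenDisplayFilter": true
    }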
@@ -426,11 +426,6 @@
             "key": "follicles",
             "label": "Follicles"
         },
-        {
-            "type": "boolean",
-            "key": "gpuCacheDisplayFilter",
-            "label": "GPU Cache"
-        },
         {
             "type": "boolean",
             "key": "greasePencils",
@@ -565,6 +560,12 @@
             "type": "boolean",
             "key": "textures",
             "label": "Texture Placements"
+        },
+        {
+            "type": "dict-modifiable",
+            "key": "pluginObjects",
+            "label": "Plugin Objects",
+            "object_type": "boolean"
         }
     ]
 },
@@ -1025,6 +1025,65 @@
                     }
                 }
             ]
+        },
+        {
+            "type": "dict",
+            "collapsible": true,
+            "key": "ExtractGPUCache",
+            "label": "Extract GPU Cache",
+            "checkbox_key": "enabled",
+            "children": [
+                {
+                    "type": "boolean",
+                    "key": "enabled",
+                    "label": "Enabled"
+                },
+                {
+                    "key": "families",
+                    "label": "Families",
+                    "type": "list",
+                    "object_type": "text"
+                },
+                {
+                    "key": "step",
+                    "label": "Step",
+                    "type": "number",
+                    "decimal": 4,
+                    "minimum": 1
+                },
+                {
+                    "key": "stepSave",
+                    "label": "Step Save",
+                    "type": "number",
+                    "minimum": 1
+                },
+                {
+                    "key": "optimize",
+                    "label": "Optimize Hierarchy",
+                    "type": "boolean"
+                },
+                {
+                    "key": "optimizationThreshold",
+                    "label": "Optimization Threshold",
+                    "type": "number",
+                    "minimum": 1
+                },
+                {
+                    "key": "optimizeAnimationsForMotionBlur",
+                    "label": "Optimize Animations For Motion Blur",
+                    "type": "boolean"
+                },
+                {
+                    "key": "writeMaterials",
+                    "label": "Write Materials",
+                    "type": "boolean"
+                },
+                {
+                    "key": "useBaseTessellation",
+                    "label": "User Base Tesselation",
+                    "type": "boolean"
+                }
+            ]
+        }
     ]
 }
@@ -379,7 +379,7 @@ class FilesWidget(QtWidgets.QWidget):

         # Disable/Enable buttons based on available files in model
         has_valid_items = self._workarea_files_model.has_valid_items()
-        self._btn_browse.setEnabled(has_valid_items)
+        self._btn_browse.setEnabled(True)
         self._btn_open.setEnabled(has_valid_items)

         if self._publish_context_select_mode:
@@ -617,14 +617,24 @@ class FilesWidget(QtWidgets.QWidget):
         ext_filter = "Work File (*{0})".format(
             " *".join(self._get_host_extensions())
         )
+        dir_key = "directory"
+        if qtpy.API in ("pyside", "pyside2", "pyside6"):
+            dir_key = "dir"
+
+        workfile_root = self._workfiles_root
+        # Find existing directory of workfile root
+        # - Qt will use 'cwd' instead, if path does not exist, which may lead
+        #   to igniter directory
+        while workfile_root:
+            if os.path.exists(workfile_root):
+                break
+            workfile_root = os.path.dirname(workfile_root)
+
         kwargs = {
             "caption": "Work Files",
-            "filter": ext_filter
+            "filter": ext_filter,
+            dir_key: workfile_root
         }
-        if qtpy.API in ("pyside", "pyside2", "pyside6"):
-            kwargs["dir"] = self._workfiles_root
-        else:
-            kwargs["directory"] = self._workfiles_root

         work_file = QtWidgets.QFileDialog.getOpenFileName(**kwargs)[0]
         if work_file:
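The walk-up loop added here is worth a standalone sketch. The following is an illustrative re-statement with a hypothetical helper name and paths, not the plugin's code:

```python
import os

def find_existing_root(path):
    """Return the deepest existing ancestor of `path`, or "" if none exists."""
    while path:
        if os.path.exists(path):
            return path
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the filesystem root without finding an existing dir
            return ""
        path = parent
    return ""

# If only "/proj/shots" exists on disk:
#   find_existing_root("/proj/shots/sh010/work") -> "/proj/shots"
# Without this, QFileDialog would silently fall back to the process cwd
# (e.g. the igniter directory), as the comment in the change notes.
```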
@@ -51,7 +51,7 @@ class CommentMatcher(object):
         # Create a regex group for extensions
         extensions = registered_host().file_extensions()
         any_extension = "(?:{})".format(
-            "|".join(re.escape(ext[1:]) for ext in extensions)
+            "|".join(re.escape(ext.lstrip(".")) for ext in extensions)
         )

         # Use placeholders that will never be in the filename
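For clarity, here is a standalone sketch of what this regex construction produces, assuming a host registering `.ma` and `.mb` extensions (hypothetical values):

```python
import re

# Hypothetical host extensions; real values come from registered_host().
extensions = [".ma", ".mb"]

any_extension = "(?:{})".format(
    "|".join(re.escape(ext.lstrip(".")) for ext in extensions)
)
print(any_extension)  # -> (?:ma|mb)

# The lstrip(".") change (instead of ext[1:]) keeps the pattern correct even
# if an extension is registered without a leading dot: "ma"[1:] would wrongly
# yield "a", while "ma".lstrip(".") stays "ma".
```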
@@ -373,7 +373,7 @@ class SaveAsDialog(QtWidgets.QDialog):
         if not data["comment"]:
             data.pop("comment", None)

-        data["ext"] = data["ext"][1:]
+        data["ext"] = data["ext"].lstrip(".")

         anatomy_filled = self.anatomy.format(data)
         return anatomy_filled[self.template_key]["file"]
@@ -413,7 +413,7 @@ class SaveAsDialog(QtWidgets.QDialog):
         if not data["comment"]:
             data.pop("comment", None)

-        data["ext"] = data["ext"][1:]
+        data["ext"] = data["ext"].lstrip(".")

         version = get_last_workfile_with_version(
             self.root, template, data, extensions
openpype/vendor/python/common/capture.py (vendored, 25 changed lines)
@@ -732,11 +732,23 @@ def _applied_viewport_options(options, panel):
     """Context manager for applying `options` to `panel`"""

     options = dict(ViewportOptions, **(options or {}))
+    plugin_options = options.pop("pluginObjects", {})
+
+    # BUGFIX Maya 2020 some keys in viewport options dict may not be unicode
+    # This is a local OpenPype edit to capture.py for issue #4730
+    # TODO: Remove when dropping Maya 2020 compatibility
+    if int(cmds.about(version=True)) <= 2020:
+        options = {
+            str(key): value for key, value in options.items()
+        }
+        plugin_options = {
+            str(key): value for key, value in plugin_options.items()
+        }
+
+    # Backwards compatibility for `pluginObjects` flattened into `options`
     # separate the plugin display filter options since they need to
     # be set differently (see #55)
-    plugins = cmds.pluginDisplayFilter(query=True, listFilters=True)
-    plugin_options = dict()
+    plugins = set(cmds.pluginDisplayFilter(query=True, listFilters=True))
     for plugin in plugins:
         if plugin in options:
             plugin_options[plugin] = options.pop(plugin)
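A quick illustration of the Maya 2020 problem this patch guards against. This is a sketch only, assuming a Python 2 based Maya session and a hypothetical panel name:

```python
from maya import cmds

# In Maya 2020 and older (Python 2), viewport option keys loaded from
# settings may be `unicode`, and unpacking them as **kwargs into a cmds
# call can raise a TypeError because keyword names must be `str`.
options = {u"grid": False, u"polymeshes": True}

# Coercing the keys to str, as the vendored fix does, avoids the issue:
options = {str(key): value for key, value in options.items()}
cmds.modelEditor("modelPanel4", edit=True, **options)  # hypothetical panel
```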
@@ -745,7 +757,14 @@ def _applied_viewport_options(options, panel):
-    try:
-        cmds.modelEditor(panel, edit=True, **options)
-    except TypeError as e:
-        logger.error("Cannot apply options {}".format(e))
+    # Try to set as much as possible of the state by setting them one by
+    # one. This way we can also report the failing key values explicitly.
+    for key, value in options.items():
+        try:
+            cmds.modelEditor(panel, edit=True, **{key: value})
+        except TypeError:
+            logger.error("Failing to apply option '{}': {}".format(key,
+                                                                   value))

     # plugin display filter options
     for plugin, state in plugin_options.items():
@@ -50,10 +50,6 @@ just one instance of this node type but if that is not so, validator will go thr
 instances and check the value there. Node type for **VRay** settings is `VRaySettingsNode`, for **Renderman**
 it is `rmanGlobals`, for **Redshift** it is `RedshiftOptions`.

-:::info getting attribute values
-If you do not know what an attributes value is supposed to be, for example for dropdown menu (enum), try changing the attribute and look in the script editor where it should log what the attribute was set to.
-:::
-
 ### Model Name Validator

 `ValidateRenderSettings`
@@ -110,6 +106,35 @@ or Deadlines **Draft Tile Assembler**.
 This is useful to fix some specific renderer glitches and advanced hacking of Maya Scene files. `Patch name` is label for patch for easier orientation.
 `Patch regex` is regex used to find line in file, after `Patch line` string is inserted. Note that you need to add line ending.

+### Extract GPU Cache
+
+![Extract GPU Cache settings](assets/maya-admin_gpu_cache.png)
+
+- **Step** Specifies how often samples are taken during file creation. By default, one sample of your object's transformations is taken every frame and saved to the Alembic file.
+
+  For example, a value of 2 caches the transformations of the current object at every other frame of the Cache Time Range.
+
+- **Step Save** Specifies which samples are saved during cache creation. For example, a value of 2 specifies that only every other sample specified by the Step # frame(s) option is saved to your Alembic file.
+
+- **Optimize Hierarchy** When on, nodes and objects in a selected hierarchy are consolidated to maximize the performance of the cache file during playback.
+- **Optimization Threshold** (Available only when Optimize Hierarchy is on.) Specifies the maximum number of vertices contained in a single draw primitive. The default value of 40000 may be ideal for most Maya supported graphics cards. When set to the default value, after optimization, each object in the GPU cache file(s) will have no more than 40000 vertices. This value can be set higher depending on the memory available on your system graphics card.
+
+- **Optimize Animations for Motion Blur** When on, objects with animated transform nodes display with motion blur when the cache is played back in Viewport 2.0 render mode. See Viewport 2.0 options.
+
+  Maya first determines if the GPU cache includes animation data. If the GPU cache is static and does not contain animation data, Maya does not optimize the GPU cache for motion blur.
+
+  :::note Motion Blur does not support Cached Playback.
+  :::
+
+- **Write Materials** When on, Maya exports the Lambert and Phong materials from source geometry to the GPU cache file. These materials display when the GPU-cached file is played back in Viewport 2.0.
+
+  GPU-cached objects support all the high-quality lighting and shading effects provided by the Viewport 2.0 rendering mode. See Viewport 2.0 options.
+
+  :::note Lambert and Phong materials do not display on GPU-cached files when they are played back in scene view's High Quality Rendering or Default Quality Rendering modes.
+  :::
+
+- **Use Base Tessellation** Exports geometry with base tessellation and no smoothing applied. If this setting is turned off, the extractor will export geometry with the current Smooth Mesh Preview setting applied.
+
 ### Extract Playblast Settings (review)
 These settings provide granular control over how the playblasts or reviews are produced in Maya.
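These settings map naturally onto flags of Maya's `gpuCache` command. As a rough sketch of how an export using them could look: the node name, frame range and output location below are made up, and the mapping of Step to `simulationRate` and Step Save to `sampleMultiplier` follows Maya's command reference rather than the extractor's exact code:

```python
from maya import cmds

# Hypothetical settings values matching the defaults shown above.
settings = {
    "step": 1.0,
    "stepSave": 1,
    "optimize": True,
    "optimizationThreshold": 40000,
    "optimizeAnimationsForMotionBlur": True,
    "writeMaterials": True,
    "useBaseTessellation": True,
}

cmds.gpuCache(
    "pCube1",                                  # node(s) to cache (made up)
    startTime=1001,
    endTime=1100,
    simulationRate=settings["step"],           # "Step": sampling rate in frames
    sampleMultiplier=settings["stepSave"],     # "Step Save": keep every Nth sample
    optimize=settings["optimize"],
    optimizationThreshold=settings["optimizationThreshold"],
    optimizeAnimationsForMotionBlur=settings["optimizeAnimationsForMotionBlur"],
    writeMaterials=settings["writeMaterials"],
    useBaseTessellation=settings["useBaseTessellation"],
    dataFormat="ogawa",
    directory="/tmp/gpu_cache",                # hypothetical output location
    fileName="pCube1_gpuCache",
)
```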
BIN website/docs/assets/maya-admin_gpu_cache.png (new binary file, 20 KiB, not shown)
@@ -194,6 +194,74 @@ A profile may generate multiple outputs from a single input. Each output must de
 - Nuke extractor settings path: `project_settings/nuke/publish/ExtractReviewDataMov/outputs/baking/add_custom_tags`
 - Filtering by input length. Input may be video, sequence or single image. It is possible that `.mp4` should be created only when input is video or sequence and to create review `.png` when input is single frame. In some cases the output should be created even if it's single frame or multi frame input.

+
+### Extract Burnin
+
+The plugin is responsible for adding burnins into review representations.
+
+Burnins are text values painted on top of the input and may be surrounded with a box in 6 available positions: `Top Left`, `Top Center`, `Top Right`, `Bottom Left`, `Bottom Center`, `Bottom Right`.
+
+The Extract Burnin plugin creates new representations based on plugin presets, representations in the instance and whether the reviewable matches the profile filter.
+A burnin can also be directly linked by name in the output definitions of the [Extract Review plug-in settings](#extract-review) so it _can_ be triggered without a matching profile.
+
+#### Burnin formatting options (`options`)
+
+The formatting options define the font style for the burnin texts.
+The X and Y offset define the margin around texts and (background) boxes.
+
+#### Burnin profiles (`profiles`)
+
+The plugin is skipped if `profiles` are not set at all. Profiles contain a list of profile items. Each burnin profile may specify filters for **hosts**, **tasks** and **families**. Filters work the same way as described in [Profile Filters](#profile-filters).
+
+#### Profile burnins
+
+A burnin profile may set multiple burnin outputs from one input. The burnin's name represents the unique **filename suffix** to avoid overriding files with the same name.
+
+| Key | Description | Type | Example |
+| --- | --- | --- | --- |
+| **Top Left** | Top left corner content. | str | "{dd}.{mm}.{yyyy}" |
+| **Top Centered** | Top center content. | str | "v{version:0>3}" |
+| **Top Right** | Top right corner content. | str | "Static text" |
+| **Bottom Left** | Bottom left corner content. | str | "{asset}" |
+| **Bottom Centered** | Bottom center content. | str | "{username}" |
+| **Bottom Right** | Bottom right corner content. | str | "{frame_start}-{current_frame}-{frame_end}" |
+
+Each burnin profile can be configured with additional family filtering and can
+add additional tags to the burnin representation; these can be configured under
+the profile's **Additional filtering** section.
+
+:::note Filename suffix
+The filename suffix is appended to the filename of the source representation. For
+example, if the source representation has suffix **"h264"** and the burnin
+suffix is **"client"** then the final suffix is **"h264_client"**.
+:::
+
+**Available keys in burnin content**
+
+- It is possible to use the same keys as in [Anatomy](admin_settings_project_anatomy.md#available-template-keys).
+- It is allowed to use Anatomy templates themselves in burnins if they can be filled with available data.
+
+- Additional keys in burnins:
+
+| Burnin key | Description |
+| --- | --- |
+| frame_start | First frame number. |
+| frame_end | Last frame number. |
+| current_frame | Frame number for each frame. |
+| duration | Count of frames. |
+| resolution_width | Resolution width. |
+| resolution_height | Resolution height. |
+| fps | Fps of the output. |
+| timecode | Timecode from frame start and fps. |
+| focalLength | **Only available in Maya**<br /><br />Camera focal length per frame. Use syntax `{focalLength:.2f}` for decimal truncating. E.g. `35.234985` with `{focalLength:.2f}` would produce `35.23`, whereas `{focalLength:.0f}` would produce `35`. |
+
+:::warning
+`timecode` is a specific key that can be used **only at the end of content**. (`"BOTTOM_RIGHT": "TC: {timecode}"`)
+:::
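To make the profile structure concrete, an illustrative burnin definition in settings-style JSON follows. The surrounding keys and the `"client"` burnin name are assumptions for the example; only the position keys and template keys come from the tables above:

```json
{
    "profiles": [
        {
            "families": ["render"],
            "hosts": ["maya"],
            "burnins": {
                "client": {
                    "TOP_LEFT": "{dd}.{mm}.{yyyy}",
                    "TOP_CENTERED": "v{version:0>3}",
                    "TOP_RIGHT": "Static text",
                    "BOTTOM_LEFT": "{asset}",
                    "BOTTOM_CENTERED": "{username}",
                    "BOTTOM_RIGHT": "TC: {timecode}"
                }
            }
        }
    ]
}
```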
+
+### IntegrateAssetNew
+
+Saves information for all published subsets into the DB; published assets are afterwards available for other hosts, tools and tasks.