diff --git a/.github/pr-branch-labeler.yml b/.github/pr-branch-labeler.yml
index ca82051006..b434326236 100644
--- a/.github/pr-branch-labeler.yml
+++ b/.github/pr-branch-labeler.yml
@@ -12,4 +12,4 @@
# Apply label "release" if base matches "release/*"
'Bump Minor':
- base: "release/next-minor"
\ No newline at end of file
+ base: "release/next-minor"
diff --git a/.github/workflows/project_actions.yml b/.github/workflows/project_actions.yml
index f425b233d4..b21946f0ee 100644
--- a/.github/workflows/project_actions.yml
+++ b/.github/workflows/project_actions.yml
@@ -5,28 +5,74 @@ on:
types: [opened, assigned]
pull_request_review:
types: [submitted]
+ issue_comment:
+ types: [created]
+ pull_request_review_comment:
+ types: [created]
jobs:
- pr_review_requested:
- name: pr_review_requested
+
+ pr_review_started:
+ name: pr_review_started
runs-on: ubuntu-latest
- if: github.event_name == 'pull_request_review' && github.event.review.state == 'changes_requested'
+ # -----------------------------
+ # conditions are:
+  #   - PR issue comment which is not from Ynbot
+ # - PR review comment which is not Hound (or any other bot)
+ # - PR review submitted which is not from Hound (or any other bot) and is not 'Changes requested'
+ # - make sure it only runs if not forked repo
+ # -----------------------------
+ if: |
+ (github.event_name == 'issue_comment' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.comment.user.id != 82967070) ||
+ (github.event_name == 'pull_request_review_comment' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.comment.user.type != 'Bot') ||
+ (github.event_name == 'pull_request_review' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.review.state != 'changes_requested' && github.event.review.user.type != 'Bot')
steps:
- - name: Move PR to 'Change Requested'
+ - name: Move PR to 'Review In Progress'
uses: leonsteinhaeuser/project-beta-automations@v2.1.0
with:
gh_token: ${{ secrets.YNPUT_BOT_TOKEN }}
organization: ynput
project_id: 11
- resource_node_id: ${{ github.event.pull_request.node_id }}
- status_value: Change Requested
+ resource_node_id: ${{ github.event.pull_request.node_id || github.event.issue.node_id }}
+ status_value: Review In Progress
+
+ pr_review_requested:
+ # -----------------------------
+ # Resets Clickup Task status to 'In Progress' after 'Changes Requested' were submitted to PR
+ # It only runs if custom clickup task id was found in ref branch of PR
+ # -----------------------------
+ name: pr_review_requested
+ runs-on: ubuntu-latest
+ if: github.event_name == 'pull_request_review' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.review.state == 'changes_requested'
+ steps:
+ - name: Set branch env
+        run: echo "BRANCH_NAME=${{ github.event.pull_request.head.ref }}" >> $GITHUB_ENV
+ - name: Get ClickUp ID from ref head name
+ id: get_cuID
+ run: |
+ echo ${{ env.BRANCH_NAME }}
+ echo "cuID=$(echo $BRANCH_NAME | sed 's/.*\/\(OP\-[0-9]\{4\}\).*/\1/')" >> $GITHUB_OUTPUT
+
+ - name: Print ClickUp ID
+ run: echo ${{ steps.get_cuID.outputs.cuID }}
+
+ - name: Move found Clickup task to 'Review in Progress'
+ if: steps.get_cuID.outputs.cuID
+ run: |
+ curl -i -X PUT \
+ 'https://api.clickup.com/api/v2/task/${{ steps.get_cuID.outputs.cuID }}?custom_task_ids=true&team_id=${{secrets.CLICKUP_TEAM_ID}}' \
+ -H 'Authorization: ${{secrets.CLICKUP_API_KEY}}' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "status": "in progress"
+ }'
size-label:
name: pr_size_label
runs-on: ubuntu-latest
if: |
- ${{(github.event_name == 'pull_request' && github.event.action == 'assigned')
- || (github.event_name == 'pull_request' && github.event.action == 'opened')}}
+ (github.event_name == 'pull_request' && github.event.action == 'assigned') ||
+ (github.event_name == 'pull_request' && github.event.action == 'opened')
steps:
- name: Add size label
@@ -49,8 +95,8 @@ jobs:
name: pr_branch_label
runs-on: ubuntu-latest
if: |
- ${{(github.event_name == 'pull_request' && github.event.action == 'assigned')
- || (github.event_name == 'pull_request' && github.event.action == 'opened')}}
+ (github.event_name == 'pull_request' && github.event.action == 'assigned') ||
+ (github.event_name == 'pull_request' && github.event.action == 'opened')
steps:
- name: Label PRs - Branch name detection
uses: ffittschen/pr-branch-labeler@v1
@@ -61,8 +107,8 @@ jobs:
name: pr_globe_label
runs-on: ubuntu-latest
if: |
- ${{(github.event_name == 'pull_request' && github.event.action == 'assigned')
- || (github.event_name == 'pull_request' && github.event.action == 'opened')}}
+ (github.event_name == 'pull_request' && github.event.action == 'assigned') ||
+ (github.event_name == 'pull_request' && github.event.action == 'opened')
steps:
- name: Label PRs - Globe detection
uses: actions/labeler@v4.0.3
diff --git a/openpype/hosts/fusion/plugins/load/actions.py b/openpype/hosts/fusion/plugins/load/actions.py
index 3b14f022e5..f83ab433ee 100644
--- a/openpype/hosts/fusion/plugins/load/actions.py
+++ b/openpype/hosts/fusion/plugins/load/actions.py
@@ -72,8 +72,7 @@ class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin):
return
# Include handles
- handles = version_data.get("handles", 0)
- start -= handles
- end += handles
+ start -= version_data.get("handleStart", 0)
+ end += version_data.get("handleEnd", 0)
lib.update_frame_range(start, end)
diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py
index 13f5a62ec3..f19dc64992 100644
--- a/openpype/hosts/houdini/api/lib.py
+++ b/openpype/hosts/houdini/api/lib.py
@@ -479,23 +479,13 @@ def reset_framerange():
frame_start = asset_data.get("frameStart")
frame_end = asset_data.get("frameEnd")
- # Backwards compatibility
- if frame_start is None or frame_end is None:
- frame_start = asset_data.get("edit_in")
- frame_end = asset_data.get("edit_out")
if frame_start is None or frame_end is None:
log.warning("No edit information found for %s" % asset_name)
return
- handles = asset_data.get("handles") or 0
- handle_start = asset_data.get("handleStart")
- if handle_start is None:
- handle_start = handles
-
- handle_end = asset_data.get("handleEnd")
- if handle_end is None:
- handle_end = handles
+ handle_start = asset_data.get("handleStart", 0)
+ handle_end = asset_data.get("handleEnd", 0)
frame_start -= int(handle_start)
frame_end += int(handle_end)
diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py
index 9793679b45..45e2f8f87f 100644
--- a/openpype/hosts/houdini/api/pipeline.py
+++ b/openpype/hosts/houdini/api/pipeline.py
@@ -144,13 +144,10 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
"""
obj_network = hou.node("/obj")
- op_ctx = obj_network.createNode("null", node_name="OpenPypeContext")
-
- # A null in houdini by default comes with content inside to visualize
- # the null. However since we explicitly want to hide the node lets
- # remove the content and disable the display flag of the node
- for node in op_ctx.children():
- node.destroy()
+ op_ctx = obj_network.createNode("subnet",
+ node_name="OpenPypeContext",
+ run_init_scripts=False,
+ load_contents=False)
op_ctx.moveToGoodPosition()
op_ctx.setBuiltExplicitly(False)
diff --git a/openpype/hosts/max/api/lib.py b/openpype/hosts/max/api/lib.py
index ac7d75db08..ad9a450cad 100644
--- a/openpype/hosts/max/api/lib.py
+++ b/openpype/hosts/max/api/lib.py
@@ -209,19 +209,12 @@ def get_frame_range() -> dict:
asset = get_current_project_asset()
frame_start = asset["data"].get("frameStart")
frame_end = asset["data"].get("frameEnd")
- # Backwards compatibility
- if frame_start is None or frame_end is None:
- frame_start = asset["data"].get("edit_in")
- frame_end = asset["data"].get("edit_out")
+
if frame_start is None or frame_end is None:
return
- handles = asset["data"].get("handles") or 0
- handle_start = asset["data"].get("handleStart")
- if handle_start is None:
- handle_start = handles
- handle_end = asset["data"].get("handleEnd")
- if handle_end is None:
- handle_end = handles
+
+ handle_start = asset["data"].get("handleStart", 0)
+ handle_end = asset["data"].get("handleEnd", 0)
return {
"frameStart": frame_start,
"frameEnd": frame_end,
diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py
index 1a62e7dbc3..22803a2e3a 100644
--- a/openpype/hosts/maya/api/lib.py
+++ b/openpype/hosts/maya/api/lib.py
@@ -1367,6 +1367,71 @@ def set_id(node, unique_id, overwrite=False):
cmds.setAttr(attr, unique_id, type="string")
+def get_attribute(plug,
+ asString=False,
+ expandEnvironmentVariables=False,
+ **kwargs):
+ """Maya getAttr with some fixes based on `pymel.core.general.getAttr()`.
+
+ Like Pymel getAttr this applies some changes to `maya.cmds.getAttr`
+ - maya pointlessly returned vector results as a tuple wrapped in a list
+ (ex. '[(1,2,3)]'). This command unpacks the vector for you.
+ - when getting a multi-attr, maya would raise an error, but this will
+ return a list of values for the multi-attr
+ - added support for getting message attributes by returning the
+ connections instead
+
+ Note that the asString + expandEnvironmentVariables argument naming
+ convention matches the `maya.cmds.getAttr` arguments so that it can
+ act as a direct replacement for it.
+
+ Args:
+ plug (str): Node's attribute plug as `node.attribute`
+ asString (bool): Return string value for enum attributes instead
+ of the index. Note that the return value can be dependent on the
+ UI language Maya is running in.
+ expandEnvironmentVariables (bool): Expand any environment variable and
+ (tilde characters on UNIX) found in string attributes which are
+ returned.
+
+ Kwargs:
+ Supports the keyword arguments of `maya.cmds.getAttr`
+
+ Returns:
+ object: The value of the maya attribute.
+
+ """
+ attr_type = cmds.getAttr(plug, type=True)
+ if asString:
+ kwargs["asString"] = True
+ if expandEnvironmentVariables:
+ kwargs["expandEnvironmentVariables"] = True
+ try:
+ res = cmds.getAttr(plug, **kwargs)
+ except RuntimeError:
+ if attr_type == "message":
+ return cmds.listConnections(plug)
+
+ node, attr = plug.split(".", 1)
+ children = cmds.attributeQuery(attr, node=node, listChildren=True)
+ if children:
+ return [
+ get_attribute("{}.{}".format(node, child))
+ for child in children
+ ]
+
+ raise
+
+ # Convert vector result wrapped in tuple
+ if isinstance(res, list) and len(res):
+ if isinstance(res[0], tuple) and len(res):
+ if attr_type in {'pointArray', 'vectorArray'}:
+ return res
+ return res[0]
+
+ return res
+
+
def set_attribute(attribute, value, node):
"""Adjust attributes based on the value from the attribute data
@@ -1881,6 +1946,12 @@ def remove_other_uv_sets(mesh):
cmds.removeMultiInstance(attr, b=True)
+def get_node_parent(node):
+ """Return full path name for parent of node"""
+ parents = cmds.listRelatives(node, parent=True, fullPath=True)
+ return parents[0] if parents else None
+
+
def get_id_from_sibling(node, history_only=True):
"""Return first node id in the history chain that matches this node.
@@ -1904,10 +1975,6 @@ def get_id_from_sibling(node, history_only=True):
"""
- def _get_parent(node):
- """Return full path name for parent of node"""
- return cmds.listRelatives(node, parent=True, fullPath=True)
-
node = cmds.ls(node, long=True)[0]
# Find all similar nodes in history
@@ -1919,8 +1986,8 @@ def get_id_from_sibling(node, history_only=True):
similar_nodes = [x for x in similar_nodes if x != node]
# The node *must be* under the same parent
- parent = _get_parent(node)
- similar_nodes = [i for i in similar_nodes if _get_parent(i) == parent]
+ parent = get_node_parent(node)
+ similar_nodes = [i for i in similar_nodes if get_node_parent(i) == parent]
# Check all of the remaining similar nodes and take the first one
# with an id and assume it's the original.
@@ -2073,23 +2140,13 @@ def get_frame_range():
frame_start = asset["data"].get("frameStart")
frame_end = asset["data"].get("frameEnd")
- # Backwards compatibility
- if frame_start is None or frame_end is None:
- frame_start = asset["data"].get("edit_in")
- frame_end = asset["data"].get("edit_out")
if frame_start is None or frame_end is None:
cmds.warning("No edit information found for %s" % asset_name)
return
- handles = asset["data"].get("handles") or 0
- handle_start = asset["data"].get("handleStart")
- if handle_start is None:
- handle_start = handles
-
- handle_end = asset["data"].get("handleEnd")
- if handle_end is None:
- handle_end = handles
+ handle_start = asset["data"].get("handleStart") or 0
+ handle_end = asset["data"].get("handleEnd") or 0
return {
"frameStart": frame_start,
@@ -3176,38 +3233,78 @@ def set_colorspace():
def parent_nodes(nodes, parent=None):
# type: (list, str) -> list
"""Context manager to un-parent provided nodes and return them back."""
- import pymel.core as pm # noqa
- parent_node = None
+ def _as_mdagpath(node):
+ """Return MDagPath for node path."""
+ if not node:
+ return
+ sel = OpenMaya.MSelectionList()
+ sel.add(node)
+ return sel.getDagPath(0)
+
+ # We can only parent dag nodes so we ensure input contains only dag nodes
+ nodes = cmds.ls(nodes, type="dagNode", long=True)
+ if not nodes:
+ # opt-out early
+ yield
+ return
+
+ parent_node_path = None
delete_parent = False
-
if parent:
if not cmds.objExists(parent):
- parent_node = pm.createNode("transform", n=parent, ss=False)
+ parent_node = cmds.createNode("transform",
+ name=parent,
+ skipSelect=False)
delete_parent = True
else:
- parent_node = pm.PyNode(parent)
+ parent_node = parent
+ parent_node_path = cmds.ls(parent_node, long=True)[0]
+
+ # Store original parents
node_parents = []
for node in nodes:
- n = pm.PyNode(node)
- try:
- root = pm.listRelatives(n, parent=1)[0]
- except IndexError:
- root = None
- node_parents.append((n, root))
+ node_parent = get_node_parent(node)
+ node_parents.append((_as_mdagpath(node), _as_mdagpath(node_parent)))
+
try:
- for node in node_parents:
- if not parent:
- node[0].setParent(world=True)
+ for node, node_parent in node_parents:
+ node_parent_path = node_parent.fullPathName() if node_parent else None # noqa
+ if node_parent_path == parent_node_path:
+ # Already a child
+ continue
+
+ if parent_node_path:
+ cmds.parent(node.fullPathName(), parent_node_path)
else:
- node[0].setParent(parent_node)
+ cmds.parent(node.fullPathName(), world=True)
+
yield
finally:
- for node in node_parents:
- if node[1]:
- node[0].setParent(node[1])
+ # Reparent to original parents
+ for node, original_parent in node_parents:
+ node_path = node.fullPathName()
+ if not node_path:
+ # Node must have been deleted
+ continue
+
+ node_parent_path = get_node_parent(node_path)
+
+ original_parent_path = None
+ if original_parent:
+ original_parent_path = original_parent.fullPathName()
+ if not original_parent_path:
+ # Original parent node must have been deleted
+ continue
+
+ if node_parent_path != original_parent_path:
+ if not original_parent_path:
+ cmds.parent(node_path, world=True)
+ else:
+ cmds.parent(node_path, original_parent_path)
+
if delete_parent:
- pm.delete(parent_node)
+ cmds.delete(parent_node_path)
@contextlib.contextmanager
@@ -3675,3 +3772,43 @@ def len_flattened(components):
else:
n += 1
return n
+
+
+def get_all_children(nodes):
+ """Return all children of `nodes` including each instanced child.
+ Using maya.cmds.listRelatives(allDescendents=True) includes only the first
+ instance. As such, this function acts as an optimal replacement with a
+ focus on a fast query.
+
+ """
+
+ sel = OpenMaya.MSelectionList()
+ traversed = set()
+ iterator = OpenMaya.MItDag(OpenMaya.MItDag.kDepthFirst)
+ for node in nodes:
+
+ if node in traversed:
+ # Ignore if already processed as a child
+ # before
+ continue
+
+ sel.clear()
+ sel.add(node)
+ dag = sel.getDagPath(0)
+
+ iterator.reset(dag)
+ # ignore self
+ iterator.next() # noqa: B305
+ while not iterator.isDone():
+
+ path = iterator.fullPathName()
+
+ if path in traversed:
+ iterator.prune()
+ iterator.next() # noqa: B305
+ continue
+
+ traversed.add(path)
+ iterator.next() # noqa: B305
+
+ return list(traversed)
diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py
index 324496c964..a6bcd003a5 100644
--- a/openpype/hosts/maya/api/lib_renderproducts.py
+++ b/openpype/hosts/maya/api/lib_renderproducts.py
@@ -857,6 +857,7 @@ class RenderProductsVray(ARenderProducts):
if default_ext in {"exr (multichannel)", "exr (deep)"}:
default_ext = "exr"
+ colorspace = lib.get_color_management_output_transform()
products = []
# add beauty as default when not disabled
@@ -868,7 +869,7 @@ class RenderProductsVray(ARenderProducts):
productName="",
ext=default_ext,
camera=camera,
- colorspace=lib.get_color_management_output_transform(),
+ colorspace=colorspace,
multipart=self.multipart
)
)
@@ -882,6 +883,7 @@ class RenderProductsVray(ARenderProducts):
productName="Alpha",
ext=default_ext,
camera=camera,
+ colorspace=colorspace,
multipart=self.multipart
)
)
@@ -917,7 +919,8 @@ class RenderProductsVray(ARenderProducts):
product = RenderProduct(productName=name,
ext=default_ext,
aov=aov,
- camera=camera)
+ camera=camera,
+ colorspace=colorspace)
products.append(product)
# Continue as we've processed this special case AOV
continue
@@ -929,7 +932,7 @@ class RenderProductsVray(ARenderProducts):
ext=default_ext,
aov=aov,
camera=camera,
- colorspace=lib.get_color_management_output_transform()
+ colorspace=colorspace
)
products.append(product)
@@ -1130,6 +1133,7 @@ class RenderProductsRedshift(ARenderProducts):
products = []
light_groups_enabled = False
has_beauty_aov = False
+ colorspace = lib.get_color_management_output_transform()
for aov in aovs:
enabled = self._get_attr(aov, "enabled")
if not enabled:
@@ -1173,7 +1177,8 @@ class RenderProductsRedshift(ARenderProducts):
ext=ext,
multipart=False,
camera=camera,
- driver=aov)
+ driver=aov,
+ colorspace=colorspace)
products.append(product)
if light_groups:
@@ -1188,7 +1193,8 @@ class RenderProductsRedshift(ARenderProducts):
ext=ext,
multipart=False,
camera=camera,
- driver=aov)
+ driver=aov,
+ colorspace=colorspace)
products.append(product)
# When a Beauty AOV is added manually, it will be rendered as
@@ -1204,7 +1210,8 @@ class RenderProductsRedshift(ARenderProducts):
RenderProduct(productName=beauty_name,
ext=ext,
multipart=self.multipart,
- camera=camera))
+ camera=camera,
+ colorspace=colorspace))
return products
@@ -1236,6 +1243,8 @@ class RenderProductsRenderman(ARenderProducts):
"""
from rfm2.api.displays import get_displays # noqa
+ colorspace = lib.get_color_management_output_transform()
+
cameras = [
self.sanitize_camera_name(c)
for c in self.get_renderable_cameras()
@@ -1302,7 +1311,8 @@ class RenderProductsRenderman(ARenderProducts):
productName=aov_name,
ext=extensions,
camera=camera,
- multipart=True
+ multipart=True,
+ colorspace=colorspace
)
if has_cryptomatte and matte_enabled:
@@ -1311,7 +1321,8 @@ class RenderProductsRenderman(ARenderProducts):
aov=cryptomatte_aov,
ext=extensions,
camera=camera,
- multipart=True
+ multipart=True,
+ colorspace=colorspace
)
else:
# this code should handle the case where no multipart
diff --git a/openpype/hosts/maya/api/lib_rendersetup.py b/openpype/hosts/maya/api/lib_rendersetup.py
index e616f26e1b..440ee21a52 100644
--- a/openpype/hosts/maya/api/lib_rendersetup.py
+++ b/openpype/hosts/maya/api/lib_rendersetup.py
@@ -19,6 +19,8 @@ from maya.app.renderSetup.model.override import (
UniqueOverride
)
+from openpype.hosts.maya.api.lib import get_attribute
+
EXACT_MATCH = 0
PARENT_MATCH = 1
CLIENT_MATCH = 2
@@ -96,9 +98,6 @@ def get_attr_in_layer(node_attr, layer):
"""
- # Delay pymel import to here because it's slow to load
- import pymel.core as pm
-
def _layer_needs_update(layer):
"""Return whether layer needs updating."""
# Use `getattr` as e.g. DEFAULT_RENDER_LAYER does not have
@@ -125,7 +124,7 @@ def get_attr_in_layer(node_attr, layer):
node = history_overrides[-1] if history_overrides else override
node_attr_ = node + ".original"
- return pm.getAttr(node_attr_, asString=True)
+ return get_attribute(node_attr_, asString=True)
layer = get_rendersetup_layer(layer)
rs = renderSetup.instance()
@@ -145,7 +144,7 @@ def get_attr_in_layer(node_attr, layer):
# we will let it error out.
rs.switchToLayer(current_layer)
- return pm.getAttr(node_attr, asString=True)
+ return get_attribute(node_attr, asString=True)
overrides = get_attr_overrides(node_attr, layer)
default_layer_value = get_default_layer_value(node_attr)
@@ -156,7 +155,7 @@ def get_attr_in_layer(node_attr, layer):
for match, layer_override, index in overrides:
if isinstance(layer_override, AbsOverride):
# Absolute override
- value = pm.getAttr(layer_override.name() + ".attrValue")
+ value = get_attribute(layer_override.name() + ".attrValue")
if match == EXACT_MATCH:
# value = value
pass
@@ -168,8 +167,8 @@ def get_attr_in_layer(node_attr, layer):
elif isinstance(layer_override, RelOverride):
# Relative override
# Value = Original * Multiply + Offset
- multiply = pm.getAttr(layer_override.name() + ".multiply")
- offset = pm.getAttr(layer_override.name() + ".offset")
+ multiply = get_attribute(layer_override.name() + ".multiply")
+ offset = get_attribute(layer_override.name() + ".offset")
if match == EXACT_MATCH:
value = value * multiply + offset
diff --git a/openpype/hosts/maya/plugins/load/load_arnold_standin.py b/openpype/hosts/maya/plugins/load/load_arnold_standin.py
index 21b2246f6c..7c3a732389 100644
--- a/openpype/hosts/maya/plugins/load/load_arnold_standin.py
+++ b/openpype/hosts/maya/plugins/load/load_arnold_standin.py
@@ -84,7 +84,7 @@ class ArnoldStandinLoader(load.LoaderPlugin):
sequence = is_sequence(os.listdir(os.path.dirname(self.fname)))
cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
- nodes = [root, standin]
+ nodes = [root, standin, standin_shape]
if operator is not None:
nodes.append(operator)
self[:] = nodes
@@ -183,7 +183,7 @@ class ArnoldStandinLoader(load.LoaderPlugin):
# If no proxy exists, the string operator won't replace anything.
cmds.setAttr(
string_replace_operator + ".match",
- "resources/" + proxy_basename,
+ proxy_basename,
type="string"
)
cmds.setAttr(
diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py
index 6f60cb5726..9e7fd96bdb 100644
--- a/openpype/hosts/maya/plugins/load/load_audio.py
+++ b/openpype/hosts/maya/plugins/load/load_audio.py
@@ -11,7 +11,7 @@ from openpype.pipeline import (
get_representation_path,
)
from openpype.hosts.maya.api.pipeline import containerise
-from openpype.hosts.maya.api.lib import unique_namespace
+from openpype.hosts.maya.api.lib import unique_namespace, get_container_members
class AudioLoader(load.LoaderPlugin):
@@ -52,17 +52,15 @@ class AudioLoader(load.LoaderPlugin):
)
def update(self, container, representation):
- import pymel.core as pm
- audio_node = None
- for node in pm.PyNode(container["objectName"]).members():
- if node.nodeType() == "audio":
- audio_node = node
+ members = get_container_members(container)
+ audio_nodes = cmds.ls(members, type="audio")
- assert audio_node is not None, "Audio node not found."
+        assert audio_nodes, "Audio node not found."
+ audio_node = audio_nodes[0]
path = get_representation_path(representation)
- audio_node.filename.set(path)
+ cmds.setAttr("{}.filename".format(audio_node), path, type="string")
cmds.setAttr(
container["objectName"] + ".representation",
str(representation["_id"]),
@@ -80,8 +78,12 @@ class AudioLoader(load.LoaderPlugin):
asset = get_asset_by_id(
project_name, subset["parent"], fields=["parent"]
)
- audio_node.sourceStart.set(1 - asset["data"]["frameStart"])
- audio_node.sourceEnd.set(asset["data"]["frameEnd"])
+
+ source_start = 1 - asset["data"]["frameStart"]
+ source_end = asset["data"]["frameEnd"]
+
+ cmds.setAttr("{}.sourceStart".format(audio_node), source_start)
+ cmds.setAttr("{}.sourceEnd".format(audio_node), source_end)
def switch(self, container, representation):
self.update(container, representation)
diff --git a/openpype/hosts/maya/plugins/load/load_gpucache.py b/openpype/hosts/maya/plugins/load/load_gpucache.py
index 07e5734f43..794b21eb5d 100644
--- a/openpype/hosts/maya/plugins/load/load_gpucache.py
+++ b/openpype/hosts/maya/plugins/load/load_gpucache.py
@@ -1,5 +1,9 @@
import os
+import maya.cmds as cmds
+
+from openpype.hosts.maya.api.pipeline import containerise
+from openpype.hosts.maya.api.lib import unique_namespace
from openpype.pipeline import (
load,
get_representation_path
@@ -11,19 +15,15 @@ class GpuCacheLoader(load.LoaderPlugin):
"""Load Alembic as gpuCache"""
families = ["model", "animation", "proxyAbc", "pointcache"]
- representations = ["abc"]
+ representations = ["abc", "gpu_cache"]
- label = "Import Gpu Cache"
+ label = "Load Gpu Cache"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, data):
- import maya.cmds as cmds
- from openpype.hosts.maya.api.pipeline import containerise
- from openpype.hosts.maya.api.lib import unique_namespace
-
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
@@ -42,10 +42,9 @@ class GpuCacheLoader(load.LoaderPlugin):
c = colors.get('model')
if c is not None:
cmds.setAttr(root + ".useOutlinerColor", 1)
- cmds.setAttr(root + ".outlinerColor",
- (float(c[0])/255),
- (float(c[1])/255),
- (float(c[2])/255)
+ cmds.setAttr(
+ root + ".outlinerColor",
+ (float(c[0]) / 255), (float(c[1]) / 255), (float(c[2]) / 255)
)
# Create transform with shape
@@ -74,9 +73,6 @@ class GpuCacheLoader(load.LoaderPlugin):
loader=self.__class__.__name__)
def update(self, container, representation):
-
- import maya.cmds as cmds
-
path = get_representation_path(representation)
# Update the cache
@@ -96,7 +92,6 @@ class GpuCacheLoader(load.LoaderPlugin):
self.update(container, representation)
def remove(self, container):
- import maya.cmds as cmds
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
diff --git a/openpype/hosts/maya/plugins/load/load_image_plane.py b/openpype/hosts/maya/plugins/load/load_image_plane.py
index 6421f3ffe2..bf13708e9b 100644
--- a/openpype/hosts/maya/plugins/load/load_image_plane.py
+++ b/openpype/hosts/maya/plugins/load/load_image_plane.py
@@ -11,11 +11,26 @@ from openpype.pipeline import (
get_representation_path
)
from openpype.hosts.maya.api.pipeline import containerise
-from openpype.hosts.maya.api.lib import unique_namespace
+from openpype.hosts.maya.api.lib import (
+ unique_namespace,
+ namespaced,
+ pairwise,
+ get_container_members
+)
from maya import cmds
+def disconnect_inputs(plug):
+ overrides = cmds.listConnections(plug,
+ source=True,
+ destination=False,
+ plugs=True,
+ connections=True) or []
+ for dest, src in pairwise(overrides):
+ cmds.disconnectAttr(src, dest)
+
+
class CameraWindow(QtWidgets.QDialog):
def __init__(self, cameras):
@@ -74,6 +89,7 @@ class CameraWindow(QtWidgets.QDialog):
self.camera = None
self.close()
+
class ImagePlaneLoader(load.LoaderPlugin):
"""Specific loader of plate for image planes on selected camera."""
@@ -84,9 +100,7 @@ class ImagePlaneLoader(load.LoaderPlugin):
color = "orange"
def load(self, context, name, namespace, data, options=None):
- import pymel.core as pm
- new_nodes = []
image_plane_depth = 1000
asset = context['asset']['name']
namespace = namespace or unique_namespace(
@@ -96,16 +110,20 @@ class ImagePlaneLoader(load.LoaderPlugin):
)
# Get camera from user selection.
- camera = None
# is_static_image_plane = None
# is_in_all_views = None
- if data:
- camera = pm.PyNode(data.get("camera"))
+ camera = data.get("camera") if data else None
if not camera:
- cameras = pm.ls(type="camera")
- camera_names = {x.getParent().name(): x for x in cameras}
- camera_names["Create new camera."] = "create_camera"
+ cameras = cmds.ls(type="camera")
+
+ # Cameras by names
+ camera_names = {}
+ for camera in cameras:
+ parent = cmds.listRelatives(camera, parent=True, path=True)[0]
+ camera_names[parent] = camera
+
+ camera_names["Create new camera."] = "create-camera"
window = CameraWindow(camera_names.keys())
window.exec_()
# Skip if no camera was selected (Dialog was closed)
@@ -113,43 +131,48 @@ class ImagePlaneLoader(load.LoaderPlugin):
return
camera = camera_names[window.camera]
- if camera == "create_camera":
- camera = pm.createNode("camera")
+ if camera == "create-camera":
+ camera = cmds.createNode("camera")
if camera is None:
return
try:
- camera.displayResolution.set(1)
- camera.farClipPlane.set(image_plane_depth * 10)
+ cmds.setAttr("{}.displayResolution".format(camera), True)
+ cmds.setAttr("{}.farClipPlane".format(camera),
+ image_plane_depth * 10)
except RuntimeError:
pass
# Create image plane
- image_plane_transform, image_plane_shape = pm.imagePlane(
- fileName=context["representation"]["data"]["path"],
- camera=camera)
- image_plane_shape.depth.set(image_plane_depth)
+ with namespaced(namespace):
+ # Create inside the namespace
+ image_plane_transform, image_plane_shape = cmds.imagePlane(
+ fileName=context["representation"]["data"]["path"],
+ camera=camera
+ )
+ start_frame = cmds.playbackOptions(query=True, min=True)
+ end_frame = cmds.playbackOptions(query=True, max=True)
-
- start_frame = pm.playbackOptions(q=True, min=True)
- end_frame = pm.playbackOptions(q=True, max=True)
-
- image_plane_shape.frameOffset.set(0)
- image_plane_shape.frameIn.set(start_frame)
- image_plane_shape.frameOut.set(end_frame)
- image_plane_shape.frameCache.set(end_frame)
- image_plane_shape.useFrameExtension.set(1)
+ for attr, value in {
+ "depth": image_plane_depth,
+ "frameOffset": 0,
+ "frameIn": start_frame,
+ "frameOut": end_frame,
+ "frameCache": end_frame,
+ "useFrameExtension": True
+ }.items():
+ plug = "{}.{}".format(image_plane_shape, attr)
+ cmds.setAttr(plug, value)
movie_representations = ["mov", "preview"]
if context["representation"]["name"] in movie_representations:
- # Need to get "type" by string, because its a method as well.
- pm.Attribute(image_plane_shape + ".type").set(2)
+ cmds.setAttr(image_plane_shape + ".type", 2)
# Ask user whether to use sequence or still image.
if context["representation"]["name"] == "exr":
# Ensure OpenEXRLoader plugin is loaded.
- pm.loadPlugin("OpenEXRLoader.mll", quiet=True)
+ cmds.loadPlugin("OpenEXRLoader", quiet=True)
message = (
"Hold image sequence on first frame?"
@@ -161,32 +184,18 @@ class ImagePlaneLoader(load.LoaderPlugin):
None,
"Frame Hold.",
message,
- QtWidgets.QMessageBox.Ok,
- QtWidgets.QMessageBox.Cancel
+ QtWidgets.QMessageBox.Yes,
+ QtWidgets.QMessageBox.No
)
- if reply == QtWidgets.QMessageBox.Ok:
- # find the input and output of frame extension
- expressions = image_plane_shape.frameExtension.inputs()
- frame_ext_output = image_plane_shape.frameExtension.outputs()
- if expressions:
- # the "time1" node is non-deletable attr
- # in Maya, use disconnectAttr instead
- pm.disconnectAttr(expressions, frame_ext_output)
+ if reply == QtWidgets.QMessageBox.Yes:
+ frame_extension_plug = "{}.frameExtension".format(image_plane_shape) # noqa
- if not image_plane_shape.frameExtension.isFreeToChange():
- raise RuntimeError("Can't set frame extension for {}".format(image_plane_shape)) # noqa
- # get the node of time instead and set the time for it.
- image_plane_shape.frameExtension.set(start_frame)
+ # Remove current frame expression
+ disconnect_inputs(frame_extension_plug)
- new_nodes.extend(
- [
- image_plane_transform.longName().split("|")[-1],
- image_plane_shape.longName().split("|")[-1]
- ]
- )
+ cmds.setAttr(frame_extension_plug, start_frame)
- for node in new_nodes:
- pm.rename(node, "{}:{}".format(namespace, node))
+ new_nodes = [image_plane_transform, image_plane_shape]
return containerise(
name=name,
@@ -197,21 +206,19 @@ class ImagePlaneLoader(load.LoaderPlugin):
)
def update(self, container, representation):
- import pymel.core as pm
- image_plane_shape = None
- for node in pm.PyNode(container["objectName"]).members():
- if node.nodeType() == "imagePlane":
- image_plane_shape = node
- assert image_plane_shape is not None, "Image plane not found."
+ members = get_container_members(container)
+ image_planes = cmds.ls(members, type="imagePlane")
+ assert image_planes, "Image plane not found."
+ image_plane_shape = image_planes[0]
path = get_representation_path(representation)
- image_plane_shape.imageName.set(path)
- cmds.setAttr(
- container["objectName"] + ".representation",
- str(representation["_id"]),
- type="string"
- )
+ cmds.setAttr("{}.imageName".format(image_plane_shape),
+ path,
+ type="string")
+ cmds.setAttr("{}.representation".format(container["objectName"]),
+ str(representation["_id"]),
+ type="string")
# Set frame range.
project_name = legacy_io.active_project()
@@ -227,10 +234,14 @@ class ImagePlaneLoader(load.LoaderPlugin):
start_frame = asset["data"]["frameStart"]
end_frame = asset["data"]["frameEnd"]
- image_plane_shape.frameOffset.set(0)
- image_plane_shape.frameIn.set(start_frame)
- image_plane_shape.frameOut.set(end_frame)
- image_plane_shape.frameCache.set(end_frame)
+ for attr, value in {
+ "frameOffset": 0,
+ "frameIn": start_frame,
+ "frameOut": end_frame,
+ "frameCache": end_frame
+            }.items():
+ plug = "{}.{}".format(image_plane_shape, attr)
+ cmds.setAttr(plug, value)
def switch(self, container, representation):
self.update(container, representation)
diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py
index 82c15ab899..461f4258aa 100644
--- a/openpype/hosts/maya/plugins/load/load_reference.py
+++ b/openpype/hosts/maya/plugins/load/load_reference.py
@@ -12,7 +12,8 @@ from openpype.pipeline.create import (
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.lib import (
maintained_selection,
- get_container_members
+ get_container_members,
+ parent_nodes
)
@@ -118,7 +119,6 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
def process_reference(self, context, name, namespace, options):
import maya.cmds as cmds
- import pymel.core as pm
try:
family = context["representation"]["context"]["family"]
@@ -148,7 +148,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
# if there are cameras, try to lock their transforms
self._lock_camera_transforms(new_nodes)
- current_namespace = pm.namespaceInfo(currentNamespace=True)
+ current_namespace = cmds.namespaceInfo(currentNamespace=True)
if current_namespace != ":":
group_name = current_namespace + ":" + group_name
@@ -158,37 +158,29 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
self[:] = new_nodes
if attach_to_root:
- group_node = pm.PyNode(group_name)
- roots = set()
+ roots = cmds.listRelatives(group_name,
+ children=True,
+ fullPath=True) or []
- for node in new_nodes:
- try:
- roots.add(pm.PyNode(node).getAllParents()[-2])
- except: # noqa: E722
- pass
+ if family not in {"layout", "setdress",
+ "mayaAscii", "mayaScene"}:
+ # QUESTION Why do we need to exclude these families?
+ with parent_nodes(roots, parent=None):
+ cmds.xform(group_name, zeroTransformPivots=True)
- if family not in ["layout", "setdress",
- "mayaAscii", "mayaScene"]:
- for root in roots:
- root.setParent(world=True)
-
- group_node.zeroTransformPivots()
- for root in roots:
- root.setParent(group_node)
-
- cmds.setAttr(group_name + ".displayHandle", 1)
+ cmds.setAttr("{}.displayHandle".format(group_name), 1)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings['maya']['load']['colors']
c = colors.get(family)
if c is not None:
- group_node.useOutlinerColor.set(1)
- group_node.outlinerColor.set(
- (float(c[0]) / 255),
- (float(c[1]) / 255),
- (float(c[2]) / 255))
+ cmds.setAttr("{}.useOutlinerColor".format(group_name), 1)
+ cmds.setAttr("{}.outlinerColor".format(group_name),
+ (float(c[0]) / 255),
+ (float(c[1]) / 255),
+ (float(c[2]) / 255))
- cmds.setAttr(group_name + ".displayHandle", 1)
+ cmds.setAttr("{}.displayHandle".format(group_name), 1)
# get bounding box
bbox = cmds.exactWorldBoundingBox(group_name)
# get pivot position on world space
@@ -202,15 +194,16 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
cy = cy + pivot[1]
cz = cz + pivot[2]
# set selection handle offset to center of bounding box
- cmds.setAttr(group_name + ".selectHandleX", cx)
- cmds.setAttr(group_name + ".selectHandleY", cy)
- cmds.setAttr(group_name + ".selectHandleZ", cz)
+ cmds.setAttr("{}.selectHandleX".format(group_name), cx)
+ cmds.setAttr("{}.selectHandleY".format(group_name), cy)
+ cmds.setAttr("{}.selectHandleZ".format(group_name), cz)
if family == "rig":
self._post_process_rig(name, namespace, context, options)
else:
if "translate" in options:
- cmds.setAttr(group_name + ".t", *options["translate"])
+ cmds.setAttr("{}.translate".format(group_name),
+ *options["translate"])
return new_nodes
def switch(self, container, representation):
diff --git a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py
index 0415808b7a..0845f653b1 100644
--- a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py
+++ b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py
@@ -1,6 +1,7 @@
from maya import cmds
import pyblish.api
+from openpype.hosts.maya.api.lib import get_all_children
class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
@@ -21,18 +22,21 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
if objset.endswith("content_SET"):
- instance.data["setMembers"] = cmds.ls(members, long=True)
- self.log.debug("content members: {}".format(members))
+ members = cmds.ls(members, long=True)
+ children = get_all_children(members)
+ instance.data["contentMembers"] = children
+ self.log.debug("content members: {}".format(children))
elif objset.endswith("proxy_SET"):
- instance.data["proxy"] = cmds.ls(members, long=True)
- self.log.debug("proxy members: {}".format(members))
+ set_members = get_all_children(cmds.ls(members, long=True))
+ instance.data["proxy"] = set_members
+ self.log.debug("proxy members: {}".format(set_members))
# Use camera in object set if present else default to render globals
# camera.
cameras = cmds.ls(type="camera", long=True)
renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)]
camera = renderable[0]
- for node in instance.data["setMembers"]:
+ for node in instance.data["contentMembers"]:
camera_shapes = cmds.listRelatives(
node, shapes=True, type="camera"
)
diff --git a/openpype/hosts/maya/plugins/publish/collect_instances.py b/openpype/hosts/maya/plugins/publish/collect_instances.py
index c594626569..87a4de162d 100644
--- a/openpype/hosts/maya/plugins/publish/collect_instances.py
+++ b/openpype/hosts/maya/plugins/publish/collect_instances.py
@@ -1,48 +1,8 @@
from maya import cmds
-import maya.api.OpenMaya as om
import pyblish.api
import json
-
-
-def get_all_children(nodes):
- """Return all children of `nodes` including each instanced child.
- Using maya.cmds.listRelatives(allDescendents=True) includes only the first
- instance. As such, this function acts as an optimal replacement with a
- focus on a fast query.
-
- """
-
- sel = om.MSelectionList()
- traversed = set()
- iterator = om.MItDag(om.MItDag.kDepthFirst)
- for node in nodes:
-
- if node in traversed:
- # Ignore if already processed as a child
- # before
- continue
-
- sel.clear()
- sel.add(node)
- dag = sel.getDagPath(0)
-
- iterator.reset(dag)
- # ignore self
- iterator.next() # noqa: B305
- while not iterator.isDone():
-
- path = iterator.fullPathName()
-
- if path in traversed:
- iterator.prune()
- iterator.next() # noqa: B305
- continue
-
- traversed.add(path)
- iterator.next() # noqa: B305
-
- return list(traversed)
+from openpype.hosts.maya.api.lib import get_all_children
class CollectInstances(pyblish.api.ContextPlugin):
@@ -149,13 +109,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
# Append start frame and end frame to label if present
if "frameStart" and "frameEnd" in data:
-
- # Backwards compatibility for 'handles' data
- if "handles" in data:
- data["handleStart"] = data["handles"]
- data["handleEnd"] = data["handles"]
- data.pop('handles')
-
# Take handles from context if not set locally on the instance
for key in ["handleStart", "handleEnd"]:
if key not in data:
diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py
index b01160a1c0..287ddc228b 100644
--- a/openpype/hosts/maya/plugins/publish/collect_look.py
+++ b/openpype/hosts/maya/plugins/publish/collect_look.py
@@ -556,7 +556,7 @@ class CollectLook(pyblish.api.InstancePlugin):
continue
if cmds.getAttr(attribute, type=True) == "message":
continue
- node_attributes[attr] = cmds.getAttr(attribute)
+ node_attributes[attr] = cmds.getAttr(attribute, asString=True)
# Only include if there are any properties we care about
if not node_attributes:
continue
diff --git a/openpype/hosts/maya/plugins/publish/collect_review.py b/openpype/hosts/maya/plugins/publish/collect_review.py
index 00565c5819..0b03988002 100644
--- a/openpype/hosts/maya/plugins/publish/collect_review.py
+++ b/openpype/hosts/maya/plugins/publish/collect_review.py
@@ -1,10 +1,9 @@
from maya import cmds, mel
-import pymel.core as pm
import pyblish.api
from openpype.client import get_subset_by_name
-from openpype.pipeline import legacy_io
+from openpype.pipeline import legacy_io, KnownPublishError
from openpype.hosts.maya.api.lib import get_attribute_input
@@ -16,7 +15,6 @@ class CollectReview(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.3
label = 'Collect Review Data'
families = ["review"]
- legacy = True
def process(self, instance):
@@ -36,57 +34,67 @@ class CollectReview(pyblish.api.InstancePlugin):
self.log.debug('members: {}'.format(members))
# validate required settings
- assert len(cameras) == 1, "Not a single camera found in extraction"
+ if len(cameras) == 0:
+ raise KnownPublishError("No camera found in review "
+ "instance: {}".format(instance))
+        elif len(cameras) > 1:
+ raise KnownPublishError(
+ "Only a single camera is allowed for a review instance but "
+ "more than one camera found in review instance: {}. "
+ "Cameras found: {}".format(instance, ", ".join(cameras)))
+
camera = cameras[0]
self.log.debug('camera: {}'.format(camera))
- objectset = instance.context.data['objectsets']
+ context = instance.context
+ objectset = context.data['objectsets']
- reviewable_subset = None
- reviewable_subset = list(set(members) & set(objectset))
- if reviewable_subset:
- assert len(reviewable_subset) <= 1, "Multiple subsets for review"
- self.log.debug('subset for review: {}'.format(reviewable_subset))
+ reviewable_subsets = list(set(members) & set(objectset))
+ if reviewable_subsets:
+ if len(reviewable_subsets) > 1:
+ raise KnownPublishError(
+ "Multiple attached subsets for review are not supported. "
+ "Attached: {}".format(", ".join(reviewable_subsets))
+ )
- i = 0
- for inst in instance.context:
+ reviewable_subset = reviewable_subsets[0]
+ self.log.debug(
+ "Subset attached to review: {}".format(reviewable_subset)
+ )
- self.log.debug('filtering {}'.format(inst))
- data = instance.context[i].data
+ # Find the relevant publishing instance in the current context
+ reviewable_inst = next(inst for inst in context
+ if inst.name == reviewable_subset)
+ data = reviewable_inst.data
- if inst.name != reviewable_subset[0]:
- self.log.debug('subset name does not match {}'.format(
- reviewable_subset[0]))
- i += 1
- continue
+ self.log.debug(
+ 'Adding review family to {}'.format(reviewable_subset)
+ )
+ if data.get('families'):
+ data['families'].append('review')
+ else:
+ data['families'] = ['review']
+
+ data['review_camera'] = camera
+ data['frameStartFtrack'] = instance.data["frameStartHandle"]
+ data['frameEndFtrack'] = instance.data["frameEndHandle"]
+ data['frameStartHandle'] = instance.data["frameStartHandle"]
+ data['frameEndHandle'] = instance.data["frameEndHandle"]
+ data["frameStart"] = instance.data["frameStart"]
+ data["frameEnd"] = instance.data["frameEnd"]
+ data['step'] = instance.data['step']
+ data['fps'] = instance.data['fps']
+ data['review_width'] = instance.data['review_width']
+ data['review_height'] = instance.data['review_height']
+ data["isolate"] = instance.data["isolate"]
+ data["panZoom"] = instance.data.get("panZoom", False)
+ data["panel"] = instance.data["panel"]
+
+ # The review instance must be active
+ cmds.setAttr(str(instance) + '.active', 1)
+
+ instance.data['remove'] = True
- if data.get('families'):
- data['families'].append('review')
- else:
- data['families'] = ['review']
- self.log.debug('adding review family to {}'.format(
- reviewable_subset))
- data['review_camera'] = camera
- # data["publish"] = False
- data['frameStartFtrack'] = instance.data["frameStartHandle"]
- data['frameEndFtrack'] = instance.data["frameEndHandle"]
- data['frameStartHandle'] = instance.data["frameStartHandle"]
- data['frameEndHandle'] = instance.data["frameEndHandle"]
- data["frameStart"] = instance.data["frameStart"]
- data["frameEnd"] = instance.data["frameEnd"]
- data['handles'] = instance.data.get('handles', None)
- data['step'] = instance.data['step']
- data['fps'] = instance.data['fps']
- data['review_width'] = instance.data['review_width']
- data['review_height'] = instance.data['review_height']
- data["isolate"] = instance.data["isolate"]
- data["panZoom"] = instance.data.get("panZoom", False)
- data["panel"] = instance.data["panel"]
- cmds.setAttr(str(instance) + '.active', 1)
- self.log.debug('data {}'.format(instance.context[i].data))
- instance.context[i].data.update(data)
- instance.data['remove'] = True
- self.log.debug('isntance data {}'.format(instance.data))
else:
legacy_subset_name = task + 'Review'
asset_doc = instance.context.data['assetEntity']
@@ -108,49 +116,48 @@ class CollectReview(pyblish.api.InstancePlugin):
instance.data["frameEndHandle"]
# make ftrack publishable
- instance.data["families"] = ['ftrack']
+ instance.data.setdefault("families", []).append('ftrack')
cmds.setAttr(str(instance) + '.active', 1)
# Collect audio
playback_slider = mel.eval('$tmpVar=$gPlayBackSlider')
- audio_name = cmds.timeControl(playback_slider, q=True, s=True)
+ audio_name = cmds.timeControl(playback_slider,
+ query=True,
+ sound=True)
display_sounds = cmds.timeControl(
- playback_slider, q=True, displaySound=True
+ playback_slider, query=True, displaySound=True
)
- audio_nodes = []
+ def get_audio_node_data(node):
+ return {
+ "offset": cmds.getAttr("{}.offset".format(node)),
+ "filename": cmds.getAttr("{}.filename".format(node))
+ }
+
+ audio_data = []
if audio_name:
- audio_nodes.append(pm.PyNode(audio_name))
+ audio_data.append(get_audio_node_data(audio_name))
- if not audio_name and display_sounds:
- start_frame = int(pm.playbackOptions(q=True, min=True))
- end_frame = float(pm.playbackOptions(q=True, max=True))
- frame_range = range(int(start_frame), int(end_frame))
+ elif display_sounds:
+ start_frame = int(cmds.playbackOptions(query=True, min=True))
+ end_frame = int(cmds.playbackOptions(query=True, max=True))
- for node in pm.ls(type="audio"):
+ for node in cmds.ls(type="audio"):
# Check if frame range and audio range intersections,
# for whether to include this audio node or not.
- start_audio = node.offset.get()
- end_audio = node.offset.get() + node.duration.get()
- audio_range = range(int(start_audio), int(end_audio))
+ duration = cmds.getAttr("{}.duration".format(node))
+ start_audio = cmds.getAttr("{}.offset".format(node))
+ end_audio = start_audio + duration
- if bool(set(frame_range).intersection(audio_range)):
- audio_nodes.append(node)
+ if start_audio <= end_frame and end_audio > start_frame:
+ audio_data.append(get_audio_node_data(node))
- instance.data["audio"] = []
- for node in audio_nodes:
- instance.data["audio"].append(
- {
- "offset": node.offset.get(),
- "filename": node.filename.get()
- }
- )
+ instance.data["audio"] = audio_data
# Collect focal length.
attr = camera + ".focalLength"
- focal_length = None
if get_attribute_input(attr):
start = instance.data["frameStart"]
end = instance.data["frameEnd"] + 1
diff --git a/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py
index 924ac58c40..14bcc71da6 100644
--- a/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py
+++ b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py
@@ -1,12 +1,12 @@
import os
+from collections import defaultdict
+import json
from maya import cmds
import arnold
from openpype.pipeline import publish
-from openpype.hosts.maya.api.lib import (
- maintained_selection, attribute_values, delete_after
-)
+from openpype.hosts.maya.api import lib
class ExtractArnoldSceneSource(publish.Extractor):
@@ -19,8 +19,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
def process(self, instance):
staging_dir = self.staging_dir(instance)
- filename = "{}.ass".format(instance.name)
- file_path = os.path.join(staging_dir, filename)
+ file_path = os.path.join(staging_dir, "{}.ass".format(instance.name))
# Mask
mask = arnold.AI_NODE_ALL
@@ -71,8 +70,8 @@ class ExtractArnoldSceneSource(publish.Extractor):
"mask": mask
}
- filenames = self._extract(
- instance.data["setMembers"], attribute_data, kwargs
+ filenames, nodes_by_id = self._extract(
+ instance.data["contentMembers"], attribute_data, kwargs
)
if "representations" not in instance.data:
@@ -88,6 +87,19 @@ class ExtractArnoldSceneSource(publish.Extractor):
instance.data["representations"].append(representation)
+ json_path = os.path.join(staging_dir, "{}.json".format(instance.name))
+ with open(json_path, "w") as f:
+ json.dump(nodes_by_id, f)
+
+ representation = {
+ "name": "json",
+ "ext": "json",
+ "files": os.path.basename(json_path),
+ "stagingDir": staging_dir
+ }
+
+ instance.data["representations"].append(representation)
+
self.log.info(
"Extracted instance {} to: {}".format(instance.name, staging_dir)
)
@@ -97,7 +109,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
return
kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
- filenames = self._extract(
+ filenames, _ = self._extract(
instance.data["proxy"], attribute_data, kwargs
)
@@ -113,34 +125,60 @@ class ExtractArnoldSceneSource(publish.Extractor):
instance.data["representations"].append(representation)
def _extract(self, nodes, attribute_data, kwargs):
- self.log.info("Writing: " + kwargs["filename"])
+ self.log.info(
+ "Writing {} with:\n{}".format(kwargs["filename"], kwargs)
+ )
filenames = []
+ nodes_by_id = defaultdict(list)
# Duplicating nodes so they are direct children of the world. This
# makes the hierarchy of any exported ass file the same.
- with delete_after() as delete_bin:
+ with lib.delete_after() as delete_bin:
duplicate_nodes = []
for node in nodes:
+ # Only interested in transforms:
+ if cmds.nodeType(node) != "transform":
+ continue
+
+ # Only interested in transforms with shapes.
+ shapes = cmds.listRelatives(
+ node, shapes=True, noIntermediate=True
+ )
+ if not shapes:
+ continue
+
duplicate_transform = cmds.duplicate(node)[0]
- # Discard the children.
- shapes = cmds.listRelatives(duplicate_transform, shapes=True)
+ if cmds.listRelatives(duplicate_transform, parent=True):
+ duplicate_transform = cmds.parent(
+ duplicate_transform, world=True
+ )[0]
+
+ basename = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
+ duplicate_transform = cmds.rename(
+ duplicate_transform, basename
+ )
+
+ # Discard children nodes that are not shapes
+ shapes = cmds.listRelatives(
+ duplicate_transform, shapes=True, fullPath=True
+ )
children = cmds.listRelatives(
- duplicate_transform, children=True
+ duplicate_transform, children=True, fullPath=True
)
cmds.delete(set(children) - set(shapes))
- duplicate_transform = cmds.parent(
- duplicate_transform, world=True
- )[0]
-
- cmds.rename(duplicate_transform, node.split("|")[-1])
- duplicate_transform = "|" + node.split("|")[-1]
-
duplicate_nodes.append(duplicate_transform)
+ duplicate_nodes.extend(shapes)
delete_bin.append(duplicate_transform)
- with attribute_values(attribute_data):
- with maintained_selection():
+ # Copy cbId to mtoa_constant.
+ for node in duplicate_nodes:
+ # Converting Maya hierarchy separator "|" to Arnold
+ # separator "/".
+ nodes_by_id[lib.get_id(node)].append(node.replace("|", "/"))
+
+ with lib.attribute_values(attribute_data):
+ with lib.maintained_selection():
self.log.info(
"Writing: {}".format(duplicate_nodes)
)
@@ -157,4 +195,4 @@ class ExtractArnoldSceneSource(publish.Extractor):
self.log.info("Exported: {}".format(filenames))
- return filenames
+ return filenames, nodes_by_id
diff --git a/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py b/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py
new file mode 100644
index 0000000000..422f5ad019
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py
@@ -0,0 +1,65 @@
+import json
+
+from maya import cmds
+
+from openpype.pipeline import publish
+
+
+class ExtractGPUCache(publish.Extractor):
+ """Extract the content of the instance to a GPU cache file."""
+
+ label = "GPU Cache"
+ hosts = ["maya"]
+ families = ["model", "animation", "pointcache"]
+ step = 1.0
+ stepSave = 1
+ optimize = True
+ optimizationThreshold = 40000
+ optimizeAnimationsForMotionBlur = True
+ writeMaterials = True
+ useBaseTessellation = True
+
+ def process(self, instance):
+ cmds.loadPlugin("gpuCache", quiet=True)
+
+ staging_dir = self.staging_dir(instance)
+ filename = "{}_gpu_cache".format(instance.name)
+
+ # Write out GPU cache file.
+ kwargs = {
+ "directory": staging_dir,
+ "fileName": filename,
+ "saveMultipleFiles": False,
+ "simulationRate": self.step,
+ "sampleMultiplier": self.stepSave,
+ "optimize": self.optimize,
+ "optimizationThreshold": self.optimizationThreshold,
+ "optimizeAnimationsForMotionBlur": (
+ self.optimizeAnimationsForMotionBlur
+ ),
+ "writeMaterials": self.writeMaterials,
+ "useBaseTessellation": self.useBaseTessellation
+ }
+ self.log.debug(
+ "Extract {} with:\n{}".format(
+ instance[:], json.dumps(kwargs, indent=4, sort_keys=True)
+ )
+ )
+ cmds.gpuCache(instance[:], **kwargs)
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ "name": "gpu_cache",
+ "ext": "abc",
+ "files": filename + ".abc",
+ "stagingDir": staging_dir,
+ "outputName": "gpu_cache"
+ }
+
+ instance.data["representations"].append(representation)
+
+ self.log.info(
+ "Extracted instance {} to: {}".format(instance.name, staging_dir)
+ )
diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py
index 27bd7dc8ea..0f3425a1de 100644
--- a/openpype/hosts/maya/plugins/publish/extract_playblast.py
+++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py
@@ -9,7 +9,6 @@ from openpype.pipeline import publish
from openpype.hosts.maya.api import lib
from maya import cmds
-import pymel.core as pm
@contextlib.contextmanager
@@ -110,11 +109,11 @@ class ExtractPlayblast(publish.Extractor):
preset["filename"] = path
preset["overwrite"] = True
- pm.refresh(f=True)
+ cmds.refresh(force=True)
- refreshFrameInt = int(pm.playbackOptions(q=True, minTime=True))
- pm.currentTime(refreshFrameInt - 1, edit=True)
- pm.currentTime(refreshFrameInt, edit=True)
+ refreshFrameInt = int(cmds.playbackOptions(q=True, minTime=True))
+ cmds.currentTime(refreshFrameInt - 1, edit=True)
+ cmds.currentTime(refreshFrameInt, edit=True)
# Override transparency if requested.
transparency = instance.data.get("transparency", 0)
@@ -226,7 +225,7 @@ class ExtractPlayblast(publish.Extractor):
tags.append("delete")
# Add camera node name to representation data
- camera_node_name = pm.ls(camera)[0].getTransform().name()
+ camera_node_name = cmds.listRelatives(camera, parent=True)[0]
collected_files = list(frame_collection)
# single frame file shouldn't be in list, only as a string
diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py
index f2d084b828..b4ed8dce4c 100644
--- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py
+++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py
@@ -8,7 +8,6 @@ from openpype.pipeline import publish
from openpype.hosts.maya.api import lib
from maya import cmds
-import pymel.core as pm
class ExtractThumbnail(publish.Extractor):
@@ -99,11 +98,11 @@ class ExtractThumbnail(publish.Extractor):
preset["filename"] = path
preset["overwrite"] = True
- pm.refresh(f=True)
+ cmds.refresh(force=True)
- refreshFrameInt = int(pm.playbackOptions(q=True, minTime=True))
- pm.currentTime(refreshFrameInt - 1, edit=True)
- pm.currentTime(refreshFrameInt, edit=True)
+ refreshFrameInt = int(cmds.playbackOptions(q=True, minTime=True))
+ cmds.currentTime(refreshFrameInt - 1, edit=True)
+ cmds.currentTime(refreshFrameInt, edit=True)
# Override transparency if requested.
transparency = instance.data.get("transparency", 0)
diff --git a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py b/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py
index 9b10d2737d..df16c6c357 100644
--- a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py
+++ b/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py
@@ -30,9 +30,7 @@ class ExtractVRayProxy(publish.Extractor):
# non-animated subsets
keys = ["frameStart", "frameEnd",
"handleStart", "handleEnd",
- "frameStartHandle", "frameEndHandle",
- # Backwards compatibility
- "handles"]
+ "frameStartHandle", "frameEndHandle"]
for key in keys:
instance.data.pop(key, None)
diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py
index 3b0ffd52d7..7055dc145e 100644
--- a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py
+++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py
@@ -1,5 +1,3 @@
-import maya.cmds as cmds
-
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder, PublishValidationError
@@ -22,10 +20,11 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
families = ["ass"]
label = "Validate Arnold Scene Source"
- def _get_nodes_data(self, nodes):
+ def _get_nodes_by_name(self, nodes):
ungrouped_nodes = []
nodes_by_name = {}
parents = []
+ same_named_nodes = {}
for node in nodes:
node_split = node.split("|")
if len(node_split) == 2:
@@ -35,21 +34,38 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
if parent:
parents.append(parent)
- nodes_by_name[node_split[-1]] = node
- for shape in cmds.listRelatives(node, shapes=True):
- nodes_by_name[shape.split("|")[-1]] = shape
+ node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
+
+            # Check for same named nodes, which can happen in different
+ # hierarchies.
+ if node_name in nodes_by_name:
+ try:
+ same_named_nodes[node_name].append(node)
+ except KeyError:
+ same_named_nodes[node_name] = [
+ nodes_by_name[node_name], node
+ ]
+
+ nodes_by_name[node_name] = node
+
+ if same_named_nodes:
+ message = "Found nodes with the same name:"
+ for name, nodes in same_named_nodes.items():
+ message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes))
+
+ raise PublishValidationError(message)
return ungrouped_nodes, nodes_by_name, parents
def process(self, instance):
ungrouped_nodes = []
- nodes, content_nodes_by_name, content_parents = self._get_nodes_data(
- instance.data["setMembers"]
+ nodes, content_nodes_by_name, content_parents = (
+ self._get_nodes_by_name(instance.data["contentMembers"])
)
ungrouped_nodes.extend(nodes)
- nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_data(
+ nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_by_name(
instance.data.get("proxy", [])
)
ungrouped_nodes.extend(nodes)
@@ -66,11 +82,11 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
return
# Validate for content and proxy nodes amount being the same.
- if len(instance.data["setMembers"]) != len(instance.data["proxy"]):
+ if len(instance.data["contentMembers"]) != len(instance.data["proxy"]):
raise PublishValidationError(
"Amount of content nodes ({}) and proxy nodes ({}) needs to "
"be the same.".format(
- len(instance.data["setMembers"]),
+ len(instance.data["contentMembers"]),
len(instance.data["proxy"])
)
)
diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
new file mode 100644
index 0000000000..e27723e104
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
@@ -0,0 +1,74 @@
+import pyblish.api
+from openpype.hosts.maya.api import lib
+from openpype.pipeline.publish import (
+ ValidateContentsOrder, PublishValidationError, RepairAction
+)
+
+
+class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin):
+ """Validate Arnold Scene Source Cbid.
+
+ It is required for the proxy and content nodes to share the same cbid.
+ """
+
+ order = ValidateContentsOrder
+ hosts = ["maya"]
+ families = ["ass"]
+ label = "Validate Arnold Scene Source CBID"
+ actions = [RepairAction]
+
+ @staticmethod
+ def _get_nodes_by_name(nodes):
+ nodes_by_name = {}
+ for node in nodes:
+ node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
+ nodes_by_name[node_name] = node
+
+ return nodes_by_name
+
+ @classmethod
+ def get_invalid_couples(cls, instance):
+ content_nodes_by_name = cls._get_nodes_by_name(
+ instance.data["contentMembers"]
+ )
+ proxy_nodes_by_name = cls._get_nodes_by_name(
+ instance.data.get("proxy", [])
+ )
+
+ invalid_couples = []
+ for content_name, content_node in content_nodes_by_name.items():
+ proxy_node = proxy_nodes_by_name.get(content_name, None)
+
+ if not proxy_node:
+ cls.log.debug(
+ "Content node '{}' has no matching proxy node.".format(
+ content_node
+ )
+ )
+ continue
+
+ content_id = lib.get_id(content_node)
+ proxy_id = lib.get_id(proxy_node)
+ if content_id != proxy_id:
+ invalid_couples.append((content_node, proxy_node))
+
+ return invalid_couples
+
+ def process(self, instance):
+ # Proxy validation.
+ if not instance.data.get("proxy", []):
+ return
+
+ # Validate for proxy nodes sharing the same cbId as content nodes.
+ invalid_couples = self.get_invalid_couples(instance)
+ if invalid_couples:
+ raise PublishValidationError(
+ "Found proxy nodes with mismatching cbid:\n{}".format(
+ invalid_couples
+ )
+ )
+
+ @classmethod
+ def repair(cls, instance):
+        for content_node, proxy_node in cls.get_invalid_couples(instance):
+ lib.set_id(proxy_node, lib.get_id(content_node), overwrite=False)
diff --git a/openpype/hosts/maya/plugins/publish/validate_attributes.py b/openpype/hosts/maya/plugins/publish/validate_attributes.py
index 7a1f0cf086..6ca9afb9a4 100644
--- a/openpype/hosts/maya/plugins/publish/validate_attributes.py
+++ b/openpype/hosts/maya/plugins/publish/validate_attributes.py
@@ -1,13 +1,17 @@
-import pymel.core as pm
+from collections import defaultdict
+
+from maya import cmds
import pyblish.api
+
+from openpype.hosts.maya.api.lib import set_attribute
from openpype.pipeline.publish import (
RepairContextAction,
ValidateContentsOrder,
)
-class ValidateAttributes(pyblish.api.ContextPlugin):
+class ValidateAttributes(pyblish.api.InstancePlugin):
"""Ensure attributes are consistent.
Attributes to validate and their values comes from the
@@ -27,86 +31,80 @@ class ValidateAttributes(pyblish.api.ContextPlugin):
attributes = None
- def process(self, context):
+ def process(self, instance):
# Check for preset existence.
-
if not self.attributes:
return
- invalid = self.get_invalid(context, compute=True)
+ invalid = self.get_invalid(instance, compute=True)
if invalid:
raise RuntimeError(
"Found attributes with invalid values: {}".format(invalid)
)
@classmethod
- def get_invalid(cls, context, compute=False):
- invalid = context.data.get("invalid_attributes", [])
+ def get_invalid(cls, instance, compute=False):
if compute:
- invalid = cls.get_invalid_attributes(context)
-
- return invalid
+ return cls.get_invalid_attributes(instance)
+ else:
+ return instance.data.get("invalid_attributes", [])
@classmethod
- def get_invalid_attributes(cls, context):
+ def get_invalid_attributes(cls, instance):
invalid_attributes = []
- for instance in context:
- # Filter publisable instances.
- if not instance.data["publish"]:
+
+ # Filter families.
+ families = [instance.data["family"]]
+ families += instance.data.get("families", [])
+ families = set(families) & set(cls.attributes.keys())
+ if not families:
+ return []
+
+ # Get all attributes to validate.
+ attributes = defaultdict(dict)
+ for family in families:
+ if family not in cls.attributes:
+ # No attributes to validate for family
continue
- # Filter families.
- families = [instance.data["family"]]
- families += instance.data.get("families", [])
- families = list(set(families) & set(cls.attributes.keys()))
- if not families:
+ for preset_attr, preset_value in cls.attributes[family].items():
+ node_name, attribute_name = preset_attr.split(".", 1)
+ attributes[node_name][attribute_name] = preset_value
+
+ if not attributes:
+ return []
+
+ # Get invalid attributes.
+ nodes = cmds.ls(long=True)
+ for node in nodes:
+ node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
+ if node_name not in attributes:
continue
- # Get all attributes to validate.
- attributes = {}
- for family in families:
- for preset in cls.attributes[family]:
- [node_name, attribute_name] = preset.split(".")
- try:
- attributes[node_name].update(
- {attribute_name: cls.attributes[family][preset]}
- )
- except KeyError:
- attributes.update({
- node_name: {
- attribute_name: cls.attributes[family][preset]
- }
- })
+ for attr_name, expected in attributes.items():
- # Get invalid attributes.
- nodes = pm.ls()
- for node in nodes:
- name = node.name(stripNamespace=True)
- if name not in attributes.keys():
+ # Skip if attribute does not exist
+ if not cmds.attributeQuery(attr_name, node=node, exists=True):
continue
- presets_to_validate = attributes[name]
- for attribute in node.listAttr():
- names = [attribute.shortName(), attribute.longName()]
- attribute_name = list(
- set(names) & set(presets_to_validate.keys())
+ plug = "{}.{}".format(node, attr_name)
+ value = cmds.getAttr(plug)
+ if value != expected:
+ invalid_attributes.append(
+ {
+ "attribute": plug,
+ "expected": expected,
+ "current": value
+ }
)
- if attribute_name:
- expected = presets_to_validate[attribute_name[0]]
- if attribute.get() != expected:
- invalid_attributes.append(
- {
- "attribute": attribute,
- "expected": expected,
- "current": attribute.get()
- }
- )
- context.data["invalid_attributes"] = invalid_attributes
+ instance.data["invalid_attributes"] = invalid_attributes
return invalid_attributes
@classmethod
def repair(cls, instance):
invalid = cls.get_invalid(instance)
for data in invalid:
- data["attribute"].set(data["expected"])
+ node, attr = data["attribute"].split(".", 1)
+ value = data["expected"]
+ set_attribute(node=node, attribute=attr, value=value)
diff --git a/openpype/hosts/maya/plugins/publish/validate_frame_range.py b/openpype/hosts/maya/plugins/publish/validate_frame_range.py
index 59b06874b3..ccb351c880 100644
--- a/openpype/hosts/maya/plugins/publish/validate_frame_range.py
+++ b/openpype/hosts/maya/plugins/publish/validate_frame_range.py
@@ -4,6 +4,7 @@ from maya import cmds
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
+ PublishValidationError
)
from openpype.hosts.maya.api.lib_rendersetup import (
get_attr_overrides,
@@ -49,7 +50,6 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
frame_start_handle = int(context.data.get("frameStartHandle"))
frame_end_handle = int(context.data.get("frameEndHandle"))
- handles = int(context.data.get("handles"))
handle_start = int(context.data.get("handleStart"))
handle_end = int(context.data.get("handleEnd"))
frame_start = int(context.data.get("frameStart"))
@@ -66,8 +66,6 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
assert frame_start_handle <= frame_end_handle, (
"start frame is lower then end frame")
- assert handles >= 0, ("handles cannot have negative values")
-
# compare with data on instance
errors = []
if [ef for ef in self.exclude_families
diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py b/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py
index fa4c66952c..a580a1c787 100644
--- a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py
+++ b/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py
@@ -1,8 +1,14 @@
-import pymel.core as pc
from maya import cmds
import pyblish.api
+
import openpype.hosts.maya.api.action
-from openpype.hosts.maya.api.lib import maintained_selection
+from openpype.hosts.maya.api.lib import (
+ maintained_selection,
+ delete_after,
+ undo_chunk,
+ get_attribute,
+ set_attribute
+)
from openpype.pipeline.publish import (
RepairAction,
ValidateMeshOrder,
@@ -31,60 +37,68 @@ class ValidateMeshArnoldAttributes(pyblish.api.InstancePlugin):
else:
active = False
+ @classmethod
+ def get_default_attributes(cls):
+ # Get default arnold attribute values for mesh type.
+ defaults = {}
+ with delete_after() as tmp:
+ transform = cmds.createNode("transform")
+ tmp.append(transform)
+
+ mesh = cmds.createNode("mesh", parent=transform)
+ for attr in cmds.listAttr(mesh, string="ai*"):
+ plug = "{}.{}".format(mesh, attr)
+ try:
+ defaults[attr] = get_attribute(plug)
+ except RuntimeError:
+ cls.log.debug("Ignoring arnold attribute: {}".format(attr))
+
+ return defaults
+
@classmethod
def get_invalid_attributes(cls, instance, compute=False):
invalid = []
if compute:
- # Get default arnold attributes.
- temp_transform = pc.polyCube()[0]
- for shape in pc.ls(instance, type="mesh"):
- for attr in temp_transform.getShape().listAttr():
- if not attr.attrName().startswith("ai"):
- continue
+ meshes = cmds.ls(instance, type="mesh", long=True)
+ if not meshes:
+ return []
- target_attr = pc.PyNode(
- "{}.{}".format(shape.name(), attr.attrName())
- )
- if attr.get() != target_attr.get():
- invalid.append(target_attr)
-
- pc.delete(temp_transform)
+ # Compare the values against the defaults
+ defaults = cls.get_default_attributes()
+ for mesh in meshes:
+ for attr_name, default_value in defaults.items():
+ plug = "{}.{}".format(mesh, attr_name)
+ if get_attribute(plug) != default_value:
+ invalid.append(plug)
instance.data["nondefault_arnold_attributes"] = invalid
- else:
- invalid.extend(instance.data["nondefault_arnold_attributes"])
- return invalid
+ return instance.data.get("nondefault_arnold_attributes", [])
@classmethod
def get_invalid(cls, instance):
- invalid = []
-
- for attr in cls.get_invalid_attributes(instance, compute=False):
- invalid.append(attr.node().name())
-
- return invalid
+ invalid_attrs = cls.get_invalid_attributes(instance, compute=False)
+ invalid_nodes = set(attr.split(".", 1)[0] for attr in invalid_attrs)
+ return sorted(invalid_nodes)
@classmethod
def repair(cls, instance):
with maintained_selection():
- with pc.UndoChunk():
- temp_transform = pc.polyCube()[0]
-
+ with undo_chunk():
+ defaults = cls.get_default_attributes()
attributes = cls.get_invalid_attributes(
instance, compute=False
)
for attr in attributes:
- source = pc.PyNode(
- "{}.{}".format(
- temp_transform.getShape(), attr.attrName()
- )
+ node, attr_name = attr.split(".", 1)
+ value = defaults[attr_name]
+ set_attribute(
+ node=node,
+ attribute=attr_name,
+ value=value
)
- attr.set(source.get())
-
- pc.delete(temp_transform)
def process(self, instance):
diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py
index be23f61ec5..74269cc506 100644
--- a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py
+++ b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py
@@ -1,10 +1,11 @@
-import pyblish.api
-import openpype.hosts.maya.api.action
import math
-import maya.api.OpenMaya as om
-import pymel.core as pm
-
from six.moves import xrange
+
+from maya import cmds
+import maya.api.OpenMaya as om
+import pyblish.api
+
+import openpype.hosts.maya.api.action
from openpype.pipeline.publish import ValidateMeshOrder
@@ -185,8 +186,7 @@ class GetOverlappingUVs(object):
center, radius = self._createBoundingCircle(meshfn)
for i in xrange(meshfn.numPolygons): # noqa: F821
- rayb1, face1Orig, face1Vec = self._createRayGivenFace(
- meshfn, i)
+ rayb1, face1Orig, face1Vec = self._createRayGivenFace(meshfn, i)
if not rayb1:
continue
cui = center[2*i]
@@ -206,8 +206,8 @@ class GetOverlappingUVs(object):
if (dsqr >= (ri + rj) * (ri + rj)):
continue
- rayb2, face2Orig, face2Vec = self._createRayGivenFace(
- meshfn, j)
+ rayb2, face2Orig, face2Vec = self._createRayGivenFace(meshfn,
+ j)
if not rayb2:
continue
# Exclude the degenerate face
@@ -240,37 +240,45 @@ class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin):
optional = True
@classmethod
- def _get_overlapping_uvs(cls, node):
- """ Check if mesh has overlapping UVs.
+ def _get_overlapping_uvs(cls, mesh):
+ """Return overlapping UVs of mesh.
+
+ Args:
+ mesh (str): Mesh node name
+
+ Returns:
+ list: Overlapping uvs for the input mesh in all uv sets.
- :param node: node to check
- :type node: str
- :returns: True is has overlapping UVs, False otherwise
- :rtype: bool
"""
ovl = GetOverlappingUVs()
+ # Store original uv set
+ original_current_uv_set = cmds.polyUVSet(mesh,
+ query=True,
+ currentUVSet=True)
+
overlapping_faces = []
- for i, uv in enumerate(pm.polyUVSet(node, q=1, auv=1)):
- pm.polyUVSet(node, cuv=1, uvSet=uv)
- overlapping_faces.extend(ovl._getOverlapUVFaces(str(node)))
+ for uv_set in cmds.polyUVSet(mesh, query=True, allUVSets=True):
+ cmds.polyUVSet(mesh, currentUVSet=True, uvSet=uv_set)
+ overlapping_faces.extend(ovl._getOverlapUVFaces(mesh))
+
+ # Restore original uv set
+ cmds.polyUVSet(mesh, currentUVSet=True, uvSet=original_current_uv_set)
return overlapping_faces
@classmethod
def get_invalid(cls, instance, compute=False):
- invalid = []
+
if compute:
- instance.data["overlapping_faces"] = []
- for node in pm.ls(instance, type="mesh"):
+ invalid = []
+ for node in cmds.ls(instance, type="mesh"):
faces = cls._get_overlapping_uvs(node)
invalid.extend(faces)
- # Store values for later.
- instance.data["overlapping_faces"].extend(faces)
- else:
- invalid.extend(instance.data["overlapping_faces"])
- return invalid
+ instance.data["overlapping_faces"] = invalid
+
+ return instance.data.get("overlapping_faces", [])
def process(self, instance):
diff --git a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py b/openpype/hosts/maya/plugins/publish/validate_no_namespace.py
index e91b99359d..0ff03f9165 100644
--- a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py
+++ b/openpype/hosts/maya/plugins/publish/validate_no_namespace.py
@@ -1,4 +1,3 @@
-import pymel.core as pm
import maya.cmds as cmds
import pyblish.api
@@ -12,7 +11,7 @@ import openpype.hosts.maya.api.action
def get_namespace(node_name):
# ensure only node's name (not parent path)
- node_name = node_name.rsplit("|")[-1]
+ node_name = node_name.rsplit("|", 1)[-1]
# ensure only namespace
return node_name.rpartition(":")[0]
@@ -45,13 +44,11 @@ class ValidateNoNamespace(pyblish.api.InstancePlugin):
invalid = cls.get_invalid(instance)
- # Get nodes with pymel since we'll be renaming them
- # Since we don't want to keep checking the hierarchy
- # or full paths
- nodes = pm.ls(invalid)
+ # Iterate over the nodes by long to short names to iterate the lowest
+ # in hierarchy nodes first. This way we avoid having renamed parents
+ # before renaming children nodes
+ for node in sorted(invalid, key=len, reverse=True):
- for node in nodes:
- namespace = node.namespace()
- if namespace:
- name = node.nodeName()
- node.rename(name[len(namespace):])
+ node_name = node.rsplit("|", 1)[-1]
+ node_name_without_namespace = node_name.rsplit(":")[-1]
+ cmds.rename(node, node_name_without_namespace)
diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py
index 53f340cd2c..ebf7b3138d 100644
--- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py
+++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py
@@ -275,15 +275,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
# go through definitions and test if such node.attribute exists.
# if so, compare its value from the one required.
for attribute, data in cls.get_nodes(instance, renderer).items():
- # Validate the settings has values.
- if not data["values"]:
- cls.log.error(
- "Settings for {}.{} is missing values.".format(
- node, attribute
- )
- )
- continue
-
for node in data["nodes"]:
try:
render_value = cmds.getAttr(
@@ -316,6 +307,15 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
)
result = {}
for attr, values in OrderedDict(validation_settings).items():
+ values = [convert_to_int_or_float(v) for v in values if v]
+
+ # Validate the settings has values.
+ if not values:
+ cls.log.error(
+ "Settings for {} is missing values.".format(attr)
+ )
+ continue
+
cls.log.debug("{}: {}".format(attr, values))
if "." not in attr:
cls.log.warning(
@@ -324,8 +324,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
)
continue
- values = [convert_to_int_or_float(v) for v in values]
-
node_type, attribute_name = attr.split(".", 1)
# first get node of that type
diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py
index f3ed1a36ef..499bfd4e37 100644
--- a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py
+++ b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py
@@ -1,14 +1,22 @@
-import pymel.core as pc
+from collections import defaultdict
+
+from maya import cmds
import pyblish.api
import openpype.hosts.maya.api.action
+from openpype.hosts.maya.api.lib import get_id, set_id
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
)
+def get_basename(node):
+ """Return node short name without namespace"""
+ return node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
+
+
class ValidateRigOutputIds(pyblish.api.InstancePlugin):
"""Validate rig output ids.
@@ -30,43 +38,48 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin):
@classmethod
def get_invalid(cls, instance, compute=False):
- invalid = cls.get_invalid_matches(instance, compute=compute)
- return [x["node"].longName() for x in invalid]
+ invalid_matches = cls.get_invalid_matches(instance, compute=compute)
+ return list(invalid_matches.keys())
@classmethod
def get_invalid_matches(cls, instance, compute=False):
- invalid = []
+ invalid = {}
if compute:
out_set = next(x for x in instance if x.endswith("out_SET"))
- instance_nodes = pc.sets(out_set, query=True)
- instance_nodes.extend(
- [x.getShape() for x in instance_nodes if x.getShape()])
- scene_nodes = pc.ls(type="transform") + pc.ls(type="mesh")
+ instance_nodes = cmds.sets(out_set, query=True, nodesOnly=True)
+ instance_nodes = cmds.ls(instance_nodes, long=True)
+ for node in instance_nodes:
+ shapes = cmds.listRelatives(node, shapes=True, fullPath=True)
+ if shapes:
+ instance_nodes.extend(shapes)
+
+ scene_nodes = cmds.ls(type="transform") + cmds.ls(type="mesh")
scene_nodes = set(scene_nodes) - set(instance_nodes)
+ scene_nodes_by_basename = defaultdict(list)
+ for node in scene_nodes:
+ basename = get_basename(node)
+ scene_nodes_by_basename[basename].append(node)
+
for instance_node in instance_nodes:
- matches = []
- basename = instance_node.name(stripNamespace=True)
- for scene_node in scene_nodes:
- if scene_node.name(stripNamespace=True) == basename:
- matches.append(scene_node)
+ basename = get_basename(instance_node)
+ if basename not in scene_nodes_by_basename:
+ continue
- if matches:
- ids = [instance_node.cbId.get()]
- ids.extend([x.cbId.get() for x in matches])
- ids = set(ids)
+ matches = scene_nodes_by_basename[basename]
- if len(ids) > 1:
- cls.log.error(
- "\"{}\" id mismatch to: {}".format(
- instance_node.longName(), matches
- )
- )
- invalid.append(
- {"node": instance_node, "matches": matches}
+ ids = set(get_id(node) for node in matches)
+ ids.add(get_id(instance_node))
+
+ if len(ids) > 1:
+ cls.log.error(
+ "\"{}\" id mismatch to: {}".format(
+ instance_node.longName(), matches
)
+ )
+ invalid[instance_node] = matches
instance.data["mismatched_output_ids"] = invalid
else:
@@ -76,19 +89,21 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin):
@classmethod
def repair(cls, instance):
- invalid = cls.get_invalid_matches(instance)
+ invalid_matches = cls.get_invalid_matches(instance)
multiple_ids_match = []
- for data in invalid:
- ids = [x.cbId.get() for x in data["matches"]]
+ for instance_node, matches in invalid_matches.items():
+ ids = set(get_id(node) for node in matches)
# If there are multiple scene ids matched, and error needs to be
# raised for manual correction.
if len(ids) > 1:
- multiple_ids_match.append(data)
+ multiple_ids_match.append({"node": instance_node,
+ "matches": matches})
continue
- data["node"].cbId.set(ids[0])
+ id_to_set = next(iter(ids))
+ set_id(instance_node, id_to_set, overwrite=True)
if multiple_ids_match:
raise RuntimeError(
diff --git a/openpype/hosts/maya/tools/mayalookassigner/app.py b/openpype/hosts/maya/tools/mayalookassigner/app.py
index f9508657e5..2a8775fff6 100644
--- a/openpype/hosts/maya/tools/mayalookassigner/app.py
+++ b/openpype/hosts/maya/tools/mayalookassigner/app.py
@@ -24,6 +24,7 @@ from .commands import (
remove_unused_looks
)
from .vray_proxies import vrayproxy_assign_look
+from . import arnold_standin
module = sys.modules[__name__]
module.window = None
@@ -43,7 +44,7 @@ class MayaLookAssignerWindow(QtWidgets.QWidget):
filename = get_workfile()
self.setObjectName("lookManager")
- self.setWindowTitle("Look Manager 1.3.0 - [{}]".format(filename))
+ self.setWindowTitle("Look Manager 1.4.0 - [{}]".format(filename))
self.setWindowFlags(QtCore.Qt.Window)
self.setParent(parent)
@@ -240,18 +241,37 @@ class MayaLookAssignerWindow(QtWidgets.QWidget):
))
nodes = item["nodes"]
+ # Assign Vray Proxy look.
if cmds.pluginInfo('vrayformaya', query=True, loaded=True):
self.echo("Getting vray proxy nodes ...")
vray_proxies = set(cmds.ls(type="VRayProxy", long=True))
- if vray_proxies:
- for vp in vray_proxies:
- if vp in nodes:
- vrayproxy_assign_look(vp, subset_name)
+ for vp in vray_proxies:
+ if vp in nodes:
+ vrayproxy_assign_look(vp, subset_name)
- nodes = list(set(item["nodes"]).difference(vray_proxies))
+ nodes = list(set(item["nodes"]).difference(vray_proxies))
+ else:
+ self.echo(
+ "Could not assign to VRayProxy because vrayformaya plugin "
+ "is not loaded."
+ )
- # Assign look
+ # Assign Arnold Standin look.
+ if cmds.pluginInfo("mtoa", query=True, loaded=True):
+ arnold_standins = set(cmds.ls(type="aiStandIn", long=True))
+ for standin in arnold_standins:
+ if standin in nodes:
+ arnold_standin.assign_look(standin, subset_name)
+ else:
+ self.echo(
+ "Could not assign to aiStandIn because mtoa plugin is not "
+ "loaded."
+ )
+
+ nodes = list(set(item["nodes"]).difference(arnold_standins))
+
+ # Assign look
if nodes:
assign_look_by_version(nodes, version_id=version["_id"])
diff --git a/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py b/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py
new file mode 100644
index 0000000000..7eeeb72553
--- /dev/null
+++ b/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py
@@ -0,0 +1,247 @@
+import os
+import json
+from collections import defaultdict
+import logging
+
+from maya import cmds
+
+from openpype.pipeline import legacy_io
+from openpype.client import get_last_version_by_subset_name
+from openpype.hosts.maya import api
+from . import lib
+
+
+log = logging.getLogger(__name__)
+
+
+ATTRIBUTE_MAPPING = {
+ "primaryVisibility": "visibility", # Camera
+ "castsShadows": "visibility", # Shadow
+ "receiveShadows": "receive_shadows",
+ "aiSelfShadows": "self_shadows",
+ "aiOpaque": "opaque",
+ "aiMatte": "matte",
+ "aiVisibleInDiffuseTransmission": "visibility",
+ "aiVisibleInSpecularTransmission": "visibility",
+ "aiVisibleInVolume": "visibility",
+ "aiVisibleInDiffuseReflection": "visibility",
+ "aiVisibleInSpecularReflection": "visibility",
+ "aiSubdivUvSmoothing": "subdiv_uv_smoothing",
+ "aiDispHeight": "disp_height",
+ "aiDispPadding": "disp_padding",
+ "aiDispZeroValue": "disp_zero_value",
+ "aiStepSize": "step_size",
+ "aiVolumePadding": "volume_padding",
+ "aiSubdivType": "subdiv_type",
+ "aiSubdivIterations": "subdiv_iterations"
+}
+
+
+def calculate_visibility_mask(attributes):
+ # https://arnoldsupport.com/2018/11/21/backdoor-setting-visibility/
+ mapping = {
+ "primaryVisibility": 1, # Camera
+ "castsShadows": 2, # Shadow
+ "aiVisibleInDiffuseTransmission": 4,
+ "aiVisibleInSpecularTransmission": 8,
+ "aiVisibleInVolume": 16,
+ "aiVisibleInDiffuseReflection": 32,
+ "aiVisibleInSpecularReflection": 64
+ }
+ mask = 255
+ for attr, value in mapping.items():
+ if attributes.get(attr, True):
+ continue
+
+ mask -= value
+
+ return mask
+
+
+def get_nodes_by_id(standin):
+ """Get node id from aiStandIn via json sidecar.
+
+ Args:
+ standin (string): aiStandIn node.
+
+ Returns:
+ (dict): Dictionary with node full name/path and id.
+ """
+ path = cmds.getAttr(standin + ".dso")
+ json_path = None
+ for f in os.listdir(os.path.dirname(path)):
+ if f.endswith(".json"):
+ json_path = os.path.join(os.path.dirname(path), f)
+ break
+
+ if not json_path:
+ log.warning("Could not find json file for {}.".format(standin))
+ return {}
+
+ with open(json_path, "r") as f:
+ return json.load(f)
+
+
+def shading_engine_assignments(shading_engine, attribute, nodes, assignments):
+ """Full assignments with shader or disp_map.
+
+ Args:
+ shading_engine (string): Shading engine for material.
+ attribute (string): "surfaceShader" or "displacementShader"
+ nodes: (list): Nodes paths relative to aiStandIn.
+ assignments (dict): Assignments by nodes.
+ """
+ shader_inputs = cmds.listConnections(
+ shading_engine + "." + attribute, source=True
+ )
+ if not shader_inputs:
+ log.info(
+ "Shading engine \"{}\" missing input \"{}\"".format(
+ shading_engine, attribute
+ )
+ )
+ return
+
+ # Strip off component assignments
+ for i, node in enumerate(nodes):
+ if "." in node:
+ log.warning(
+ "Converting face assignment to full object assignment. This "
+ "conversion can be lossy: {}".format(node)
+ )
+ nodes[i] = node.split(".")[0]
+
+ shader_type = "shader" if attribute == "surfaceShader" else "disp_map"
+ assignment = "{}='{}'".format(shader_type, shader_inputs[0])
+ for node in nodes:
+ assignments[node].append(assignment)
+
+
+def assign_look(standin, subset):
+ log.info("Assigning {} to {}.".format(subset, standin))
+
+ nodes_by_id = get_nodes_by_id(standin)
+
+ # Group by asset id so we run over the look per asset
+ node_ids_by_asset_id = defaultdict(set)
+ for node_id in nodes_by_id:
+ asset_id = node_id.split(":", 1)[0]
+ node_ids_by_asset_id[asset_id].add(node_id)
+
+ project_name = legacy_io.active_project()
+ for asset_id, node_ids in node_ids_by_asset_id.items():
+
+ # Get latest look version
+ version = get_last_version_by_subset_name(
+ project_name,
+ subset_name=subset,
+ asset_id=asset_id,
+ fields=["_id"]
+ )
+ if not version:
+ log.info("Didn't find last version for subset name {}".format(
+ subset
+ ))
+ continue
+
+ relationships = lib.get_look_relationships(version["_id"])
+ shader_nodes, container_node = lib.load_look(version["_id"])
+ namespace = shader_nodes[0].split(":")[0]
+
+ # Get only the node ids and paths related to this asset
+ # And get the shader edits the look supplies
+ asset_nodes_by_id = {
+ node_id: nodes_by_id[node_id] for node_id in node_ids
+ }
+ edits = list(
+ api.lib.iter_shader_edits(
+ relationships, shader_nodes, asset_nodes_by_id
+ )
+ )
+
+ # Create assignments
+ node_assignments = {}
+ for edit in edits:
+ for node in edit["nodes"]:
+ if node not in node_assignments:
+ node_assignments[node] = []
+
+ if edit["action"] == "assign":
+ if not cmds.ls(edit["shader"], type="shadingEngine"):
+ log.info("Skipping non-shader: %s" % edit["shader"])
+ continue
+
+ shading_engine_assignments(
+ shading_engine=edit["shader"],
+ attribute="surfaceShader",
+ nodes=edit["nodes"],
+ assignments=node_assignments
+ )
+ shading_engine_assignments(
+ shading_engine=edit["shader"],
+ attribute="displacementShader",
+ nodes=edit["nodes"],
+ assignments=node_assignments
+ )
+
+ if edit["action"] == "setattr":
+ visibility = False
+ for attr, value in edit["attributes"].items():
+ if attr not in ATTRIBUTE_MAPPING:
+ log.warning(
+ "Skipping setting attribute {} on {} because it is"
+ " not recognized.".format(attr, edit["nodes"])
+ )
+ continue
+
+ if isinstance(value, str):
+ value = "'{}'".format(value)
+
+ if ATTRIBUTE_MAPPING[attr] == "visibility":
+ visibility = True
+ continue
+
+ assignment = "{}={}".format(ATTRIBUTE_MAPPING[attr], value)
+
+ for node in edit["nodes"]:
+ node_assignments[node].append(assignment)
+
+ if visibility:
+ mask = calculate_visibility_mask(edit["attributes"])
+ assignment = "visibility={}".format(mask)
+
+ for node in edit["nodes"]:
+ node_assignments[node].append(assignment)
+
+ # Assign shader
+ # Clear all current shader assignments
+ plug = standin + ".operators"
+ num = cmds.getAttr(plug, size=True)
+ for i in reversed(range(num)):
+ cmds.removeMultiInstance("{}[{}]".format(plug, i), b=True)
+
+ # Create new assignment overrides
+ index = 0
+ for node, assignments in node_assignments.items():
+ if not assignments:
+ continue
+
+ with api.lib.maintained_selection():
+ operator = cmds.createNode("aiSetParameter")
+ operator = cmds.rename(operator, namespace + ":" + operator)
+
+ cmds.setAttr(operator + ".selection", node, type="string")
+ for i, assignment in enumerate(assignments):
+ cmds.setAttr(
+ "{}.assignment[{}]".format(operator, i),
+ assignment,
+ type="string"
+ )
+
+ cmds.connectAttr(
+ operator + ".out", "{}[{}]".format(plug, index)
+ )
+
+ index += 1
+
+ cmds.sets(operator, edit=True, addElement=container_node)
diff --git a/openpype/hosts/maya/tools/mayalookassigner/commands.py b/openpype/hosts/maya/tools/mayalookassigner/commands.py
index 3d9746511d..c5e6c973cf 100644
--- a/openpype/hosts/maya/tools/mayalookassigner/commands.py
+++ b/openpype/hosts/maya/tools/mayalookassigner/commands.py
@@ -13,6 +13,7 @@ from openpype.pipeline import (
from openpype.hosts.maya.api import lib
from .vray_proxies import get_alembic_ids_cache
+from . import arnold_standin
log = logging.getLogger(__name__)
@@ -44,33 +45,11 @@ def get_namespace_from_node(node):
return parts[0] if len(parts) > 1 else u":"
-def list_descendents(nodes):
- """Include full descendant hierarchy of given nodes.
-
- This is a workaround to cmds.listRelatives(allDescendents=True) because
- this way correctly keeps children instance paths (see Maya documentation)
-
- This fixes LKD-26: assignments not working as expected on instanced shapes.
-
- Return:
- list: List of children descendents of nodes
-
- """
- result = []
- while True:
- nodes = cmds.listRelatives(nodes,
- fullPath=True)
- if nodes:
- result.extend(nodes)
- else:
- return result
-
-
def get_selected_nodes():
"""Get information from current selection"""
selection = cmds.ls(selection=True, long=True)
- hierarchy = list_descendents(selection)
+ hierarchy = lib.get_all_children(selection)
return list(set(selection + hierarchy))
@@ -105,10 +84,12 @@ def create_asset_id_hash(nodes):
path = cmds.getAttr("{}.fileName".format(node))
ids = get_alembic_ids_cache(path)
for k, _ in ids.items():
- pid = k.split(":")[0]
- if node not in node_id_hash[pid]:
- node_id_hash[pid].append(node)
-
+ id = k.split(":")[0]
+ node_id_hash[id].append(node)
+ elif cmds.nodeType(node) == "aiStandIn":
+ for id, _ in arnold_standin.get_nodes_by_id(node).items():
+ id = id.split(":")[0]
+ node_id_hash[id].append(node)
else:
value = lib.get_id(node)
if value is None:
diff --git a/openpype/hosts/maya/tools/mayalookassigner/lib.py b/openpype/hosts/maya/tools/mayalookassigner/lib.py
new file mode 100644
index 0000000000..fddaf6112d
--- /dev/null
+++ b/openpype/hosts/maya/tools/mayalookassigner/lib.py
@@ -0,0 +1,87 @@
+import json
+import logging
+
+from openpype.pipeline import (
+ legacy_io,
+ get_representation_path,
+ registered_host,
+ discover_loader_plugins,
+ loaders_from_representation,
+ load_container
+)
+from openpype.client import get_representation_by_name
+from openpype.hosts.maya.api import lib
+
+
+log = logging.getLogger(__name__)
+
+
+def get_look_relationships(version_id):
+ # type: (str) -> dict
+ """Get relations for the look.
+
+ Args:
+ version_id (str): Parent version Id.
+
+ Returns:
+ dict: Dictionary of relations.
+ """
+
+ project_name = legacy_io.active_project()
+ json_representation = get_representation_by_name(
+ project_name, representation_name="json", version_id=version_id
+ )
+
+ # Load relationships
+ shader_relation = get_representation_path(json_representation)
+ with open(shader_relation, "r") as f:
+ relationships = json.load(f)
+
+ return relationships
+
+
+def load_look(version_id):
+ # type: (str) -> list
+ """Load look from version.
+
+ Get look from version and invoke Loader for it.
+
+ Args:
+ version_id (str): Version ID
+
+ Returns:
+ list of shader nodes.
+
+ """
+
+ project_name = legacy_io.active_project()
+ # Get representations of shader file and relationships
+ look_representation = get_representation_by_name(
+ project_name, representation_name="ma", version_id=version_id
+ )
+
+ # See if representation is already loaded, if so reuse it.
+ host = registered_host()
+ representation_id = str(look_representation['_id'])
+ for container in host.ls():
+ if (container['loader'] == "LookLoader" and
+ container['representation'] == representation_id):
+ log.info("Reusing loaded look ...")
+ container_node = container['objectName']
+ break
+ else:
+ log.info("Using look for the first time ...")
+
+ # Load file
+ all_loaders = discover_loader_plugins()
+ loaders = loaders_from_representation(all_loaders, representation_id)
+ loader = next(
+ (i for i in loaders if i.__name__ == "LookLoader"), None)
+ if loader is None:
+ raise RuntimeError("Could not find LookLoader, this is a bug")
+
+ # Reference the look file
+ with lib.maintained_selection():
+ container_node = load_container(loader, look_representation)[0]
+
+ return lib.get_container_members(container_node), container_node
diff --git a/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py b/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py
index 889396e555..1d2ec5fd87 100644
--- a/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py
+++ b/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py
@@ -3,26 +3,16 @@
import os
from collections import defaultdict
import logging
-import json
import six
import alembic.Abc
from maya import cmds
-from openpype.client import (
- get_representation_by_name,
- get_last_version_by_subset_name,
-)
-from openpype.pipeline import (
- legacy_io,
- load_container,
- loaders_from_representation,
- discover_loader_plugins,
- get_representation_path,
- registered_host,
-)
-from openpype.hosts.maya.api import lib
+from openpype.client import get_last_version_by_subset_name
+from openpype.pipeline import legacy_io
+import openpype.hosts.maya.lib as maya_lib
+from . import lib
log = logging.getLogger(__name__)
@@ -149,79 +139,6 @@ def assign_vrayproxy_shaders(vrayproxy, assignments):
index += 1
-def get_look_relationships(version_id):
- # type: (str) -> dict
- """Get relations for the look.
-
- Args:
- version_id (str): Parent version Id.
-
- Returns:
- dict: Dictionary of relations.
- """
-
- project_name = legacy_io.active_project()
- json_representation = get_representation_by_name(
- project_name, representation_name="json", version_id=version_id
- )
-
- # Load relationships
- shader_relation = get_representation_path(json_representation)
- with open(shader_relation, "r") as f:
- relationships = json.load(f)
-
- return relationships
-
-
-def load_look(version_id):
- # type: (str) -> list
- """Load look from version.
-
- Get look from version and invoke Loader for it.
-
- Args:
- version_id (str): Version ID
-
- Returns:
- list of shader nodes.
-
- """
-
- project_name = legacy_io.active_project()
- # Get representations of shader file and relationships
- look_representation = get_representation_by_name(
- project_name, representation_name="ma", version_id=version_id
- )
-
- # See if representation is already loaded, if so reuse it.
- host = registered_host()
- representation_id = str(look_representation['_id'])
- for container in host.ls():
- if (container['loader'] == "LookLoader" and
- container['representation'] == representation_id):
- log.info("Reusing loaded look ...")
- container_node = container['objectName']
- break
- else:
- log.info("Using look for the first time ...")
-
- # Load file
- all_loaders = discover_loader_plugins()
- loaders = loaders_from_representation(all_loaders, representation_id)
- loader = next(
- (i for i in loaders if i.__name__ == "LookLoader"), None)
- if loader is None:
- raise RuntimeError("Could not find LookLoader, this is a bug")
-
- # Reference the look file
- with lib.maintained_selection():
- container_node = load_container(loader, look_representation)
-
- # Get container members
- shader_nodes = lib.get_container_members(container_node)
- return shader_nodes
-
-
def vrayproxy_assign_look(vrayproxy, subset="lookDefault"):
# type: (str, str) -> None
"""Assign look to vray proxy.
@@ -263,8 +180,8 @@ def vrayproxy_assign_look(vrayproxy, subset="lookDefault"):
))
continue
- relationships = get_look_relationships(version["_id"])
- shadernodes = load_look(version["_id"])
+ relationships = lib.get_look_relationships(version["_id"])
+ shadernodes, _ = lib.load_look(version["_id"])
# Get only the node ids and paths related to this asset
# And get the shader edits the look supplies
@@ -272,8 +189,10 @@ def vrayproxy_assign_look(vrayproxy, subset="lookDefault"):
node_id: nodes_by_id[node_id] for node_id in node_ids
}
edits = list(
- lib.iter_shader_edits(
- relationships, shadernodes, asset_nodes_by_id))
+ maya_lib.iter_shader_edits(
+ relationships, shadernodes, asset_nodes_by_id
+ )
+ )
# Create assignments
assignments = {}
diff --git a/openpype/hosts/nuke/plugins/load/actions.py b/openpype/hosts/nuke/plugins/load/actions.py
index e562c74c58..3227a7ed98 100644
--- a/openpype/hosts/nuke/plugins/load/actions.py
+++ b/openpype/hosts/nuke/plugins/load/actions.py
@@ -74,8 +74,7 @@ class SetFrameRangeWithHandlesLoader(load.LoaderPlugin):
return
# Include handles
- handles = version_data.get("handles", 0)
- start -= handles
- end += handles
+ start -= version_data.get("handleStart", 0)
+ end += version_data.get("handleEnd", 0)
lib.update_frame_range(start, end)
diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py
index 90581c2f22..53e9a76003 100644
--- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py
+++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py
@@ -138,7 +138,6 @@ class LinkAsGroup(load.LoaderPlugin):
"version": version_doc.get("name"),
"colorspace": version_data.get("colorspace"),
"source": version_data.get("source"),
- "handles": version_data.get("handles"),
"fps": version_data.get("fps"),
"author": version_data.get("author")
})
diff --git a/openpype/hosts/nuke/plugins/publish/collect_context_data.py b/openpype/hosts/nuke/plugins/publish/collect_context_data.py
index b487c946f0..f1b4965205 100644
--- a/openpype/hosts/nuke/plugins/publish/collect_context_data.py
+++ b/openpype/hosts/nuke/plugins/publish/collect_context_data.py
@@ -49,8 +49,6 @@ class CollectContextData(pyblish.api.ContextPlugin):
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect,
- # backward compatibility handles
- "handles": handle_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"step": 1,
diff --git a/openpype/hosts/nuke/plugins/publish/collect_gizmo.py b/openpype/hosts/nuke/plugins/publish/collect_gizmo.py
index 3a877fc194..e3c40a7a90 100644
--- a/openpype/hosts/nuke/plugins/publish/collect_gizmo.py
+++ b/openpype/hosts/nuke/plugins/publish/collect_gizmo.py
@@ -28,7 +28,6 @@ class CollectGizmo(pyblish.api.InstancePlugin):
# Add version data to instance
version_data = {
- "handles": handle_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
diff --git a/openpype/hosts/nuke/plugins/publish/collect_model.py b/openpype/hosts/nuke/plugins/publish/collect_model.py
index 9da056052b..3fdf376d0c 100644
--- a/openpype/hosts/nuke/plugins/publish/collect_model.py
+++ b/openpype/hosts/nuke/plugins/publish/collect_model.py
@@ -28,7 +28,6 @@ class CollectModel(pyblish.api.InstancePlugin):
# Add version data to instance
version_data = {
- "handles": handle_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
diff --git a/openpype/hosts/nuke/plugins/publish/collect_reads.py b/openpype/hosts/nuke/plugins/publish/collect_reads.py
index a1144fbcc3..831ae29a27 100644
--- a/openpype/hosts/nuke/plugins/publish/collect_reads.py
+++ b/openpype/hosts/nuke/plugins/publish/collect_reads.py
@@ -103,7 +103,6 @@ class CollectNukeReads(pyblish.api.InstancePlugin):
# Add version data to instance
version_data = {
- "handles": handle_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
@@ -123,7 +122,8 @@ class CollectNukeReads(pyblish.api.InstancePlugin):
"frameStart": first_frame,
"frameEnd": last_frame,
"colorspace": colorspace,
- "handles": int(asset_doc["data"].get("handles", 0)),
+ "handleStart": handle_start,
+ "handleEnd": handle_end,
"step": 1,
"fps": int(nuke.root()['fps'].value())
})
diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py
index 575e6aa755..58fbd09545 100644
--- a/openpype/hosts/tvpaint/api/pipeline.py
+++ b/openpype/hosts/tvpaint/api/pipeline.py
@@ -504,14 +504,9 @@ def set_context_settings(project_name, asset_doc):
print("Frame range was not found!")
return
- handles = asset_doc["data"].get("handles") or 0
handle_start = asset_doc["data"].get("handleStart")
handle_end = asset_doc["data"].get("handleEnd")
- if handle_start is None or handle_end is None:
- handle_start = handles
- handle_end = handles
-
# Always start from 0 Mark In and set only Mark Out
mark_in = 0
mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end
diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py
index 834394b02f..ef456395e7 100644
--- a/openpype/lib/execute.py
+++ b/openpype/lib/execute.py
@@ -170,11 +170,13 @@ def clean_envs_for_openpype_process(env=None):
"""
if env is None:
env = os.environ
- return {
- key: value
- for key, value in env.items()
- if key not in ("PYTHONPATH",)
- }
+
+ # Exclude some environment variables from a copy of the environment
+ env = env.copy()
+ for key in ["PYTHONPATH", "PYTHONHOME"]:
+ env.pop(key, None)
+
+ return env
def run_openpype_process(*args, **kwargs):
diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py
index 0c899a500c..5c598df94b 100644
--- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py
@@ -76,6 +76,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
"use_gpu",
default=cls.use_gpu,
label="Use GPU"
+ ),
+ BoolDef(
+ "suspend_publish",
+ default=False,
+ label="Suspend publish"
)
]
@@ -87,6 +92,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
instance.data["attributeValues"] = self.get_attr_values_from_data(
instance.data)
+ # add suspend_publish attributeValue to instance data
+ instance.data["suspend_publish"] = instance.data["attributeValues"][
+ "suspend_publish"]
+
instance.data["toBeRenderedOn"] = "deadline"
families = instance.data["families"]
diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
index 41bc103d5c..4765772bcf 100644
--- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py
+++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
@@ -944,17 +944,28 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# we cannot attach AOVs to other subsets as we consider every
# AOV subset of its own.
- config = instance.data["colorspaceConfig"]
additional_data = {
"renderProducts": instance.data["renderProducts"],
"colorspaceConfig": instance.data["colorspaceConfig"],
"display": instance.data["colorspaceDisplay"],
- "view": instance.data["colorspaceView"],
- "colorspaceTemplate": config.replace(
- str(context.data["anatomy"].roots["work"]), "{root[work]}"
- )
+ "view": instance.data["colorspaceView"]
}
+ # Get templated path from absolute config path.
+ anatomy = instance.context.data["anatomy"]
+ colorspaceTemplate = instance.data["colorspaceConfig"]
+ success, rootless_staging_dir = (
+ anatomy.find_root_template_from_path(colorspaceTemplate)
+ )
+ if success:
+ colorspaceTemplate = rootless_staging_dir
+ else:
+ self.log.warning((
+ "Could not find root path for remapping \"{}\"."
+ " This may cause issues on farm."
+ ).format(colorspaceTemplate))
+ additional_data["colorspaceTemplate"] = colorspaceTemplate
+
if len(data.get("attachTo")) > 0:
assert len(data.get("expectedFiles")[0].keys()) == 1, (
"attaching multiple AOVs or renderable cameras to "
diff --git a/openpype/modules/kitsu/utils/update_op_with_zou.py b/openpype/modules/kitsu/utils/update_op_with_zou.py
index 1f38648dfa..4f4f0810bc 100644
--- a/openpype/modules/kitsu/utils/update_op_with_zou.py
+++ b/openpype/modules/kitsu/utils/update_op_with_zou.py
@@ -329,6 +329,7 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne:
"code": project_code,
"fps": float(project["fps"]),
"zou_id": project["id"],
+ "active": project['project_status_name'] != "Closed",
}
)
@@ -379,7 +380,7 @@ def sync_all_projects(
# Iterate projects
dbcon = AvalonMongoDB()
dbcon.install()
- all_projects = gazu.project.all_open_projects()
+ all_projects = gazu.project.all_projects()
for project in all_projects:
if ignore_projects and project["name"] in ignore_projects:
continue
@@ -404,7 +405,21 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict):
if not project:
project = gazu.project.get_project_by_name(project["name"])
- log.info("Synchronizing {}...".format(project["name"]))
+ # Get all statuses for projects from Kitsu
+ all_status = gazu.project.all_project_status()
+ for status in all_status:
+ if project['project_status_id'] == status['id']:
+ project['project_status_name'] = status['name']
+ break
+
+ # Do not sync closed kitsu project that is not found in openpype
+ if (
+ project['project_status_name'] == "Closed"
+ and not get_project(project['name'])
+ ):
+ return
+
+ log.info(f"Synchronizing {project['name']}...")
# Get all assets from zou
all_assets = gazu.asset.all_assets_for_project(project)
@@ -429,6 +444,9 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict):
log.info("Project created: {}".format(project_name))
bulk_writes.append(write_project_to_op(project, dbcon))
+ if project['project_status_name'] == "Closed":
+ return
+
# Try to find project document
if not project_dict:
project_dict = get_project(project_name)
diff --git a/openpype/pipeline/colorspace.py b/openpype/pipeline/colorspace.py
index e96563fa98..b21008af9f 100644
--- a/openpype/pipeline/colorspace.py
+++ b/openpype/pipeline/colorspace.py
@@ -218,8 +218,7 @@ def get_data_subprocess(config_path, data_type):
log.info("Executing: {}".format(" ".join(args)))
process_kwargs = {
- "logger": log,
- "env": {}
+ "logger": log
}
run_openpype_process(*args, **process_kwargs)
diff --git a/openpype/plugins/publish/collect_context_entities.py b/openpype/plugins/publish/collect_context_entities.py
index 31fbeb5dbd..312f5f0eb5 100644
--- a/openpype/plugins/publish/collect_context_entities.py
+++ b/openpype/plugins/publish/collect_context_entities.py
@@ -72,24 +72,9 @@ class CollectContextEntities(pyblish.api.ContextPlugin):
context.data["frameStart"] = frame_start
context.data["frameEnd"] = frame_end
- handles = data.get("handles") or 0
- handle_start = data.get("handleStart")
- if handle_start is None:
- handle_start = handles
- self.log.info((
- "Key \"handleStart\" is not set."
- " Using value from \"handles\" key {}."
- ).format(handle_start))
+ handle_start = data.get("handleStart") or 0
+ handle_end = data.get("handleEnd") or 0
- handle_end = data.get("handleEnd")
- if handle_end is None:
- handle_end = handles
- self.log.info((
- "Key \"handleEnd\" is not set."
- " Using value from \"handles\" key {}."
- ).format(handle_end))
-
- context.data["handles"] = int(handles)
context.data["handleStart"] = int(handle_start)
context.data["handleEnd"] = int(handle_end)
diff --git a/openpype/plugins/publish/collect_custom_staging_dir.py b/openpype/plugins/publish/collect_custom_staging_dir.py
index 72ab0fe34d..b749b251c0 100644
--- a/openpype/plugins/publish/collect_custom_staging_dir.py
+++ b/openpype/plugins/publish/collect_custom_staging_dir.py
@@ -42,16 +42,17 @@ class CollectCustomStagingDir(pyblish.api.InstancePlugin):
subset_name = instance.data["subset"]
host_name = instance.context.data["hostName"]
project_name = instance.context.data["projectName"]
-
+ project_settings = instance.context.data["project_settings"]
anatomy = instance.context.data["anatomy"]
- anatomy_data = copy.deepcopy(instance.data["anatomyData"])
- task = anatomy_data.get("task", {})
+ task = instance.data["anatomyData"].get("task", {})
transient_tml, is_persistent = get_custom_staging_dir_info(
project_name, host_name, family, task.get("name"),
- task.get("type"), subset_name, anatomy=anatomy, log=self.log)
- result_str = "Not adding"
+ task.get("type"), subset_name, project_settings=project_settings,
+ anatomy=anatomy, log=self.log)
+
if transient_tml:
+ anatomy_data = copy.deepcopy(instance.data["anatomyData"])
anatomy_data["root"] = anatomy.roots
scene_name = instance.context.data.get("currentFile")
if scene_name:
@@ -61,6 +62,8 @@ class CollectCustomStagingDir(pyblish.api.InstancePlugin):
instance.data["stagingDir_persistent"] = is_persistent
result_str = "Adding '{}' as".format(transient_dir)
+ else:
+ result_str = "Not adding"
self.log.info("{} custom staging dir for instance with '{}'".format(
result_str, family
diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py
index 95575444b2..88a9ac6f2f 100644
--- a/openpype/plugins/publish/extract_burnin.py
+++ b/openpype/plugins/publish/extract_burnin.py
@@ -336,8 +336,7 @@ class ExtractBurnin(publish.Extractor):
# Run burnin script
process_kwargs = {
- "logger": self.log,
- "env": {}
+ "logger": self.log
}
run_openpype_process(*args, **process_kwargs)
@@ -457,12 +456,6 @@ class ExtractBurnin(publish.Extractor):
frame_end = 1
frame_end = int(frame_end)
- handles = instance.data.get("handles")
- if handles is None:
- handles = context.data.get("handles")
- if handles is None:
- handles = 0
-
handle_start = instance.data.get("handleStart")
if handle_start is None:
handle_start = context.data.get("handleStart")
diff --git a/openpype/plugins/publish/integrate.py b/openpype/plugins/publish/integrate.py
index 760b1a6b37..07131ec3ae 100644
--- a/openpype/plugins/publish/integrate.py
+++ b/openpype/plugins/publish/integrate.py
@@ -412,7 +412,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
self.log.debug("{}".format(op_session.to_data()))
op_session.commit()
- # Backwards compatibility
+ # Backwards compatibility used in hero integration.
# todo: can we avoid the need to store this?
instance.data["published_representations"] = {
p["representation"]["_id"]: p for p in prepared_representations
@@ -912,7 +912,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# Include optional data if present in
optionals = [
- "frameStart", "frameEnd", "step", "handles",
+ "frameStart", "frameEnd", "step",
"handleEnd", "handleStart", "sourceHashes"
]
for key in optionals:
diff --git a/openpype/plugins/publish/integrate_legacy.py b/openpype/plugins/publish/integrate_legacy.py
index 1d0177f151..3f1f6ad0c9 100644
--- a/openpype/plugins/publish/integrate_legacy.py
+++ b/openpype/plugins/publish/integrate_legacy.py
@@ -987,7 +987,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# Include optional data if present in
optionals = [
- "frameStart", "frameEnd", "step", "handles",
+ "frameStart", "frameEnd", "step",
"handleEnd", "handleStart", "sourceHashes"
]
for key in optionals:
diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json
index f23a298b87..3d9fb78ac5 100644
--- a/openpype/settings/defaults/project_settings/maya.json
+++ b/openpype/settings/defaults/project_settings/maya.json
@@ -1278,7 +1278,6 @@
"dynamics": false,
"fluids": false,
"follicles": false,
- "gpuCacheDisplayFilter": false,
"greasePencils": false,
"grid": false,
"hairSystems": true,
@@ -1305,7 +1304,10 @@
"polymeshes": true,
"strokes": false,
"subdivSurfaces": false,
- "textures": false
+ "textures": false,
+ "pluginObjects": {
+ "gpuCacheDisplayFilter": false
+ }
},
"Camera Options": {
"displayGateMask": false,
@@ -1339,6 +1341,21 @@
},
"ExtractLook": {
"maketx_arguments": []
+ },
+ "ExtractGPUCache": {
+ "enabled": false,
+ "families": [
+ "model",
+ "animation",
+ "pointcache"
+ ],
+ "step": 1.0,
+ "stepSave": 1,
+ "optimize": true,
+ "optimizationThreshold": 40000,
+ "optimizeAnimationsForMotionBlur": true,
+ "writeMaterials": true,
+ "useBaseTessellation": true
}
},
"load": {
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json
index a4a986bad8..1d78f5a03f 100644
--- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json
@@ -426,11 +426,6 @@
"key": "follicles",
"label": "Follicles"
},
- {
- "type": "boolean",
- "key": "gpuCacheDisplayFilter",
- "label": "GPU Cache"
- },
{
"type": "boolean",
"key": "greasePencils",
@@ -565,6 +560,12 @@
"type": "boolean",
"key": "textures",
"label": "Texture Placements"
+ },
+ {
+ "type": "dict-modifiable",
+ "key": "pluginObjects",
+ "label": "Plugin Objects",
+ "object_type": "boolean"
}
]
},
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json
index 7ced375cb5..346948c658 100644
--- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json
@@ -1025,6 +1025,65 @@
}
}
]
+ },
+ {
+ "type": "dict",
+ "collapsible": true,
+ "key": "ExtractGPUCache",
+ "label": "Extract GPU Cache",
+ "checkbox_key": "enabled",
+ "children": [
+ {
+ "type": "boolean",
+ "key": "enabled",
+ "label": "Enabled"
+ },
+ {
+ "key": "families",
+ "label": "Families",
+ "type": "list",
+ "object_type": "text"
+ },
+ {
+ "key": "step",
+ "label": "Step",
+ "type": "number",
+ "decimal": 4,
+ "minimum": 1
+ },
+ {
+ "key": "stepSave",
+ "label": "Step Save",
+ "type": "number",
+ "minimum": 1
+ },
+ {
+ "key": "optimize",
+ "label": "Optimize Hierarchy",
+ "type": "boolean"
+ },
+ {
+ "key": "optimizationThreshold",
+ "label": "Optimization Threshold",
+ "type": "number",
+ "minimum": 1
+ },
+ {
+ "key": "optimizeAnimationsForMotionBlur",
+ "label": "Optimize Animations For Motion Blur",
+ "type": "boolean"
+ },
+ {
+ "key": "writeMaterials",
+ "label": "Write Materials",
+ "type": "boolean"
+ },
+ {
+ "key": "useBaseTessellation",
+ "label": "User Base Tesselation",
+ "type": "boolean"
+ }
+ ]
}
]
}
diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py
index 5944808f8b..39e0bd98c3 100644
--- a/openpype/tools/loader/model.py
+++ b/openpype/tools/loader/model.py
@@ -365,8 +365,6 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
handle_end = version_data.get("handleEnd", None)
if handle_start is not None and handle_end is not None:
handles = "{}-{}".format(str(handle_start), str(handle_end))
- else:
- handles = version_data.get("handles", None)
if frame_start is not None and frame_end is not None:
# Remove superfluous zeros from numbers (3.0 -> 3) to improve
diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py
index 18be746d49..2f338cf516 100644
--- a/openpype/tools/workfiles/files_widget.py
+++ b/openpype/tools/workfiles/files_widget.py
@@ -379,7 +379,7 @@ class FilesWidget(QtWidgets.QWidget):
# Disable/Enable buttons based on available files in model
has_valid_items = self._workarea_files_model.has_valid_items()
- self._btn_browse.setEnabled(has_valid_items)
+ self._btn_browse.setEnabled(True)
self._btn_open.setEnabled(has_valid_items)
if self._publish_context_select_mode:
@@ -617,14 +617,24 @@ class FilesWidget(QtWidgets.QWidget):
ext_filter = "Work File (*{0})".format(
" *".join(self._get_host_extensions())
)
+ dir_key = "directory"
+ if qtpy.API in ("pyside", "pyside2", "pyside6"):
+ dir_key = "dir"
+
+ workfile_root = self._workfiles_root
+ # Find existing directory of workfile root
+ # - Qt will use 'cwd' instead, if path does not exist, which may lead
+ # to igniter directory
+ while workfile_root:
+ if os.path.exists(workfile_root):
+ break
+ workfile_root = os.path.dirname(workfile_root)
+
kwargs = {
"caption": "Work Files",
- "filter": ext_filter
+ "filter": ext_filter,
+ dir_key: workfile_root
}
- if qtpy.API in ("pyside", "pyside2", "pyside6"):
- kwargs["dir"] = self._workfiles_root
- else:
- kwargs["directory"] = self._workfiles_root
work_file = QtWidgets.QFileDialog.getOpenFileName(**kwargs)[0]
if work_file:
diff --git a/openpype/tools/workfiles/save_as_dialog.py b/openpype/tools/workfiles/save_as_dialog.py
index de21deee42..aa881e7946 100644
--- a/openpype/tools/workfiles/save_as_dialog.py
+++ b/openpype/tools/workfiles/save_as_dialog.py
@@ -51,7 +51,7 @@ class CommentMatcher(object):
# Create a regex group for extensions
extensions = registered_host().file_extensions()
any_extension = "(?:{})".format(
- "|".join(re.escape(ext[1:]) for ext in extensions)
+ "|".join(re.escape(ext.lstrip(".")) for ext in extensions)
)
# Use placeholders that will never be in the filename
@@ -373,7 +373,7 @@ class SaveAsDialog(QtWidgets.QDialog):
if not data["comment"]:
data.pop("comment", None)
- data["ext"] = data["ext"][1:]
+ data["ext"] = data["ext"].lstrip(".")
anatomy_filled = self.anatomy.format(data)
return anatomy_filled[self.template_key]["file"]
@@ -413,7 +413,7 @@ class SaveAsDialog(QtWidgets.QDialog):
if not data["comment"]:
data.pop("comment", None)
- data["ext"] = data["ext"][1:]
+ data["ext"] = data["ext"].lstrip(".")
version = get_last_workfile_with_version(
self.root, template, data, extensions
diff --git a/openpype/vendor/python/common/capture.py b/openpype/vendor/python/common/capture.py
index 09a42d84d1..224699f916 100644
--- a/openpype/vendor/python/common/capture.py
+++ b/openpype/vendor/python/common/capture.py
@@ -732,11 +732,23 @@ def _applied_viewport_options(options, panel):
"""Context manager for applying `options` to `panel`"""
options = dict(ViewportOptions, **(options or {}))
+ plugin_options = options.pop("pluginObjects", {})
+ # BUGFIX Maya 2020 some keys in viewport options dict may not be unicode
+ # This is a local OpenPype edit to capture.py for issue #4730
+ # TODO: Remove when dropping Maya 2020 compatibility
+ if int(cmds.about(version=True)) <= 2020:
+ options = {
+ str(key): value for key, value in options.items()
+ }
+ plugin_options = {
+ str(key): value for key, value in plugin_options.items()
+ }
+
+ # Backwards compatibility for `pluginObjects` flattened into `options`
# separate the plugin display filter options since they need to
# be set differently (see #55)
- plugins = cmds.pluginDisplayFilter(query=True, listFilters=True)
- plugin_options = dict()
+ plugins = set(cmds.pluginDisplayFilter(query=True, listFilters=True))
for plugin in plugins:
if plugin in options:
plugin_options[plugin] = options.pop(plugin)
@@ -745,7 +757,14 @@ def _applied_viewport_options(options, panel):
try:
cmds.modelEditor(panel, edit=True, **options)
except TypeError as e:
- logger.error("Cannot apply options {}".format(e))
+ # Try to set as much as possible of the state by setting them one by
+ # one. This way we can also report the failing key values explicitly.
+ for key, value in options.items():
+ try:
+ cmds.modelEditor(panel, edit=True, **{key: value})
+ except TypeError:
+ logger.error("Failing to apply option '{}': {}".format(key,
+ value))
# plugin display filter options
for plugin, state in plugin_options.items():
diff --git a/openpype/version.py b/openpype/version.py
index 4d6ee5590e..d9e29d691e 100644
--- a/openpype/version.py
+++ b/openpype/version.py
@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
-__version__ = "3.15.4-nightly.1"
+__version__ = "3.15.4-nightly.3"
diff --git a/website/docs/admin_hosts_maya.md b/website/docs/admin_hosts_maya.md
index e1bf35fb17..52fde72c4c 100644
--- a/website/docs/admin_hosts_maya.md
+++ b/website/docs/admin_hosts_maya.md
@@ -50,10 +50,6 @@ just one instance of this node type but if that is not so, validator will go thr
instances and check the value there. Node type for **VRay** settings is `VRaySettingsNode`, for **Renderman**
it is `rmanGlobals`, for **Redshift** it is `RedshiftOptions`.
-:::info getting attribute values
-If you do not know what an attributes value is supposed to be, for example for dropdown menu (enum), try changing the attribute and look in the script editor where it should log what the attribute was set to.
-:::
-
### Model Name Validator
`ValidateRenderSettings`
@@ -110,6 +106,35 @@ or Deadlines **Draft Tile Assembler**.
This is useful to fix some specific renderer glitches and advanced hacking of Maya Scene files. `Patch name` is label for patch for easier orientation.
`Patch regex` is regex used to find line in file, after `Patch line` string is inserted. Note that you need to add line ending.
+### Extract GPU Cache
+
+
+
+- **Step** Specifies how often samples are taken during file creation. By default, one sample of your object's transformations is taken every frame and saved to the Alembic file.
+
+ For example, a value of 2 caches the transformations of the current object at every other frame of the Cache Time Range.
+
+- **Step Save** Specifies which samples are saved during cache creation. For example, a value of 2 specifies that only every other sample specified by the Step # frame(s) option is saved to your Alembic file.
+
+- **Optimize Hierarchy** When on, nodes and objects in a selected hierarchy are consolidated to maximize the performance of the cache file during playback.
+- **Optimization Threshold** (Available only when Optimize Hierarchy is on.) Specifies the maximum number of vertices contained in a single draw primitive. The default value of 40000 may be ideal for most Maya supported graphics cards. When set to the default value, after optimization, each object in the GPU cache file(s) will have no more than 40000 vertices. This value can be set higher depending on the memory available on your system graphics card.
+
+- **Optimize Animations for Motion Blur** When on, objects with animated transform nodes display with motion blur when the cache is played back in Viewport 2.0 render mode. See Viewport 2.0 options.
+
+ Maya first determines if the GPU cache includes animation data. If the GPU cache is static and does not contain animation data, Maya does not optimize the GPU cache for motion blur.
+
+:::note Motion Blur does not support Cached Playback.
+:::
+
+- **Write Materials** When on, Maya exports the Lambert and Phong materials from source geometry to the GPU Cache file. These materials display when the GPU-cached file is played back in Viewport 2.0.
+
+    GPU-cached objects support all the high-quality lighting and shading effects provided by the Viewport 2.0 rendering mode. See Viewport 2.0 options.
+
+:::note Lambert and Phong materials do not display on GPU-cached files when they are played back in scene view's High Quality Rendering or Default Quality Rendering modes.
+:::
+
+- **Use Base Tessellation** Exports geometry with base tessellation and no smoothing applied. If this setting is turned off, the extractor will export geometry with the current Smooth Mesh Preview setting applied.
+
### Extract Playblast Settings (review)
These settings provide granular control over how the playblasts or reviews are produced in Maya.
diff --git a/website/docs/artist_hosts_aftereffects.md b/website/docs/artist_hosts_aftereffects.md
index 939ef4034c..d9522d5765 100644
--- a/website/docs/artist_hosts_aftereffects.md
+++ b/website/docs/artist_hosts_aftereffects.md
@@ -90,7 +90,7 @@ If there is an option of automatic repair, there will be `Repair` button on the
There are currently 2 options of `render` item:
- Render of farm - allows offload rendering and publishing to Deadline - requires Deadline module being enabled
-- Validate Scene Settings - enables validation plugin which controls setting in DB (or asset control system like Ftrak) and scene itself
+- Validate Scene Settings - enables validation plugin which controls setting in DB (or asset control system like Ftrack) and scene itself

@@ -100,6 +100,23 @@ There are currently 2 options of `render` item:
- `Validate` - if you would like to run only collecting and validating phases (nothing will be published yet)
- `Publish` - standard way how to kick off full publishing process
+#### Support help
+If you would like to ask admin or support for help, you could use any of the three options on the `Note` button on the bottom left:
+- `Go to details` - switches into a more detailed list of published instances and plugins.
+- `Copy report` - stash full publishing log to a clipboard
+- `Export report` - save log into a file for sending it via mail or any communication tool
+
+If you are able to fix the workfile yourself, use the first button on the right to set the UI to initial state before publish. (Click the `Publish` button to start again.)
+
+#### Legacy instances
+
+All screenshots from Publish are from updated dialog, before publishing was being done by regular `Pyblish` tool.
+New publishing process should be backward compatible, eg. if you have a workfile with instances created in the previous publishing approach, they will be translated automatically and
+could be used right away.
+
+If you hit on unexpected behaviour with old instances, contact support first, then you could try to delete and recreate instances from scratch.
+Nuclear option is to purge workfile metadata in `Window > Metadata > Basic > Label`. This is only for most determined daredevils though!
+
### Load
When you want to load existing published work, you can use the `Loader` tool. You can reach it in the extension's panel.
@@ -134,20 +151,3 @@ You can switch to a previous version of the image or update to the latest.


-
-#### Support help
-If you would like to ask for help admin or support, you could use any of the three options on the `Note` button on bottom left:
-- `Go to details` - switches into a more detailed list of published instances and plugins.
-- `Copy report` - stash full publishing log to a clipboard
-- `Export report` - save log into a file for sending it via mail or any communication tool
-
-If you are able to fix the workfile yourself, use the first button on the right to set the UI to initial state before publish. (Click the `Publish` button to start again.)
-
-#### Legacy instances
-
-All screenshots from Publish are from updated dialog, before publishing was being done by regular `Pyblish` tool.
-New publishing process should be backward compatible, eg. if you have a workfile with instances created in the previous publishing approach, they will be translated automatically and
-could be used right away.
-
-If you hit on unexpected behaviour with old instances, contact support first, then you could try to delete and recreate instances from scratch.
-Nuclear option is to purge workfile metadata in `Window > Metadata > Basic > Label`. This is only for most determined daredevils though!
diff --git a/website/docs/assets/maya-admin_gpu_cache.png b/website/docs/assets/maya-admin_gpu_cache.png
new file mode 100644
index 0000000000..8b07b06c1e
Binary files /dev/null and b/website/docs/assets/maya-admin_gpu_cache.png differ
diff --git a/website/docs/module_kitsu.md b/website/docs/module_kitsu.md
index 05cff87fcc..d79c78fecf 100644
--- a/website/docs/module_kitsu.md
+++ b/website/docs/module_kitsu.md
@@ -53,5 +53,5 @@ There are four settings available:
## Q&A
### Is it safe to rename an entity from Kitsu?
-Absolutely! Entities are linked by their unique IDs between the two databases.
+Absolutely! Entities are linked by their unique IDs between the two databases.
But renaming from the OP's Project Manager won't apply the change to Kitsu, it'll be overridden during the next synchronization.
diff --git a/website/docs/project_settings/settings_project_global.md b/website/docs/project_settings/settings_project_global.md
index 821585ae21..2de9038f3f 100644
--- a/website/docs/project_settings/settings_project_global.md
+++ b/website/docs/project_settings/settings_project_global.md
@@ -194,6 +194,74 @@ A profile may generate multiple outputs from a single input. Each output must de
- Nuke extractor settings path: `project_settings/nuke/publish/ExtractReviewDataMov/outputs/baking/add_custom_tags`
- Filtering by input length. Input may be video, sequence or single image. It is possible that `.mp4` should be created only when input is video or sequence and to create review `.png` when input is single frame. In some cases the output should be created even if it's single frame or multi frame input.
+
+### Extract Burnin
+
+Plugin is responsible for adding burnins into review representations.
+
+Burnins are text values painted on top of input and may be surrounded with box in 6 available positions `Top Left`, `Top Center`, `Top Right`, `Bottom Left`, `Bottom Center`, `Bottom Right`.
+
+
+
+The Extract Burnin plugin creates new representations based on plugin presets, representations in instance and whether the reviewable matches the profile filter.
+A burnin can also be directly linked by name in the output definitions of the [Extract Review plug-in settings](#extract-review) so _can_ be triggered without a matching profile.
+
+#### Burnin formatting options (`options`)
+
+The formatting options define the font style for the burnin texts.
+The X and Y offset define the margin around texts and (background) boxes.
+
+#### Burnin profiles (`profiles`)
+
+Plugin process is skipped if `profiles` are not set at all. Profiles contain a list of profile items. Each burnin profile may specify filters for **hosts**, **tasks** and **families**. Filters work the same way as described in [Profile Filters](#profile-filters).
+
+#### Profile burnins
+
+A burnin profile may set multiple burnin outputs from one input. The burnin's name represents the unique **filename suffix** to avoid overriding files with the same name.
+
+| Key | Description | Type | Example |
+| --- | --- | --- | --- |
+| **Top Left** | Top left corner content. | str | "{dd}.{mm}.{yyyy}" |
+| **Top Centered** | Top center content. | str | "v{version:0>3}" |
+| **Top Right** | Top right corner content. | str | "Static text" |
+| **Bottom Left** | Bottom left corner content. | str | "{asset}" |
+| **Bottom Centered** | Bottom center content. | str | "{username}" |
+| **Bottom Right** | Bottom right corner content. | str | "{frame_start}-{current_frame}-{frame_end}" |
+
+Each burnin profile can be configured with additional family filtering and can
+add additional tags to the burnin representation, these can be configured under
+the profile's **Additional filtering** section.
+
+:::note Filename suffix
+The filename suffix is appended to filename of the source representation. For
+example, if the source representation has suffix **"h264"** and the burnin
+suffix is **"client"** then the final suffix is **"h264_client"**.
+:::
+
+**Available keys in burnin content**
+
+- It is possible to use same keys as in [Anatomy](admin_settings_project_anatomy.md#available-template-keys).
+- It is allowed to use Anatomy templates themselves in burnins if they can be filled with available data.
+
+- Additional keys in burnins:
+
+ | Burnin key | Description |
+ | --- | --- |
+ | frame_start | First frame number. |
+ | frame_end | Last frame number. |
+ | current_frame | Frame number for each frame. |
 | duration | Total number of frames. |
+ | resolution_width | Resolution width. |
+ | resolution_height | Resolution height. |
+ | fps | Fps of an output. |
+ | timecode | Timecode by frame start and fps. |
+ | focalLength | **Only available in Maya**<br/>Camera focal length per frame. Use syntax `{focalLength:.2f}` for decimal truncating. Eg. `35.234985` with `{focalLength:.2f}` would produce `35.23`, whereas `{focalLength:.0f}` would produce `35`. |
+
+:::warning
+`timecode` is a specific key that can be **only at the end of content**. (`"BOTTOM_RIGHT": "TC: {timecode}"`)
+:::
+
+
### IntegrateAssetNew
Saves information for all published subsets into DB, published assets are available for other hosts, tools and tasks after.