Merge branch 'develop' into feature/simplified_creator_api

Jakub Trllo 2023-02-14 17:03:20 +01:00
commit e4d59039d8
58 changed files with 1070 additions and 1040 deletions

View file

@ -1,19 +0,0 @@
name: Automate Projects
on:
issues:
types: [opened, labeled]
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
jobs:
assign_one_project:
runs-on: ubuntu-latest
name: Assign to One Project
steps:
- name: Assign NEW bugs to triage
uses: srggrs/assign-one-project-github-action@1.2.0
if: contains(github.event.issue.labels.*.name, 'bug')
with:
project: 'https://github.com/pypeclub/pype/projects/2'
column_name: 'Needs triage'

View file

@ -13,7 +13,7 @@ jobs:
if: github.event.pull_request.milestone == null
uses: zoispag/action-assign-milestone@v1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}"
milestone: 'next-minor'
run_if_develop:
@ -24,5 +24,5 @@ jobs:
if: github.event.pull_request.milestone == null
uses: zoispag/action-assign-milestone@v1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
milestone: 'next-patch'
repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}"
milestone: 'next-patch'

View file

@ -12,7 +12,7 @@ jobs:
uses: "WyriHaximus/github-action-get-milestones@master"
id: milestones
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
- run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
id: querymilestone
@ -31,7 +31,7 @@ jobs:
with:
title: 'next-patch'
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
generate-next-minor:
runs-on: ubuntu-latest
@ -40,7 +40,7 @@ jobs:
uses: "WyriHaximus/github-action-get-milestones@master"
id: milestones
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
- run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
id: querymilestone
@ -59,4 +59,4 @@ jobs:
with:
title: 'next-minor'
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"

View file

@ -14,10 +14,10 @@ jobs:
- name: 🚛 Checkout Code
uses: actions/checkout@v2
- name: 🔨 Merge develop to main
- name: 🔨 Merge develop to main
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
source_ref: 'develop'
target_branch: 'main'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
@ -26,4 +26,4 @@ jobs:
uses: benc-uk/workflow-dispatch@v1
with:
workflow: Nightly Prerelease
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}

View file

@ -25,43 +25,15 @@ jobs:
- name: 🔎 Determine next version type
id: version_type
run: |
TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.GITHUB_TOKEN }})
echo ::set-output name=type::$TYPE
TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
echo "type=${TYPE}" >> $GITHUB_OUTPUT
- name: 💉 Inject new version into files
id: version
if: steps.version_type.outputs.type != 'skip'
run: |
RESULT=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.GITHUB_TOKEN }})
echo ::set-output name=next_tag::$RESULT
# - name: "✏️ Generate full changelog"
# if: steps.version_type.outputs.type != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# unreleasedLabel: ${{ steps.version.outputs.next_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: "🖨️ Print changelog to console"
if: steps.version_type.outputs.type != 'skip'
run: cat CHANGELOG.md
NEW_VERSION_TAG=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
echo "next_tag=${NEW_VERSION_TAG}" >> $GITHUB_OUTPUT
- name: 💾 Commit and Tag
id: git_commit
@ -80,7 +52,7 @@ jobs:
- name: Push to protected main branch
uses: CasperWA/push-protected@v2.10.0
with:
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
@ -89,7 +61,7 @@ jobs:
uses: everlytic/branch-merge@1.1.0
if: steps.version_type.outputs.type != 'skip'
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'

View file

@ -26,34 +26,12 @@ jobs:
- name: 💉 Inject new version into files
id: version
run: |
echo ::set-output name=current_version::${GITHUB_REF#refs/*/}
RESULT=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
LASTRELEASE=$(python ./tools/ci_tools.py --lastversion release)
NEW_VERSION=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
LAST_VERSION=$(python ./tools/ci_tools.py --lastversion release)
echo ::set-output name=last_release::$LASTRELEASE
echo ::set-output name=release_tag::$RESULT
# - name: "✏️ Generate full changelog"
# if: steps.version.outputs.release_tag != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# futureRelease: ${{ steps.version.outputs.release_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
echo "current_version=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
echo "last_release=${LAST_VERSION}" >> $GITHUB_OUTPUT
echo "release_tag=${NEW_VERSION}" >> $GITHUB_OUTPUT
- name: 💾 Commit and Tag
id: git_commit
@ -70,43 +48,17 @@ jobs:
if: steps.version.outputs.release_tag != 'skip'
uses: CasperWA/push-protected@v2.10.0
with:
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
- name: "✏️ Generate last changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-last-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: ${{ steps.version.outputs.last_release }}
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
futureRelease: ${{ steps.version.outputs.release_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
stripHeaders: true
base: 'none'
- name: 🚀 Github Release
if: steps.version.outputs.release_tag != 'skip'
uses: ncipollo/release-action@v1
with:
body: ${{ steps.generate-last-changelog.outputs.changelog }}
tag: ${{ steps.version.outputs.release_tag }}
token: ${{ secrets.ADMIN_TOKEN }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}
- name: ☠ Delete Pre-release
if: steps.version.outputs.release_tag != 'skip'
@ -118,7 +70,7 @@ jobs:
if: steps.version.outputs.release_tag != 'skip'
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'

View file

@ -28,7 +28,7 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: 🧵 Install Requirements
shell: pwsh
run: |
@ -64,27 +64,3 @@ jobs:
run: |
export SKIP_THIRD_PARTY_VALIDATION="1"
./tools/build.sh
# MacOS-latest:
# runs-on: macos-latest
# strategy:
# matrix:
# python-version: [3.9]
# steps:
# - name: 🚛 Checkout Code
# uses: actions/checkout@v2
# - name: Set up Python
# uses: actions/setup-python@v2
# with:
# python-version: ${{ matrix.python-version }}
# - name: 🧵 Install Requirements
# run: |
# ./tools/create_env.sh
# - name: 🔨 Build
# run: |
# ./tools/build.sh

View file

@ -1,112 +0,0 @@
from .settings import (
get_system_settings,
get_project_settings,
get_current_project_settings,
get_anatomy_settings,
SystemSettings,
ProjectSettings
)
from .lib import (
PypeLogger,
Logger,
Anatomy,
execute,
run_subprocess,
version_up,
get_asset,
get_workdir_data,
get_version_from_path,
get_last_version_from_path,
get_app_environments_for_context,
source_hash,
get_latest_version,
get_local_site_id,
change_openpype_mongo_url,
create_project_folders,
get_project_basic_paths
)
from .lib.mongo import (
get_default_components
)
from .lib.applications import (
ApplicationManager
)
from .lib.avalon_context import (
BuildWorkfile
)
from . import resources
from .plugin import (
Extractor,
ValidatePipelineOrder,
ValidateContentsOrder,
ValidateSceneOrder,
ValidateMeshOrder,
)
# temporary fix, might
from .action import (
get_errored_instances_from_context,
RepairAction,
RepairContextAction
)
__all__ = [
"get_system_settings",
"get_project_settings",
"get_current_project_settings",
"get_anatomy_settings",
"get_project_basic_paths",
"SystemSettings",
"ProjectSettings",
"PypeLogger",
"Logger",
"Anatomy",
"execute",
"get_default_components",
"ApplicationManager",
"BuildWorkfile",
# Resources
"resources",
# plugin classes
"Extractor",
# ordering
"ValidatePipelineOrder",
"ValidateContentsOrder",
"ValidateSceneOrder",
"ValidateMeshOrder",
# action
"get_errored_instances_from_context",
"RepairAction",
"RepairContextAction",
# get contextual data
"version_up",
"get_asset",
"get_workdir_data",
"get_version_from_path",
"get_last_version_from_path",
"get_app_environments_for_context",
"source_hash",
"run_subprocess",
"get_latest_version",
"get_local_site_id",
"change_openpype_mongo_url",
"get_project_basic_paths",
"create_project_folders"
]

View file

@ -164,7 +164,6 @@ def get_linked_representation_id(
# Recursive graph lookup for inputs
{"$graphLookup": graph_lookup}
]
conn = get_project_connection(project_name)
result = conn.aggregate(query_pipeline)
referenced_version_ids = _process_referenced_pipeline_result(
@ -213,7 +212,7 @@ def _process_referenced_pipeline_result(result, link_type):
for output in sorted(outputs_recursive, key=lambda o: o["depth"]):
output_links = output.get("data", {}).get("inputLinks")
if not output_links:
if not output_links and output["type"] != "hero_version":
continue
# Leaf
@ -232,6 +231,9 @@ def _process_referenced_pipeline_result(result, link_type):
def _filter_input_links(input_links, link_type, correctly_linked_ids):
if not input_links: # to handle hero versions
return
for input_link in input_links:
if link_type and input_link["type"] != link_type:
continue

View file

@ -44,7 +44,7 @@ class AppendBlendLoader(plugin.AssetLoader):
"""
representations = ["blend"]
families = ["*"]
families = ["workfile"]
label = "Append Workfile"
order = 9
@ -68,7 +68,7 @@ class ImportBlendLoader(plugin.AssetLoader):
"""
representations = ["blend"]
families = ["*"]
families = ["workfile"]
label = "Import Workfile"
order = 9

View file

@ -143,6 +143,9 @@ class ExtractSubsetResources(publish.Extractor):
# create staging dir path
staging_dir = self.staging_dir(instance)
# append staging dir for later cleanup
instance.context.data["cleanupFullPaths"].append(staging_dir)
# add default preset type for thumbnail and reviewable video
# update them with settings and override in case the same
# are found in there
@ -548,30 +551,3 @@ class ExtractSubsetResources(publish.Extractor):
"Path `{}` is containing more that one clip".format(path)
)
return clips[0]
def staging_dir(self, instance):
"""Provide a temporary directory in which to store extracted files
Upon calling this method the staging directory is stored inside
the instance.data['stagingDir']
"""
staging_dir = instance.data.get('stagingDir', None)
openpype_temp_dir = os.getenv("OPENPYPE_TEMP_DIR")
if not staging_dir:
if openpype_temp_dir and os.path.exists(openpype_temp_dir):
staging_dir = os.path.normpath(
tempfile.mkdtemp(
prefix="pyblish_tmp_",
dir=openpype_temp_dir
)
)
else:
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data['stagingDir'] = staging_dir
instance.context.data["cleanupFullPaths"].append(staging_dir)
return staging_dir

View file

@ -108,9 +108,9 @@ class ExtractRender(pyblish.api.InstancePlugin):
output = process.communicate()[0]
if process.returncode != 0:
raise ValueError(output.decode("utf-8"))
raise ValueError(output.decode("utf-8", errors="backslashreplace"))
self.log.debug(output.decode("utf-8"))
self.log.debug(output.decode("utf-8", errors="backslashreplace"))
# Generate representations.
extension = collection.tail[1:]

View file

@ -6,7 +6,7 @@ from openpype.hosts.maya.api import (
from maya import cmds
class CreateAss(plugin.Creator):
class CreateArnoldSceneSource(plugin.Creator):
"""Arnold Scene Source"""
name = "ass"
@ -29,7 +29,7 @@ class CreateAss(plugin.Creator):
maskOperator = False
def __init__(self, *args, **kwargs):
super(CreateAss, self).__init__(*args, **kwargs)
super(CreateArnoldSceneSource, self).__init__(*args, **kwargs)
# Add animation data
self.data.update(lib.collect_animation_data())
@ -52,7 +52,7 @@ class CreateAss(plugin.Creator):
self.data["maskOperator"] = self.maskOperator
def process(self):
instance = super(CreateAss, self).process()
instance = super(CreateArnoldSceneSource, self).process()
nodes = []
@ -61,6 +61,6 @@ class CreateAss(plugin.Creator):
cmds.sets(nodes, rm=instance)
assContent = cmds.sets(name="content_SET")
assProxy = cmds.sets(name="proxy_SET", empty=True)
assContent = cmds.sets(name=instance + "_content_SET")
assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
cmds.sets([assContent, assProxy], forceElement=instance)

View file

@ -1,3 +1,5 @@
from maya import cmds
from openpype.hosts.maya.api import (
lib,
plugin
@ -37,3 +39,9 @@ class CreatePointCache(plugin.Creator):
# Default to not send to farm.
self.data["farm"] = False
self.data["priority"] = 50
def process(self):
instance = super(CreatePointCache, self).process()
assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
cmds.sets(assProxy, forceElement=instance)

View file

@ -1,132 +0,0 @@
import os
from openpype.pipeline import (
legacy_io,
load,
get_representation_path
)
from openpype.settings import get_project_settings
class AlembicStandinLoader(load.LoaderPlugin):
"""Load Alembic as Arnold Standin"""
families = ["animation", "model", "proxyAbc", "pointcache"]
representations = ["abc"]
label = "Import Alembic as Arnold Standin"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, options):
import maya.cmds as cmds
import mtoa.ui.arnoldmenu
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import unique_namespace
version = context["version"]
version_data = version.get("data", {})
family = version["data"]["families"]
self.log.info("version_data: {}\n".format(version_data))
self.log.info("family: {}\n".format(family))
frameStart = version_data.get("frameStart", None)
asset = context["asset"]["name"]
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# Root group
label = "{}:{}".format(namespace, name)
root = cmds.group(name=label, empty=True)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings["maya"]["load"]["colors"]
fps = legacy_io.Session["AVALON_FPS"]
c = colors.get(family[0])
if c is not None:
r = (float(c[0]) / 255)
g = (float(c[1]) / 255)
b = (float(c[2]) / 255)
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
r, g, b)
transform_name = label + "_ABC"
standinShape = cmds.ls(mtoa.ui.arnoldmenu.createStandIn())[0]
standin = cmds.listRelatives(standinShape, parent=True,
typ="transform")
standin = cmds.rename(standin, transform_name)
standinShape = cmds.listRelatives(standin, children=True)[0]
cmds.parent(standin, root)
# Set the standin filepath
cmds.setAttr(standinShape + ".dso", self.fname, type="string")
cmds.setAttr(standinShape + ".abcFPS", float(fps))
if frameStart is None:
cmds.setAttr(standinShape + ".useFrameExtension", 0)
elif "model" in family:
cmds.setAttr(standinShape + ".useFrameExtension", 0)
else:
cmds.setAttr(standinShape + ".useFrameExtension", 1)
nodes = [root, standin]
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
import pymel.core as pm
path = get_representation_path(representation)
fps = legacy_io.Session["AVALON_FPS"]
# Update the standin
standins = list()
members = pm.sets(container['objectName'], query=True)
self.log.info("container:{}".format(container))
for member in members:
shape = member.getShape()
if (shape and shape.type() == "aiStandIn"):
standins.append(shape)
for standin in standins:
standin.dso.set(path)
standin.abcFPS.set(float(fps))
if "modelMain" in container['objectName']:
standin.useFrameExtension.set(0)
else:
standin.useFrameExtension.set(1)
container = pm.PyNode(container["objectName"])
container.representation.set(str(representation["_id"]))
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
import maya.cmds as cmds
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass

View file

@ -0,0 +1,218 @@
import os
import clique
import maya.cmds as cmds
import mtoa.ui.arnoldmenu
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path
)
from openpype.hosts.maya.api.lib import (
unique_namespace, get_attribute_input, maintained_selection
)
from openpype.hosts.maya.api.pipeline import containerise
def is_sequence(files):
sequence = False
collections, remainder = clique.assemble(files)
if collections:
sequence = True
return sequence
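# Usage sketch (file names hypothetical): clique.assemble() groups
# numbered files into collections, so a frame sequence yields True:
#   is_sequence(["shot.1001.ass", "shot.1002.ass"])  # True
#   is_sequence(["shot.ass"])                        # False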
class ArnoldStandinLoader(load.LoaderPlugin):
"""Load as Arnold standin"""
families = ["ass", "animation", "model", "proxyAbc", "pointcache"]
representations = ["ass", "abc"]
label = "Load as Arnold standin"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, options):
version = context['version']
version_data = version.get("data", {})
self.log.info("version_data: {}\n".format(version_data))
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# Root group
label = "{}:{}".format(namespace, name)
root = cmds.group(name=label, empty=True)
# Set color.
settings = get_project_settings(context["project"]["name"])
color = settings['maya']['load']['colors'].get('ass')
if color is not None:
cmds.setAttr(root + ".useOutlinerColor", True)
cmds.setAttr(
root + ".outlinerColor", color[0], color[1], color[2]
)
with maintained_selection():
# Create transform with shape
transform_name = label + "_standin"
standin_shape = mtoa.ui.arnoldmenu.createStandIn()
standin = cmds.listRelatives(standin_shape, parent=True)[0]
standin = cmds.rename(standin, transform_name)
standin_shape = cmds.listRelatives(standin, shapes=True)[0]
cmds.parent(standin, root)
# Set the standin filepath
path, operator = self._setup_proxy(
standin_shape, self.fname, namespace
)
cmds.setAttr(standin_shape + ".dso", path, type="string")
sequence = is_sequence(os.listdir(os.path.dirname(self.fname)))
cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
nodes = [root, standin]
if operator is not None:
nodes.append(operator)
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def get_next_free_multi_index(self, attr_name):
"""Find the next unconnected multi index at the input attribute."""
for index in range(10000000):
connection_info = cmds.connectionInfo(
"{}[{}]".format(attr_name, index),
sourceFromDestination=True
)
if len(connection_info or []) == 0:
return index
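# For example (node name hypothetical): with "aiMerge1.inputs[0]" already
# connected, get_next_free_multi_index("aiMerge1.inputs") returns 1.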
def _get_proxy_path(self, path):
basename_split = os.path.basename(path).split(".")
proxy_basename = (
basename_split[0] + "_proxy." + ".".join(basename_split[1:])
)
proxy_path = "/".join([os.path.dirname(path), proxy_basename])
return proxy_basename, proxy_path
def _setup_proxy(self, shape, path, namespace):
proxy_basename, proxy_path = self._get_proxy_path(path)
options_node = "defaultArnoldRenderOptions"
merge_operator = get_attribute_input(options_node + ".operator")
if merge_operator is None:
merge_operator = cmds.createNode("aiMerge")
cmds.connectAttr(
merge_operator + ".message", options_node + ".operator"
)
merge_operator = merge_operator.split(".")[0]
string_replace_operator = cmds.createNode(
"aiStringReplace", name=namespace + ":string_replace_operator"
)
node_type = "alembic" if path.endswith(".abc") else "procedural"
cmds.setAttr(
string_replace_operator + ".selection",
"*.(@node=='{}')".format(node_type),
type="string"
)
cmds.setAttr(
string_replace_operator + ".match",
proxy_basename,
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path),
type="string"
)
cmds.connectAttr(
string_replace_operator + ".out",
"{}.inputs[{}]".format(
merge_operator,
self.get_next_free_multi_index(merge_operator + ".inputs")
)
)
# We setup the string operator no matter whether there is a proxy or
# not. This makes it easier to update since the string operator will
# always be created. Return original path to use for standin.
if not os.path.exists(proxy_path):
return path, string_replace_operator
return proxy_path, string_replace_operator
def update(self, container, representation):
# Update the standin
members = cmds.sets(container['objectName'], query=True)
for member in members:
if cmds.nodeType(member) == "aiStringReplace":
string_replace_operator = member
shapes = cmds.listRelatives(member, shapes=True)
if not shapes:
continue
if cmds.nodeType(shapes[0]) == "aiStandIn":
standin = shapes[0]
path = get_representation_path(representation)
proxy_basename, proxy_path = self._get_proxy_path(path)
# Whether there is a proxy or not, we still update the string operator.
# If no proxy exists, the string operator won't replace anything.
cmds.setAttr(
string_replace_operator + ".match",
"resources/" + proxy_basename,
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path),
type="string"
)
dso_path = path
if os.path.exists(proxy_path):
dso_path = proxy_path
cmds.setAttr(standin + ".dso", dso_path, type="string")
sequence = is_sequence(os.listdir(os.path.dirname(path)))
cmds.setAttr(standin + ".useFrameExtension", sequence)
cmds.setAttr(
container["objectName"] + ".representation",
str(representation["_id"]),
type="string"
)
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass

View file

@ -1,290 +0,0 @@
import os
import clique
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path
)
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.plugin import get_reference_node
from openpype.hosts.maya.api.lib import (
maintained_selection,
unique_namespace
)
from openpype.hosts.maya.api.pipeline import containerise
class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"""Load Arnold Proxy as reference"""
families = ["ass"]
representations = ["ass"]
label = "Reference .ASS standin with Proxy"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, options):
import maya.cmds as cmds
import pymel.core as pm
version = context['version']
version_data = version.get("data", {})
self.log.info("version_data: {}\n".format(version_data))
frameStart = version_data.get("frameStart", None)
try:
family = context["representation"]["context"]["family"]
except ValueError:
family = "ass"
with maintained_selection():
groupName = "{}:{}".format(namespace, name)
path = self.fname
proxyPath_base = os.path.splitext(path)[0]
if frameStart is not None:
proxyPath_base = os.path.splitext(proxyPath_base)[0]
publish_folder = os.path.split(path)[0]
files_in_folder = os.listdir(publish_folder)
collections, remainder = clique.assemble(files_in_folder)
if collections:
hashes = collections[0].padding * '#'
coll = collections[0].format('{head}[index]{tail}')
filename = coll.replace('[index]', hashes)
path = os.path.join(publish_folder, filename)
proxyPath = proxyPath_base + ".ma"
project_name = context["project"]["name"]
file_url = self.prepare_root_value(proxyPath,
project_name)
nodes = cmds.file(file_url,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName=groupName)
cmds.makeIdentity(groupName, apply=False, rotate=True,
translate=True, scale=True)
# Set attributes
proxyShape = pm.ls(nodes, type="mesh")[0]
proxyShape.aiTranslator.set('procedural')
proxyShape.dso.set(path)
proxyShape.aiOverrideShaders.set(0)
settings = get_project_settings(project_name)
colors = settings['maya']['load']['colors']
c = colors.get(family)
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
from maya import cmds
import pymel.core as pm
node = container["objectName"]
representation["context"].pop("frame", None)
path = get_representation_path(representation)
print(path)
# path = self.fname
print(self.fname)
proxyPath = os.path.splitext(path)[0] + ".ma"
print(proxyPath)
# Get reference node from container members
members = cmds.sets(node, query=True, nodesOnly=True)
reference_node = get_reference_node(members)
assert os.path.exists(proxyPath), "%s does not exist." % proxyPath
try:
file_url = self.prepare_root_value(proxyPath,
representation["context"]
["project"]
["name"])
content = cmds.file(file_url,
loadReference=reference_node,
type="mayaAscii",
returnNewNodes=True)
# Set attributes
proxyShape = pm.ls(content, type="mesh")[0]
proxyShape.aiTranslator.set('procedural')
proxyShape.dso.set(path)
proxyShape.aiOverrideShaders.set(0)
except RuntimeError as exc:
# When changing a reference to a file that has load errors the
# command will raise an error even if the file is still loaded
# correctly (e.g. when raising errors on Arnold attributes)
# When the file is loaded and has content, we consider it's fine.
if not cmds.referenceQuery(reference_node, isLoaded=True):
raise
content = cmds.referenceQuery(reference_node,
nodes=True,
dagPath=True)
if not content:
raise
self.log.warning("Ignoring file read error:\n%s", exc)
# Add new nodes of the reference to the container
cmds.sets(content, forceElement=node)
# Remove any placeHolderList attribute entries from the set that
# are remaining from nodes being removed from the referenced file.
members = cmds.sets(node, query=True)
invalid = [x for x in members if ".placeHolderList" in x]
if invalid:
cmds.sets(invalid, remove=node)
# Update metadata
cmds.setAttr("{}.representation".format(node),
str(representation["_id"]),
type="string")
class AssStandinLoader(load.LoaderPlugin):
"""Load .ASS file as standin"""
families = ["ass"]
representations = ["ass"]
label = "Load .ASS file as standin"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, options):
import maya.cmds as cmds
import mtoa.ui.arnoldmenu
import pymel.core as pm
version = context['version']
version_data = version.get("data", {})
self.log.info("version_data: {}\n".format(version_data))
frameStart = version_data.get("frameStart", None)
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# cmds.loadPlugin("gpuCache", quiet=True)
# Root group
label = "{}:{}".format(namespace, name)
root = pm.group(name=label, empty=True)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings['maya']['load']['colors']
c = colors.get('ass')
if c is not None:
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
c[0], c[1], c[2])
# Create transform with shape
transform_name = label + "_ASS"
# transform = pm.createNode("transform", name=transform_name,
# parent=root)
standinShape = pm.PyNode(mtoa.ui.arnoldmenu.createStandIn())
standin = standinShape.getParent()
standin.rename(transform_name)
pm.parent(standin, root)
# Set the standin filepath
standinShape.dso.set(self.fname)
if frameStart is not None:
standinShape.useFrameExtension.set(1)
nodes = [root, standin]
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
import pymel.core as pm
path = get_representation_path(representation)
files_in_path = os.listdir(os.path.split(path)[0])
sequence = 0
collections, remainder = clique.assemble(files_in_path)
if collections:
sequence = 1
# Update the standin
standins = list()
members = pm.sets(container['objectName'], query=True)
for member in members:
shape = member.getShape()
if (shape and shape.type() == "aiStandIn"):
standins.append(shape)
for standin in standins:
standin.dso.set(path)
standin.useFrameExtension.set(sequence)
container = pm.PyNode(container["objectName"])
container.representation.set(str(representation["_id"]))
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
import maya.cmds as cmds
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass

View file

@ -1,19 +1,18 @@
from maya import cmds
from openpype.pipeline.publish import KnownPublishError
import pyblish.api
class CollectAssData(pyblish.api.InstancePlugin):
"""Collect Ass data."""
class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
"""Collect Arnold Scene Source data."""
# Offset to be after renderable camera collection.
order = pyblish.api.CollectorOrder + 0.2
label = 'Collect Ass'
label = "Collect Arnold Scene Source"
families = ["ass"]
def process(self, instance):
objsets = instance.data['setMembers']
objsets = instance.data["setMembers"]
for objset in objsets:
objset = str(objset)
@ -21,15 +20,12 @@ class CollectAssData(pyblish.api.InstancePlugin):
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
if "content_SET" in objset:
instance.data['setMembers'] = members
self.log.debug('content members: {}'.format(members))
elif objset.startswith("proxy_SET"):
if len(members) != 1:
msg = "You have multiple proxy meshes, please only use one"
raise KnownPublishError(msg)
instance.data['proxy'] = members
self.log.debug('proxy members: {}'.format(members))
if objset.endswith("content_SET"):
instance.data["setMembers"] = cmds.ls(members, long=True)
self.log.debug("content members: {}".format(members))
elif objset.endswith("proxy_SET"):
instance.data["proxy"] = cmds.ls(members, long=True)
self.log.debug("proxy members: {}".format(members))
# Use camera in object set if present else default to render globals
# camera.

View file

@ -1,3 +1,5 @@
from maya import cmds
import pyblish.api
@ -12,3 +14,31 @@ class CollectPointcache(pyblish.api.InstancePlugin):
def process(self, instance):
if instance.data.get("farm"):
instance.data["families"].append("publish.farm")
proxy_set = None
for node in instance.data["setMembers"]:
if cmds.nodeType(node) != "objectSet":
continue
members = cmds.sets(node, query=True)
if members is None:
self.log.warning("Skipped empty objectset: \"%s\" " % node)
continue
if node.endswith("proxy_SET"):
proxy_set = node
instance.data["proxy"] = []
instance.data["proxyRoots"] = []
for member in members:
instance.data["proxy"].extend(cmds.ls(member, long=True))
instance.data["proxyRoots"].extend(
cmds.ls(member, long=True)
)
instance.data["proxy"].extend(
cmds.listRelatives(member, shapes=True, fullPath=True)
)
self.log.debug(
"proxy members: {}".format(instance.data["proxy"])
)
if proxy_set:
instance.remove(proxy_set)
instance.data["setMembers"].remove(proxy_set)

View file

@ -42,7 +42,6 @@ Provides:
import re
import os
import platform
import json
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
@ -320,7 +319,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"renderSetupIncludeLights"
),
"strict_error_checking": render_instance.data.get(
"strict_error_checking")
"strict_error_checking", True
)
}
# Collect Deadline url if Deadline module is enabled

View file

@ -0,0 +1,160 @@
import os
from maya import cmds
import arnold
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
maintained_selection, attribute_values, delete_after
)
class ExtractArnoldSceneSource(publish.Extractor):
"""Extract the content of the instance to an Arnold Scene Source file."""
label = "Extract Arnold Scene Source"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
file_path = os.path.join(staging_dir, filename)
# Mask
mask = arnold.AI_NODE_ALL
node_types = {
"options": arnold.AI_NODE_OPTIONS,
"camera": arnold.AI_NODE_CAMERA,
"light": arnold.AI_NODE_LIGHT,
"shape": arnold.AI_NODE_SHAPE,
"shader": arnold.AI_NODE_SHADER,
"override": arnold.AI_NODE_OVERRIDE,
"driver": arnold.AI_NODE_DRIVER,
"filter": arnold.AI_NODE_FILTER,
"color_manager": arnold.AI_NODE_COLOR_MANAGER,
"operator": arnold.AI_NODE_OPERATOR
}
for key in node_types.keys():
if instance.data.get("mask" + key.title()):
mask = mask ^ node_types[key]
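# Sketch of the masking above: AI_NODE_ALL starts with every node-type
# bit set, and XOR with a type's bit clears it, so e.g. enabling
# "maskShader" on the instance excludes shader nodes from the export.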
# Motion blur
attribute_data = {
"defaultArnoldRenderOptions.motion_blur_enable": instance.data.get(
"motionBlur", True
),
"defaultArnoldRenderOptions.motion_steps": instance.data.get(
"motionBlurKeys", 2
),
"defaultArnoldRenderOptions.motion_frames": instance.data.get(
"motionBlurLength", 0.5
)
}
# Write out .ass file
kwargs = {
"filename": file_path,
"startFrame": instance.data.get("frameStartHandle", 1),
"endFrame": instance.data.get("frameEndHandle", 1),
"frameStep": instance.data.get("step", 1),
"selected": True,
"asciiAss": self.asciiAss,
"shadowLinks": True,
"lightLinks": True,
"boundingBox": True,
"expandProcedurals": instance.data.get("expandProcedurals", False),
"camera": instance.data["camera"],
"mask": mask
}
filenames = self._extract(
instance.data["setMembers"], attribute_data, kwargs
)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
"name": "ass",
"ext": "ass",
"files": filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
"frameStart": kwargs["startFrame"]
}
instance.data["representations"].append(representation)
self.log.info(
"Extracted instance {} to: {}".format(instance.name, staging_dir)
)
# Extract proxy.
if not instance.data.get("proxy", []):
return
kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
filenames = self._extract(
instance.data["proxy"], attribute_data, kwargs
)
representation = {
"name": "proxy",
"ext": "ass",
"files": filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
"frameStart": kwargs["startFrame"],
"outputName": "proxy"
}
instance.data["representations"].append(representation)
def _extract(self, nodes, attribute_data, kwargs):
self.log.info("Writing: " + kwargs["filename"])
filenames = []
# Duplicating nodes so they are direct children of the world. This
# makes the hierarchy of any exported ass file the same.
with delete_after() as delete_bin:
duplicate_nodes = []
for node in nodes:
duplicate_transform = cmds.duplicate(node)[0]
# Discard the children.
shapes = cmds.listRelatives(duplicate_transform, shapes=True)
children = cmds.listRelatives(
duplicate_transform, children=True
)
cmds.delete(set(children) - set(shapes))
duplicate_transform = cmds.parent(
duplicate_transform, world=True
)[0]
cmds.rename(duplicate_transform, node.split("|")[-1])
duplicate_transform = "|" + node.split("|")[-1]
duplicate_nodes.append(duplicate_transform)
delete_bin.append(duplicate_transform)
with attribute_values(attribute_data):
with maintained_selection():
self.log.info(
"Writing: {}".format(duplicate_nodes)
)
cmds.select(duplicate_nodes, noExpand=True)
self.log.info(
"Extracting ass sequence with: {}".format(kwargs)
)
exported_files = cmds.arnoldExportAss(**kwargs)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
return filenames

View file

@ -1,106 +0,0 @@
import os
from maya import cmds
import arnold
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection, attribute_values
class ExtractAssStandin(publish.Extractor):
"""Extract the content of the instance to a ass file"""
label = "Arnold Scene Source (.ass)"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
filenames = []
file_path = os.path.join(staging_dir, filename)
# Mask
mask = arnold.AI_NODE_ALL
node_types = {
"options": arnold.AI_NODE_OPTIONS,
"camera": arnold.AI_NODE_CAMERA,
"light": arnold.AI_NODE_LIGHT,
"shape": arnold.AI_NODE_SHAPE,
"shader": arnold.AI_NODE_SHADER,
"override": arnold.AI_NODE_OVERRIDE,
"driver": arnold.AI_NODE_DRIVER,
"filter": arnold.AI_NODE_FILTER,
"color_manager": arnold.AI_NODE_COLOR_MANAGER,
"operator": arnold.AI_NODE_OPERATOR
}
for key in node_types.keys():
if instance.data.get("mask" + key.title()):
mask = mask ^ node_types[key]
# Motion blur
values = {
"defaultArnoldRenderOptions.motion_blur_enable": instance.data.get(
"motionBlur", True
),
"defaultArnoldRenderOptions.motion_steps": instance.data.get(
"motionBlurKeys", 2
),
"defaultArnoldRenderOptions.motion_frames": instance.data.get(
"motionBlurLength", 0.5
)
}
# Write out .ass file
kwargs = {
"filename": file_path,
"startFrame": instance.data.get("frameStartHandle", 1),
"endFrame": instance.data.get("frameEndHandle", 1),
"frameStep": instance.data.get("step", 1),
"selected": True,
"asciiAss": self.asciiAss,
"shadowLinks": True,
"lightLinks": True,
"boundingBox": True,
"expandProcedurals": instance.data.get("expandProcedurals", False),
"camera": instance.data["camera"],
"mask": mask
}
self.log.info("Writing: '%s'" % file_path)
with attribute_values(values):
with maintained_selection():
self.log.info(
"Writing: {}".format(instance.data["setMembers"])
)
cmds.select(instance.data["setMembers"], noExpand=True)
self.log.info(
"Extracting ass sequence with: {}".format(kwargs)
)
exported_files = cmds.arnoldExportAss(**kwargs)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'ass',
'ext': 'ass',
'files': filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
'frameStart': kwargs["startFrame"]
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s"
% (instance.name, staging_dir))

View file

@ -1,81 +0,0 @@
import os
import contextlib
from maya import cmds
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection
class ExtractAssProxy(publish.Extractor):
"""Extract proxy model as Maya Ascii to use as arnold standin
"""
order = publish.Extractor.order + 0.2
label = "Ass Proxy (Maya ASCII)"
hosts = ["maya"]
families = ["ass"]
def process(self, instance):
@contextlib.contextmanager
def unparent(root):
"""Temporarily unparent `root`"""
parent = cmds.listRelatives(root, parent=True)
if parent:
cmds.parent(root, world=True)
yield
self.log.info("{} - {}".format(root, parent))
cmds.parent(root, parent)
else:
yield
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name)
path = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
# Get only the shape contents we need in such a way that we avoid
# taking along intermediateObjects
proxy = instance.data.get('proxy', None)
if not proxy:
self.log.info("no proxy mesh")
return
members = cmds.ls(proxy,
dag=True,
transforms=True,
noIntermediate=True)
self.log.info(members)
with maintained_selection():
with unparent(members[0]):
cmds.select(members, noExpand=True)
cmds.file(path,
force=True,
typ="mayaAscii",
exportSelected=True,
preserveReferences=False,
channels=False,
constraints=False,
expressions=False,
constructionHistory=False)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'ma',
'ext': 'ma',
'files': filename,
"stagingDir": stagingdir
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -1,4 +1,5 @@
import os
import copy
from maya import cmds
@ -9,6 +10,7 @@ from openpype.hosts.maya.api.lib import (
maintained_selection,
iter_visible_nodes_in_range
)
from openpype.lib import StringTemplate
class ExtractAlembic(publish.Extractor):
@ -23,9 +25,7 @@ class ExtractAlembic(publish.Extractor):
label = "Extract Pointcache (Alembic)"
hosts = ["maya"]
families = ["pointcache",
"model",
"vrayproxy"]
families = ["pointcache", "model", "vrayproxy"]
targets = ["local", "remote"]
def process(self, instance):
@ -87,6 +87,7 @@ class ExtractAlembic(publish.Extractor):
end=end))
suspend = not instance.data.get("refresh", False)
self.log.info(nodes)
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(nodes, noExpand=True)
@ -101,9 +102,9 @@ class ExtractAlembic(publish.Extractor):
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"name": "abc",
"ext": "abc",
"files": filename,
"stagingDir": dirname
}
instance.data["representations"].append(representation)
@ -112,6 +113,48 @@ class ExtractAlembic(publish.Extractor):
self.log.info("Extracted {} to {}".format(instance, dirname))
# Extract proxy.
if not instance.data.get("proxy"):
return
path = path.replace(".abc", "_proxy.abc")
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
options["root"] = instance.data["proxyRoots"]
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(instance.data["proxy"])
extract_alembic(
file=path,
startFrame=start,
endFrame=end,
**options
)
template_data = copy.deepcopy(instance.data["anatomyData"])
template_data.update({"ext": "abc"})
templates = instance.context.data["anatomy"].templates["publish"]
published_filename_without_extension = StringTemplate(
templates["file"]
).format(template_data).replace(".abc", "_proxy")
transfers = []
destination = os.path.join(
instance.data["resourcesDir"],
filename.replace(
filename.split(".")[0],
published_filename_without_extension
)
)
transfers.append((path, destination))
for source, destination in transfers:
self.log.debug("Transfer: {} > {}".format(source, destination))
instance.data["transfers"] = transfers
def get_members_and_roots(self, instance):
return instance[:], instance.data.get("setMembers")

View file

@ -0,0 +1,106 @@
import maya.cmds as cmds
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder, PublishValidationError
)
class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
"""Validate Arnold Scene Source.
We require at least one root node/parent for the meshes, to ensure we
can duplicate the nodes and preserve their names.
When using proxies, the content and proxy nodes need to share the same
names and must not be parented directly to the world. In practice this
means at least two groups: one with the content nodes and one with the
proxy nodes.
"""
order = ValidateContentsOrder
hosts = ["maya"]
families = ["ass"]
label = "Validate Arnold Scene Source"
def _get_nodes_data(self, nodes):
ungrouped_nodes = []
nodes_by_name = {}
parents = []
for node in nodes:
node_split = node.split("|")
if len(node_split) == 2:
ungrouped_nodes.append(node)
parent = "|".join(node_split[:-1])
if parent:
parents.append(parent)
nodes_by_name[node_split[-1]] = node
for shape in cmds.listRelatives(node, shapes=True):
nodes_by_name[shape.split("|")[-1]] = shape
return ungrouped_nodes, nodes_by_name, parents
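# For example (node names hypothetical): "|pCube1" splits into
# ["", "pCube1"] (length 2), so it sits directly under the world and
# counts as ungrouped, while "|grp|pCube1" does not.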
def process(self, instance):
ungrouped_nodes = []
nodes, content_nodes_by_name, content_parents = self._get_nodes_data(
instance.data["setMembers"]
)
ungrouped_nodes.extend(nodes)
nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_data(
instance.data.get("proxy", [])
)
ungrouped_nodes.extend(nodes)
# Validate against nodes directly parented to world.
if ungrouped_nodes:
raise PublishValidationError(
"Found nodes parented to the world: {}\n"
"All nodes need to be grouped.".format(ungrouped_nodes)
)
# Proxy validation.
if not instance.data.get("proxy", []):
return
# Validate for content and proxy nodes amount being the same.
if len(instance.data["setMembers"]) != len(instance.data["proxy"]):
raise PublishValidationError(
"Amount of content nodes ({}) and proxy nodes ({}) needs to "
"be the same.".format(
len(instance.data["setMembers"]),
len(instance.data["proxy"])
)
)
# Validate against content and proxy nodes sharing same parent.
if list(set(content_parents) & set(proxy_parents)):
raise PublishValidationError(
"Content and proxy nodes cannot share the same parent."
)
# Validate for content and proxy nodes sharing same names.
sorted_content_names = sorted(content_nodes_by_name.keys())
sorted_proxy_names = sorted(proxy_nodes_by_name.keys())
odd_content_names = list(
set(sorted_content_names) - set(sorted_proxy_names)
)
odd_content_nodes = [
content_nodes_by_name[x] for x in odd_content_names
]
odd_proxy_names = list(
set(sorted_proxy_names) - set(sorted_content_names)
)
odd_proxy_nodes = [
proxy_nodes_by_name[x] for x in odd_proxy_names
]
if not sorted_content_names == sorted_proxy_names:
raise PublishValidationError(
"Content and proxy nodes need to share the same names.\n"
"Content nodes not matching: {}\n"
"Proxy nodes not matching: {}".format(
odd_content_nodes, odd_proxy_nodes
)
)

View file

@ -2,11 +2,13 @@ import os
import types
import maya.cmds as cmds
from mtoa.core import createOptions
import pyblish.api
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError
)
@ -34,8 +36,9 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
"defaultArnoldRenderOptions.pspath"
)
except ValueError:
assert False, ("Can not validate, render setting were not opened "
"yet so Arnold setting cannot be validate")
raise PublishValidationError(
"Default Arnold options has not been created yet."
)
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
@ -66,6 +69,8 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
@classmethod
def repair(cls, instance):
createOptions()
texture_path = cmds.getAttr("defaultArnoldRenderOptions.tspath")
procedural_path = cmds.getAttr("defaultArnoldRenderOptions.pspath")

View file

@ -53,12 +53,18 @@ class GizmoMenu():
item_type = item.get("sourcetype")
if item_type == ("python" or "file"):
if item_type == "python":
parent.addCommand(
item["title"],
command=str(item["command"]),
icon=item.get("icon"),
shortcut=item.get("hotkey")
shortcut=item.get("shortcut")
)
elif item_type == "file":
parent.addCommand(
item['title'],
"nuke.createNode('{}')".format(item.get('file_name')),
shortcut=item.get('shortcut')
)
# add separator

View file

@ -1,7 +1,7 @@
import os
import nuke
import pyblish.api
import openpype.api as api
from openpype.lib import get_version_from_path
import openpype.hosts.nuke.api as napi
from openpype.pipeline import KnownPublishError
@ -57,7 +57,7 @@ class CollectContextData(pyblish.api.ContextPlugin):
"fps": root_node['fps'].value(),
"currentFile": current_file,
"version": int(api.get_version_from_path(current_file)),
"version": int(get_version_from_path(current_file)),
"host": pyblish.api.current_host(),
"hostVersion": nuke.NUKE_VERSION_STRING

View file

@ -117,12 +117,12 @@ def run_subprocess(*args, **kwargs):
full_output = ""
_stdout, _stderr = proc.communicate()
if _stdout:
_stdout = _stdout.decode("utf-8")
_stdout = _stdout.decode("utf-8", errors="backslashreplace")
full_output += _stdout
logger.debug(_stdout)
if _stderr:
_stderr = _stderr.decode("utf-8")
_stderr = _stderr.decode("utf-8", errors="backslashreplace")
# Add additional line break if output already contains stdout
if full_output:
full_output += "\n"

View file

@ -35,7 +35,7 @@ class OpenPypeVersion:
self.prerelease = prerelease
is_valid = True
if not major or not minor or not patch:
if major is None or minor is None or patch is None:
is_valid = False
self.is_valid = is_valid
@ -157,7 +157,7 @@ def get_openpype_version_from_path(path, build=True):
# fix path for application bundle on macos
if platform.system().lower() == "darwin":
path = os.path.join(path, "Contents", "MacOS", "lib", "Python")
path = os.path.join(path, "MacOS")
version_file = os.path.join(path, "openpype", "version.py")
if not os.path.isfile(version_file):
@ -189,6 +189,11 @@ def get_openpype_executable():
exe_list = config.GetConfigEntryWithDefault("OpenPypeExecutable", "")
dir_list = config.GetConfigEntryWithDefault(
"OpenPypeInstallationDirs", "")
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
exe_list = exe_list.replace("\\ ", " ")
dir_list = dir_list.replace("\\ ", " ")
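# Example (path hypothetical): paths pasted from the macOS Terminal have
# escaped spaces, so "/Applications/OpenPype\ 3.app" becomes
# "/Applications/OpenPype 3.app" after the cleanup above.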
return exe_list, dir_list
@ -218,8 +223,8 @@ def get_requested_openpype_executable(
requested_version_obj = OpenPypeVersion.from_string(requested_version)
if not requested_version_obj:
print((
">>> Requested version does not match version regex \"{}\""
).format(VERSION_REGEX))
">>> Requested version '{}' does not match version regex '{}'"
).format(requested_version, VERSION_REGEX))
return None
print((
@ -272,7 +277,8 @@ def get_requested_openpype_executable(
# Deadline decide.
exe_list = [
os.path.join(version_dir, "openpype_console.exe"),
os.path.join(version_dir, "openpype_console")
os.path.join(version_dir, "openpype_console"),
os.path.join(version_dir, "MacOS", "openpype_console")
]
return FileUtils.SearchFileList(";".join(exe_list))

View file

@ -73,7 +73,7 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
"""
# fix path for application bundle on macos
if platform.system().lower() == "darwin":
path = os.path.join(path, "Contents", "MacOS", "lib", "Python")
path = os.path.join(path, "MacOS")
version_file = os.path.join(path, "openpype", "version.py")
if not os.path.isfile(version_file):
@ -107,8 +107,11 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
"Scanning for compatible requested "
f"version {requested_version}"))
dir_list = self.GetConfigEntry("OpenPypeInstallationDirs")
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
dir_list = dir_list.replace("\\ ", " ")
install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
if dir:
if install_dir:
sub_dirs = [
f.path for f in os.scandir(install_dir)
if f.is_dir()
@ -120,6 +123,9 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
openpype_versions.append((version, subdir))
exe_list = self.GetConfigEntry("OpenPypeExecutable")
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
exe_list = exe_list.replace("\\ ", " ")
exe = FileUtils.SearchFileList(exe_list)
if openpype_versions:
# if looking for requested compatible version,
@ -161,7 +167,9 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
os.path.join(
compatible_versions[-1][1], "openpype_console.exe"),
os.path.join(
compatible_versions[-1][1], "openpype_console")
compatible_versions[-1][1], "openpype_console"),
os.path.join(
compatible_versions[-1][1], "MacOS", "openpype_console")
]
exe = FileUtils.SearchFileList(";".join(exe_list))

View file

@ -204,10 +204,10 @@ def info_about_input(oiiotool_path, filepath):
_stdout, _stderr = popen.communicate()
output = ""
if _stdout:
output += _stdout.decode("utf-8")
output += _stdout.decode("utf-8", errors="backslashreplace")
if _stderr:
output += _stderr.decode("utf-8")
output += _stderr.decode("utf-8", errors="backslashreplace")
output = output.replace("\r\n", "\n")
xml_started = False

View file

@ -60,6 +60,7 @@ class BaseAnatomy(object):
def __init__(self, project_doc, local_settings, site_name):
project_name = project_doc["name"]
self.project_name = project_name
self.project_code = project_doc["data"]["code"]
if (site_name and
site_name not in ["studio", "local", get_local_site_id()]):

View file

@ -28,7 +28,6 @@ from openpype.lib import (
TemplateUnsolved,
)
from openpype.pipeline import (
schema,
legacy_io,
Anatomy,
)
@ -643,7 +642,10 @@ def get_representation_path(representation, root=None, dbcon=None):
def path_from_config():
try:
version_, subset, asset, project = dbcon.parenthood(representation)
project_name = dbcon.active_project()
version_, subset, asset, project = get_representation_parents(
project_name, representation
)
except ValueError:
log.debug(
"Representation %s wasn't found in database, "

View file

@ -10,11 +10,17 @@ import six
import pyblish.plugin
import pyblish.api
from openpype.lib import Logger, filter_profiles
from openpype.lib import (
Logger,
filter_profiles
)
from openpype.settings import (
get_project_settings,
get_system_settings,
)
from openpype.pipeline import (
tempdir
)
from .contants import (
DEFAULT_PUBLISH_TEMPLATE,
@ -595,7 +601,7 @@ def context_plugin_should_run(plugin, context):
Args:
plugin (pyblish.api.Plugin): Plugin with filters.
context (pyblish.api.Context): Pyblish context with insances.
context (pyblish.api.Context): Pyblish context with instances.
Returns:
bool: Context plugin should run based on valid instances.
@ -609,12 +615,21 @@ def context_plugin_should_run(plugin, context):
def get_instance_staging_dir(instance):
"""Unified way how staging dir is stored and created on instances.
First check if 'stagingDir' is already set in instance data. If there is
not create new in tempdir.
First check if 'stagingDir' is already set in instance data.
If it is already set, a new temporary directory is not created.
It also supports `OPENPYPE_TMPDIR`, so a studio can define its own shared
temporary repository per project, or even per more granular context.
Template formatting with optional keys is supported as well. The folder is
created in case it doesn't exist.
Available anatomy formatting keys:
- root[work | <root name key>]
- project[name | code]
Note:
Staging dir does not have to be necessarily in tempdir so be carefull
about it's usage.
Staging dir does not have to be necessarily in tempdir so be careful
about its usage.
Args:
instance (pyblish.lib.Instance): Instance for which we want to get
@ -623,12 +638,27 @@ def get_instance_staging_dir(instance):
Returns:
str: Path to staging dir of instance.
"""
staging_dir = instance.data.get('stagingDir')
if staging_dir:
return staging_dir
staging_dir = instance.data.get("stagingDir")
if not staging_dir:
anatomy = instance.context.data.get("anatomy")
# get customized tempdir path from `OPENPYPE_TMPDIR` env var
custom_temp_dir = tempdir.create_custom_tempdir(
anatomy.project_name, anatomy)
if custom_temp_dir:
staging_dir = os.path.normpath(
tempfile.mkdtemp(
prefix="pyblish_tmp_",
dir=custom_temp_dir
)
)
else:
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data["stagingDir"] = staging_dir
instance.data['stagingDir'] = staging_dir
return staging_dir
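As a usage sketch (hypothetical template value, not part of this change), a studio could point staging dirs at shared storage before publishing; only the keys listed in the docstring are available:

    import os
    os.environ["OPENPYPE_TMPDIR"] = "{root[work]}/{project[code]}/temp"
    # get_instance_staging_dir(instance) would then create something like
    # <work root>/<project code>/temp/pyblish_tmp_xxxxxxxx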

View file

@ -0,0 +1,59 @@
"""
Temporary folder operations
"""
import os
from openpype.lib import StringTemplate
from openpype.pipeline import Anatomy
def create_custom_tempdir(project_name, anatomy=None):
"""Create custom tempdir.
Template path formatting supports:
- optional key formatting
- available keys:
- root[work | <root name key>]
- project[name | code]
Args:
project_name (str): project name
anatomy (openpype.pipeline.Anatomy)[optional]: Anatomy object
Returns:
str | None: formatted path or None
"""
openpype_tempdir = os.getenv("OPENPYPE_TMPDIR")
if not openpype_tempdir:
return
custom_tempdir = None
if "{" in openpype_tempdir:
if anatomy is None:
anatomy = Anatomy(project_name)
# create base format data
data = {
"root": anatomy.roots,
"project": {
"name": anatomy.project_name,
"code": anatomy.project_code,
}
}
# path is anatomy template
custom_tempdir = StringTemplate.format_template(
openpype_tempdir, data).normalized()
else:
# path is absolute
custom_tempdir = openpype_tempdir
# create the dir path if it doesn't exist
if not os.path.exists(custom_tempdir):
try:
# create it if it doesn't exist
os.makedirs(custom_tempdir)
except IOError as error:
raise IOError(
"Path couldn't be created: {}".format(error)) from error
return custom_tempdir
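A minimal usage sketch, assuming `OPENPYPE_TMPDIR` is set and "demo_project" is a hypothetical existing project (the Anatomy lookup needs a reachable database):

    tmp_root = create_custom_tempdir("demo_project")
    if tmp_root is not None:
        print("custom temp root:", tmp_root)
    # returns None when OPENPYPE_TMPDIR is not set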

View file

@ -34,12 +34,24 @@ class AddSyncSite(load.LoaderPlugin):
return self._sync_server
def load(self, context, name=None, namespace=None, data=None):
self.log.info("Adding {} to representation: {}".format(
data["site_name"], data["_id"]))
family = context["representation"]["context"]["family"]
project_name = data["project_name"]
repre_id = data["_id"]
"""Adds site skeleton information on representation_id.
Looks for containers loaded in the workfile and adds the site skeleton
to them too (i.e. they should be downloaded as well).
Args:
context (dict):
name (str):
namespace (str):
data (dict): expects {"site_name": SITE_NAME_TO_ADD}
"""
# self.log won't propagate
project_name = context["project"]["name"]
repre_doc = context["representation"]
family = repre_doc["context"]["family"]
repre_id = repre_doc["_id"]
site_name = data["site_name"]
print("Adding {} to representation: {}".format(
data["site_name"], repre_id))
self.sync_server.add_site(project_name, repre_id, site_name,
force=True)
@ -52,6 +64,8 @@ class AddSyncSite(load.LoaderPlugin):
)
for link_repre_id in links:
try:
print("Adding {} to linked representation: {}".format(
data["site_name"], link_repre_id))
self.sync_server.add_site(project_name, link_repre_id,
site_name,
force=False)

View file

@ -3,7 +3,10 @@ from openpype.pipeline import load
class RemoveSyncSite(load.LoaderPlugin):
"""Remove sync site and its files on representation"""
"""Remove sync site and its files on representation.
Removes files only on local site!
"""
representations = ["*"]
families = ["*"]
@ -24,13 +27,18 @@ class RemoveSyncSite(load.LoaderPlugin):
return self._sync_server
def load(self, context, name=None, namespace=None, data=None):
self.log.info("Removing {} on representation: {}".format(
data["site_name"], data["_id"]))
self.sync_server.remove_site(data["project_name"],
data["_id"],
data["site_name"],
project_name = context["project"]["name"]
repre_doc = context["representation"]
repre_id = repre_doc["_id"]
site_name = data["site_name"]
print("Removing {} on representation: {}".format(site_name, repre_id))
self.sync_server.remove_site(project_name,
repre_id,
site_name,
True)
self.log.debug("Site added.")
self.log.debug("Site removed.")
def filepath_from_context(self, context):
"""No real file loading"""

View file

@ -506,6 +506,43 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
return version_doc
def _validate_repre_files(self, files, is_sequence_representation):
"""Validate representation files before transfer preparation.
Check that files contain only filenames instead of full paths, and that
a sequence doesn't contain more than one collection or any remainders.
Args:
files (Union[str, List[str]]): Files from representation.
is_sequence_representation (bool): Files are for sequence.
Raises:
KnownPublishError: If validations don't pass.
"""
if not files:
return
if not is_sequence_representation:
files = [files]
if any(os.path.isabs(fname) for fname in files):
raise KnownPublishError("Given file names contain full paths")
if not is_sequence_representation:
return
src_collections, remainders = clique.assemble(files)
if len(files) < 2 or len(src_collections) != 1 or remainders:
raise KnownPublishError((
"Files of representation do not contain proper"
" sequence files.\nCollected collections: {}"
"\nCollected remainders: {}"
).format(
", ".join([str(col) for col in src_collections]),
", ".join([str(rem) for rem in remainders])
))
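For intuition, this is how `clique.assemble` drives the check above (hypothetical file names): one clean sequence yields a single collection and no remainders, anything else raises.

    import clique
    cols, rem = clique.assemble(["a.1001.exr", "a.1002.exr", "note.txt"])
    # cols -> one collection "a.%04d.exr [1001-1002]", rem -> ["note.txt"]
    # remainders are present, so _validate_repre_files would raise here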
def prepare_representation(self, repre,
template_name,
existing_repres_by_name,
@ -587,7 +624,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
is_udim = bool(repre.get("udim"))
# handle publish in place
if "originalDirname" in template:
if "{originalDirname}" in template:
# store as originalDirname only the original value without project root
# if instance collected originalDirname is present, it should be
# used for all representations
@ -606,24 +643,64 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
template_data["originalDirname"] = without_root
is_sequence_representation = isinstance(files, (list, tuple))
if is_sequence_representation:
# Collection of files (sequence)
if any(os.path.isabs(fname) for fname in files):
raise KnownPublishError("Given file names contain full paths")
self._validate_repre_files(files, is_sequence_representation)
# Output variables of conditions below:
# - transfers (List[Tuple[str, str]]): src -> dst filepaths to copy
# - repre_context (Dict[str, Any]): context data used to fill template
# - template_data (Dict[str, Any]): source data used to fill template
# - to add required data to 'repre_context' not used for
# formatting
# - anatomy_filled (Dict[str, Any]): filled anatomy of last file
# - to fill 'publishDir' on instance.data -> not ideal
# Treat template with 'originalBasename' in a special way
if "{originalBasename}" in template:
# Remove 'frame' from template data
template_data.pop("frame", None)
# Find out first frame string value
first_index_padded = None
if not is_udim and is_sequence_representation:
col = clique.assemble(files)[0][0]
sorted_frames = tuple(sorted(col.indexes))
# First frame used for end value
first_frame = sorted_frames[0]
# Get last frame for padding
last_frame = sorted_frames[-1]
# Use padding from collection of length of last frame as string
padding = max(col.padding, len(str(last_frame)))
first_index_padded = get_frame_padded(
frame=first_frame,
padding=padding
)
# Convert a single file to a list, as the remaining part only
# creates transfers (iteration over files)
if not is_sequence_representation:
files = [files]
repre_context = None
transfers = []
for src_file_name in files:
template_data["originalBasename"], _ = os.path.splitext(
src_file_name)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled[template_name]["path"]
src = os.path.join(stagingdir, src_file_name)
transfers.append((src, dst))
if repre_context is None:
repre_context = dst.used_values
if not is_udim and first_index_padded is not None:
repre_context["frame"] = first_index_padded
elif is_sequence_representation:
# Collection of files (sequence)
src_collections, remainders = clique.assemble(files)
if len(files) < 2 or len(src_collections) != 1 or remainders:
raise KnownPublishError((
"Files of representation does not contain proper"
" sequence files.\nCollected collections: {}"
"\nCollected remainders: {}"
).format(
", ".join([str(col) for col in src_collections]),
", ".join([str(rem) for rem in remainders])
))
src_collection = src_collections[0]
template_data["originalBasename"] = src_collection.head[:-1]
destination_indexes = list(src_collection.indexes)
# Use last frame for minimum padding
# - that should cover both 'udim' and 'frame' minimum padding
@ -645,11 +722,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# In case source are published in place we need to
# skip renumbering
repre_frame_start = repre.get("frameStart")
if (
"originalBasename" not in template
and repre_frame_start is not None
):
index_frame_start = int(repre["frameStart"])
if repre_frame_start is not None:
index_frame_start = int(repre_frame_start)
# Shift destination sequence to the start frame
destination_indexes = [
index_frame_start + idx
@ -705,15 +779,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
else:
# Single file
fname = files
if os.path.isabs(fname):
self.log.error(
"Filename in representation is filepath {}".format(fname)
)
raise KnownPublishError(
"This is a bug. Representation file name is full path"
)
template_data["originalBasename"], _ = os.path.splitext(fname)
# Manage anatomy template data
template_data.pop("frame", None)
if is_udim:
@ -725,7 +790,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
dst = os.path.normpath(template_filled)
# Single file transfer
src = os.path.join(stagingdir, fname)
src = os.path.join(stagingdir, files)
transfers = [(src, dst)]
# todo: Are we sure the assumption each representation
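For intuition, the first-frame padding above boils down to a few lines of plain Python (hypothetical frames; `get_frame_padded` is assumed to zero-pad):

    frames = sorted({1001, 1002, 1003})
    padding = max(4, len(str(frames[-1])))  # collection padding vs widest frame
    first_index_padded = str(frames[0]).zfill(padding)  # "1001"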

View file

@ -386,6 +386,25 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin):
repre["_id"] = old_repre["_id"]
update_data = prepare_representation_update_data(
old_repre, repre)
# Keep previously synchronized sites up-to-date
# by comparing old and new sites and adding old sites
# if missing in new ones
old_repre_files_sites = [
f.get("sites", []) for f in old_repre.get("files", [])
]
for i, file in enumerate(repre.get("files", [])):
repre_sites_names = {
s["name"] for s in file.get("sites", [])
}
for site in old_repre_files_sites[i]:
if site["name"] not in repre_sites_names:
# Pop the date to tag for sync
site.pop("created_dt", None)
file["sites"].append(site)
update_data["files"][i] = file
op_session.update_entity(
project_name,
old_repre["type"],
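A toy illustration of the site-merging rule above (hypothetical site dicts): sites present on the old representation but missing on the new one are carried over with `created_dt` dropped, which tags them for re-sync.

    old_sites = [{"name": "studio", "created_dt": "2023-01-01"}]
    new_sites = [{"name": "gdrive"}]
    known = {s["name"] for s in new_sites}
    for site in old_sites:
        if site["name"] not in known:
            site.pop("created_dt", None)  # drop date to tag for sync
            new_sites.append(site)
    # new_sites -> [{"name": "gdrive"}, {"name": "studio"}]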

View file

@ -340,13 +340,11 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
_stdout, _stderr = proc.communicate()
if _stdout:
for line in _stdout.split(b"\r\n"):
print(line.decode("utf-8"))
print(_stdout.decode("utf-8", errors="backslashreplace"))
# This will probably never happen as ffmpeg uses stdout
if _stderr:
for line in _stderr.split(b"\r\n"):
print(line.decode("utf-8"))
print(_stderr.decode("utf-8", errors="backslashreplace"))
if proc.returncode != 0:
raise RuntimeError(

View file

@ -72,6 +72,11 @@
"key": "command",
"label": "Python command"
},
{
"type": "text",
"key": "icon",
"label": "Icon Path"
},
{
"type": "text",
"key": "shortcut",

View file

@ -16,7 +16,11 @@ from openpype.lib.attribute_definitions import (
UISeparatorDef,
UILabelDef
)
from openpype.tools.utils import CustomTextComboBox
from openpype.tools.utils import (
CustomTextComboBox,
FocusSpinBox,
FocusDoubleSpinBox,
)
from openpype.widgets.nice_checkbox import NiceCheckbox
from .files_widget import FilesWidget
@ -142,6 +146,9 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
if attr_def.label:
label_widget = QtWidgets.QLabel(attr_def.label, self)
tooltip = attr_def.tooltip
if tooltip:
label_widget.setToolTip(tooltip)
layout.addWidget(
label_widget, row, 0, 1, expand_cols
)
@ -243,10 +250,10 @@ class NumberAttrWidget(_BaseAttrDefWidget):
def _ui_init(self):
decimals = self.attr_def.decimals
if decimals > 0:
input_widget = QtWidgets.QDoubleSpinBox(self)
input_widget = FocusDoubleSpinBox(self)
input_widget.setDecimals(decimals)
else:
input_widget = QtWidgets.QSpinBox(self)
input_widget = FocusSpinBox(self)
if self.attr_def.tooltip:
input_widget.setToolTip(self.attr_def.tooltip)

View file

@ -1480,23 +1480,21 @@ class RepresentationWidget(QtWidgets.QWidget):
repre_ids = []
data_by_repre_id = {}
selected_side = action_representation.get("selected_side")
site_name = "{}_site_name".format(selected_side)
is_sync_loader = tools_lib.is_sync_loader(loader)
for item in items:
item_id = item.get("_id")
repre_ids.append(item_id)
repre_id = item["_id"]
repre_ids.append(repre_id)
if not is_sync_loader:
continue
site_name = "{}_site_name".format(selected_side)
data_site_name = item.get(site_name)
if not data_site_name:
continue
data_by_repre_id[item_id] = {
"_id": item_id,
"site_name": data_site_name,
"project_name": self.dbcon.active_project()
data_by_repre_id[repre_id] = {
"site_name": data_site_name
}
repre_contexts = get_repres_contexts(repre_ids, self.dbcon)
@ -1586,8 +1584,8 @@ def _load_representations_by_loader(loader, repre_contexts,
version_name = version_doc.get("name")
try:
if data_by_repre_id:
_id = repre_context["representation"]["_id"]
data = data_by_repre_id.get(_id)
repre_id = repre_context["representation"]["_id"]
data = data_by_repre_id.get(repre_id)
options.update(data)
load_with_repre_context(
loader,

View file

@ -1,4 +1,6 @@
from .widgets import (
FocusSpinBox,
FocusDoubleSpinBox,
CustomTextComboBox,
PlaceholderLineEdit,
BaseClickableFrame,
@ -34,6 +36,8 @@ from .overlay_messages import (
__all__ = (
"FocusSpinBox",
"FocusDoubleSpinBox",
"CustomTextComboBox",
"PlaceholderLineEdit",
"BaseClickableFrame",

View file

@ -13,6 +13,34 @@ from openpype.lib.attribute_definitions import AbstractAttrDef
log = logging.getLogger(__name__)
class FocusSpinBox(QtWidgets.QSpinBox):
"""QSpinBox which allows scroll wheel changes only in active state."""
def __init__(self, *args, **kwargs):
super(FocusSpinBox, self).__init__(*args, **kwargs)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
def wheelEvent(self, event):
if not self.hasFocus():
event.ignore()
else:
super(FocusSpinBox, self).wheelEvent(event)
class FocusDoubleSpinBox(QtWidgets.QDoubleSpinBox):
"""QDoubleSpinBox which allows scroll wheel changes only in active state."""
def __init__(self, *args, **kwargs):
super(FocusDoubleSpinBox, self).__init__(*args, **kwargs)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
def wheelEvent(self, event):
if not self.hasFocus():
event.ignore()
else:
super(FocusDoubleSpinBox, self).wheelEvent(event)
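A quick sketch of the behaviour difference in the two spin boxes above (assuming the qtpy bindings used elsewhere in the codebase): wheel events only change the value once the widget has been clicked into, so scrolling a parent view can no longer edit attributes by accident.

    from qtpy import QtWidgets
    app = QtWidgets.QApplication([])
    spin = FocusSpinBox()
    spin.setRange(0, 100)
    spin.show()
    # scrolling over 'spin' before clicking it leaves the value untouched
    app.exec_()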
class CustomTextComboBox(QtWidgets.QComboBox):
"""Combobox which can have different text shown."""

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.15.1-nightly.3"
__version__ = "3.15.1-nightly.5"

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "OpenPype"
version = "3.15.1" # OpenPype
version = "3.15.0" # OpenPype
description = "Open VFX and Animation pipeline with support."
authors = ["OpenPype Team <info@openpype.io>"]
license = "MIT License"

View file

@ -7,16 +7,10 @@ from github import Github
import os
def get_release_type_github(Log, github_token):
# print(Log)
minor_labels = ["Bump Minor"]
# patch_labels = [
# "type: enhancement",
# "type: bug",
# "type: deprecated",
# "type: Feature"]
g = Github(github_token)
repo = g.get_repo("pypeclub/OpenPype")
repo = g.get_repo("ynput/OpenPype")
labels = set()
for line in Log.splitlines():
@ -35,12 +29,12 @@ def get_release_type_github(Log, github_token):
else:
return "patch"
# TODO: if all is working fine, this part can be cleaned up eventually
# TODO: if all is working fine, this part can be cleaned up eventually
# if any(label in labels for label in patch_labels):
# return "patch"
return None
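A usage sketch (hypothetical log excerpt and token; the exact log parsing is elided in the hunk above): the function collects labels from the PRs referenced in the log and maps them to a bump type.

    log_text = "Merge pull request #1234 ..."  # hypothetical git log excerpt
    bump = get_release_type_github(log_text, "ghp_xxx")  # hypothetical token
    # -> "minor" if any PR carries a "Bump Minor" label, else "patch" or None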
def remove_prefix(text, prefix):
return text[text.startswith(prefix) and len(prefix):]
@ -93,12 +87,16 @@ def file_regex_replace(filename, regex, version):
f.truncate()
def bump_file_versions(version):
def bump_file_versions(version, nightly=False):
filename = "./openpype/version.py"
regex = "(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(-((0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(\.(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(\+([0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*))?"
file_regex_replace(filename, regex, version)
if nightly:
# skip nightly reversion in pyproject.toml
return
# bump pyproject.toml
filename = "pyproject.toml"
regex = "version = \"(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(\+((0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(\.(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(\+([0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*))?\" # OpenPype"
@ -196,7 +194,7 @@ def main():
if options.nightly:
next_tag_v = calculate_next_nightly(github_token=options.github_token)
print(next_tag_v)
bump_file_versions(next_tag_v)
bump_file_versions(next_tag_v, True)
if options.finalize:
new_release = finalize_prerelease(options.finalize)
@ -222,7 +220,7 @@ def main():
new_prerelease = current_prerelease.bump_prerelease().__str__()
print(new_prerelease)
bump_file_versions(new_prerelease)
if options.version:
bump_file_versions(options.version)
print(f"Injected version {options.version} into the release")

View file

@ -0,0 +1,30 @@
---
id: admin_environment
title: Environment
sidebar_label: Environment
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
## OPENPYPE_TMPDIR
- Custom staging directory
- Supports anatomy key formatting, e.g. `{root[work]}/{project[name]}/temp`
- Supported formatting keys:
  - root[work]
  - project[name | code]
## OPENPYPE_DEBUG
- Sets the logger to debug mode
- example value: "1" (to activate)
## OPENPYPE_LOG_LEVEL
- Stringified numeric value of the log level. [See here for more info](https://docs.python.org/3/library/logging.html#logging-levels)
- example value: "10"
## OPENPYPE_MONGO
- If set, it takes precedence over the one set in keyring
- for more details on how to use it go [here](admin_use#check-for-mongodb-database-connection)
## OPENPYPE_USERNAME
- If set, it overrides the system-created username

View file

@ -13,18 +13,23 @@ Settings applicable to the full studio.
![general_settings](assets/settings/settings_system_general.png)
**`Studio Name`** - Full name of the studio (can be used as variable on some places)
### Studio Name
Full name of the studio (can be used as a variable in some places)
**`Studio Code`** - Studio acronym or a short code (can be used as variable on some places)
### Studio Code
Studio acronym or a short code (can be used as a variable in some places)
**`Admin Password`** - After setting admin password, normal user won't have access to OpenPype settings
### Admin Password
After setting admin password, normal user won't have access to OpenPype settings
and Project Manager GUI. Please keep in mind that this is a studio-wide password and it is meant purely
as a simple barrier to prevent artists from accidental setting changes.
**`Environment`** - Globally applied environment variables that will be appended to any OpenPype process in the studio.
### Environment
Globally applied environment variables that will be appended to any OpenPype process in the studio.
**`Disk mapping`** - Platform dependent configuration for mapping of virtual disk(s) on an artist's OpenPype machines before OP starts up.
Uses `subst` command, if configured volume character in `Destination` field already exists, no re-mapping is done for that character(volume).
### Disk mapping
- Platform-dependent configuration for mapping of virtual disk(s) on an artist's OpenPype machine before OP starts up.
- Uses the `subst` command; if the configured volume character in the `Destination` field already exists, no re-mapping is done for that character (volume).
### FFmpeg and OpenImageIO tools
We bundle FFmpeg tools for all platforms and OpenImageIO tools for Windows and Linux. By default, bundled tools are used, but it is possible to set environment variables `OPENPYPE_FFMPEG_PATHS` and `OPENPYPE_OIIO_PATHS` in system settings environments to look for them in a different directory.
@ -171,4 +176,4 @@ In the image before you can see that we set most of the environment variables in
In this example MTOA will automatically fill the `MAYA_VERSION` (which is set by the Maya Application environment) and `MTOA_VERSION` into the `MTOA` variable. We then use the `MTOA` to set all the other variables needed for it to function within Maya.
![tools](assets/settings/tools_01.png)
All of the tools defined in here can then be assigned to projects. You can also change the tools versions on any project level all the way down to individual asset or shot overrides. So if you just need to upgrade you render plugin for a single shot, while not risking the incompatibilities on the rest of the project, it is possible.
All the tools defined in here can then be assigned to projects. You can also change the tool versions on any project level, all the way down to individual asset or shot overrides. So if you just need to upgrade your render plugin for a single shot, while not risking incompatibilities on the rest of the project, it is possible.

View file

@ -308,6 +308,8 @@ Select its root and Go **OpenPype → Create...** and select **Point Cache**.
After that, publishing will create corresponding **abc** files.
When creating the instance, an objectset child `proxy` will be created. Meshes in the `proxy` objectset will be the viewport representation where the loader supports proxies. Proxy representations are stored as `resources` of the subset.
Example setup:
![Maya - Point Cache Example](assets/maya-pointcache_setup.png)
@ -315,6 +317,7 @@ Example setup:
:::note Publish on farm
If your studio has Deadline configured, artists can choose to offload the potentially long-running export of a pointcache and publish it on the farm.
The only thing necessary is to toggle the `Farm` property on the created pointcache instance to True.
:::
### Loading Point Caches

View file

@ -0,0 +1,30 @@
---
id: artist_hosts_maya_arnold
title: Arnold for Maya
sidebar_label: Arnold
---
## Arnold Scene Source (.ass)
Arnold Scene Source can be published as a single file or a sequence of files, determined by the frame range.
When creating the instance, two objectsets are created: `content` and `proxy`. Meshes in the `proxy` objectset will be the viewport representation when loading as `standin`. Proxy representations are stored as `resources` of the subset.
### Arnold Scene Source Proxy Workflow
In order to utilize operators and proxies, the content and proxy nodes need to share the same names (including the shape names). This is done by parenting the content and proxy nodes into separate groups. For example:
![Arnold Scene Source](assets/maya-arnold_scene_source.png)
## Standin
Arnold Scene Source (`ass`) and Alembic (`abc`) files are supported for loading as standins.
### Standin Proxy Workflow
If a subset has a proxy representation, it will be used as the display in the viewport. At render time the standin path will be replaced using the recommended string replacement workflow:
https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_maya_operators_am_Updating_procedural_file_paths_with_string_replace_html
Since the content and proxy nodes share the same names and hierarchy, any manual shader assignments will be shared.
:::note for advanced users
You can stop the proxy swapping by disabling the string replacement operator found in the container.
![Arnold Standin](assets/maya-arnold_standin.png)
:::

Binary file not shown (added image, 16 KiB)

Binary file not shown (added image, 42 KiB)

Binary file not shown (modified image, 49 KiB before, 86 KiB after)

View file

@ -44,6 +44,7 @@ module.exports = {
"artist_hosts_maya_multiverse",
"artist_hosts_maya_yeti",
"artist_hosts_maya_xgen",
"artist_hosts_maya_arnold",
"artist_hosts_maya_vray",
"artist_hosts_maya_redshift",
],
@ -86,6 +87,7 @@ module.exports = {
type: "category",
label: "Configuration",
items: [
"admin_environment",
"admin_settings",
"admin_settings_system",
"admin_settings_project_anatomy",

View file

@ -4273,9 +4273,9 @@ htmlparser2@^6.1.0:
entities "^2.0.0"
http-cache-semantics@^4.0.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390"
integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==
version "4.1.1"
resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz#abe02fcb2985460bf0323be664436ec3476a6d5a"
integrity sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==
http-deceiver@^1.2.7:
version "1.2.7"
@ -7180,9 +7180,9 @@ typedarray-to-buffer@^3.1.5:
is-typedarray "^1.0.0"
ua-parser-js@^0.7.30:
version "0.7.31"
resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.31.tgz#649a656b191dffab4f21d5e053e27ca17cbff5c6"
integrity sha512-qLK/Xe9E2uzmYI3qLeOmI0tEOt+TBBQyUIAh4aAgU05FVYzeZrKUdkAZfBNVGRaHVgV0TDkdEngJSw/SyQchkQ==
version "0.7.33"
resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.33.tgz#1d04acb4ccef9293df6f70f2c3d22f3030d8b532"
integrity sha512-s8ax/CeZdK9R/56Sui0WM6y9OFREJarMRHqLB2EwkovemBxNQ+Bqu8GAsUnVcXKgphb++ghr/B2BZx4mahujPw==
unherit@^1.0.4:
version "1.1.3"